{"repo_id":"AgentLab","entity_id":"py:main","uri":"program://AgentLab/module/main#L1-L74","kind":"module","name":"main","path":"main.py","language":"python","start_line":1,"end_line":74,"context_start_line":1,"context_end_line":74,"code":"\"\"\"\nNote: This script is a convenience script to launch experiments instead of using\nthe command line.\n\nCopy this script and modify at will, but don't push your changes to the\nrepository.\n\"\"\"\n\nimport logging\n\nfrom agentlab.agents.generic_agent import (\n AGENT_LLAMA3_70B,\n AGENT_LLAMA31_70B,\n RANDOM_SEARCH_AGENT,\n AGENT_4o,\n AGENT_4o_MINI,\n AGENT_o3_MINI,\n AGENT_37_SONNET,\n AGENT_CLAUDE_SONNET_35,\n AGENT_GPT5_MINI,\n)\nfrom agentlab.experiments.study import Study\n\nlogging.getLogger().setLevel(logging.INFO)\n\n# choose your agent or provide a new agent\nagent_args = [AGENT_4o_MINI]\n# agent_args = [AGENT_4o]\n\n\n# ## select the benchmark to run on\nbenchmark = \"miniwob_tiny_test\"\n# benchmark = \"miniwob\"\n# benchmark = \"workarena_l1\"\n# benchmark = \"workarena_l2\"\n# benchmark = \"workarena_l3\"\n# benchmark = \"webarena\"\n\n# Set reproducibility_mode = True for reproducibility\n# this will \"ask\" agents to be deterministic. Also, it will prevent you from launching if you have\n# local changes. For your custom agents you need to implement set_reproducibility_mode\nreproducibility_mode = False\n\n# Set relaunch = True to relaunch an existing study, this will continue incomplete\n# experiments and relaunch errored experiments\nrelaunch = False\n\n## Number of parallel jobs\nn_jobs = 4 # Make sure to use 1 job when debugging in VSCode\n# n_jobs = -1 # to use all available cores\n\n\nif __name__ == \"__main__\": # necessary for dask backend\n\n if reproducibility_mode:\n [a.set_reproducibility_mode() for a in agent_args]\n\n if relaunch:\n # relaunch an existing study\n study = Study.load_most_recent(contains=None)\n study.find_incomplete(include_errors=True)\n\n else:\n study = Study(agent_args, benchmark, logging_level_stdout=logging.WARNING)\n\n study.run(\n n_jobs=n_jobs,\n parallel_backend=\"ray\",\n strict_reproducibility=reproducibility_mode,\n n_relaunch=3,\n )\n\n if reproducibility_mode:\n study.append_to_journal(strict_reproducibility=True)","source_hash":"cf52282bd08c6ac5ba93fcfc1ccacc25b04c07ed59f1f8a8ad630906d8499366","truncated":false} {"repo_id":"AgentLab","entity_id":"py:main_workarena_debug","uri":"program://AgentLab/module/main_workarena_debug#L1-L77","kind":"module","name":"main_workarena_debug","path":"main_workarena_debug.py","language":"python","start_line":1,"end_line":77,"context_start_line":1,"context_end_line":77,"code":"\"\"\"\nNote: This script is a convenience script to launch experiments instead of using\nthe command line.\n\nCopy this script and modify at will, but don't push your changes to the\nrepository.\n\"\"\"\n\nimport logging\nfrom copy import deepcopy\n\nimport bgym\n\nfrom agentlab.agents.tool_use_agent.tool_use_agent import (\n DEFAULT_PROMPT_CONFIG,\n GPT_4_1,\n ToolUseAgentArgs,\n)\nfrom agentlab.experiments.study import Study\n\nlogging.getLogger().setLevel(logging.INFO)\n\nconfig = deepcopy(DEFAULT_PROMPT_CONFIG)\n# config.keep_last_n_obs = 1\nconfig.obs.use_som = True\n\n\nagent_configs = [\n ToolUseAgentArgs(\n model_args=GPT_4_1,\n config=config,\n ),\n # ToolUseAgentArgs(\n # model_args=GPT_4_1,\n # config=config,\n # ),\n]\n\nfor agent_config in agent_configs:\n agent_config.config.action_subsets = (\"workarena\",) # use the workarena action set\n\n\n# ## select the 
benchmark to run on\n# benchmark = \"miniwob_tiny_test\"\nbenchmark = \"workarena_l1\"\n\n\nbenchmark = bgym.DEFAULT_BENCHMARKS[benchmark](n_repeats=4) # type: bgym.Benchmark\nbenchmark = benchmark.subset_from_glob(\"task_name\", \"*create*\")\n\n# for env_args in benchmark.env_args_list:\n# print(env_args.task_name)\n# env_args.max_steps = 15\n\nrelaunch = False\n\n## Number of parallel jobs\nn_jobs = 10 # Make sure to use 1 job when debugging in VSCode\nparallel_backend = \"ray\"\n# parallel_backend = \"sequential\" # activate sequential backend for debugging in VSCode\n\nif __name__ == \"__main__\": # necessary for dask backend\n\n if relaunch:\n # relaunch an existing study\n study = Study.load_most_recent(contains=None)\n study.find_incomplete(include_errors=True)\n\n else:\n study = Study(agent_configs, benchmark, logging_level_stdout=logging.WARNING)\n\n study.run(\n n_jobs=n_jobs,\n parallel_backend=parallel_backend, # \"ray\", \"joblib\" or \"sequential\"\n strict_reproducibility=False,\n n_relaunch=3,\n )","source_hash":"92a99e0d2b6b2fb8c18fb1060abf2c17ba8d9a100132f242959c7fb8ac18a4d8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:add_study_to_repro_journal","uri":"program://AgentLab/module/add_study_to_repro_journal#L1-L18","kind":"module","name":"add_study_to_repro_journal","path":"add_study_to_repro_journal.py","language":"python","start_line":1,"end_line":18,"context_start_line":1,"context_end_line":18,"code":"import os\nfrom pathlib import Path\nfrom agentlab.experiments.study import Study\n\n\nbase_dir = \"/home/toolkit/ui_copilot_results\"\n\nexp_paths = [\n \"2025-01-31_22-08-34_genericagent-o3-mini-2025-01-31-on-workarena-l1\",\n # '2025-02-02_01-53-45_genericagent-openai-o1-mini-2024-09-12-on-workarena-l1',\n \"2025-02-02_01-55-04_genericagent-openai-o1-mini-2024-09-12-on-workarena-l1\",\n]\nfull_paths = [os.path.join(base_dir, exp_path) for exp_path in exp_paths]\n\nfor full_path in full_paths:\n study = Study.load(Path(full_path))\n\n study.append_to_journal(strict_reproducibility=False)","source_hash":"dc9b4b94f8f744a3656b875dd287be370af8551604785b276bd041bd6ba5b408","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.test_main","uri":"program://AgentLab/module/tests.test_main#L1-L28","kind":"module","name":"tests.test_main","path":"tests/test_main.py","language":"python","start_line":1,"end_line":28,"context_start_line":1,"context_end_line":28,"code":"import subprocess\nimport sys\nfrom pathlib import Path\n\nimport pytest\n\n\n@pytest.mark.pricy\ndef test_main_script_execution():\n # this should trigger agent_4o_mini on miniwob_tiny_test unless this was\n # reconfigured differently.\n path = Path(__file__).parent.parent / \"main.py\"\n\n sys.path.insert(0, str(path.parent))\n\n # just make sure it's in the right state\n main = __import__(path.stem)\n assert main.benchmark == \"miniwob_tiny_test\"\n assert main.reproducibility_mode == False\n assert main.relaunch == False\n assert main.n_jobs <= 10\n\n result = subprocess.run([\"python\", str(path)], capture_output=True, text=True, timeout=5 * 60)\n assert result.returncode == 0\n\n\nif __name__ == \"__main__\":\n test_main_script_execution()","source_hash":"1c339f46889f59d74a302b20ab4529bc84b49bcb05ef73f1d278f62ac5758348","truncated":false} 
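The launcher records above (main.py, main_workarena_debug.py, add_study_to_repro_journal.py) all drive the same Study API. As a usage note, the whole launch-or-relaunch flow condenses to the sketch below; it reuses only the calls that appear verbatim in those scripts, and the single sequential job is just the debugger-friendly configuration their comments recommend.

# Minimal launch sketch distilled from main.py; every Study call here
# (load_most_recent, find_incomplete, run) appears in the records above.
import logging

from agentlab.agents.generic_agent import AGENT_4o_MINI
from agentlab.experiments.study import Study

logging.getLogger().setLevel(logging.INFO)

relaunch = False  # True resumes the most recent study instead of starting a new one

if __name__ == "__main__":  # required by the parallel backends
    if relaunch:
        study = Study.load_most_recent(contains=None)
        study.find_incomplete(include_errors=True)  # re-queue unfinished and errored runs
    else:
        study = Study([AGENT_4o_MINI], "miniwob_tiny_test", logging_level_stdout=logging.WARNING)

    study.run(n_jobs=1, parallel_backend="sequential", strict_reproducibility=False, n_relaunch=3)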
{"repo_id":"AgentLab","entity_id":"py:tests.test_main.test_main_script_execution","uri":"program://AgentLab/function/tests.test_main.test_main_script_execution#L9-L24","kind":"function","name":"test_main_script_execution","path":"tests/test_main.py","language":"python","start_line":9,"end_line":24,"context_start_line":1,"context_end_line":28,"code":"import subprocess\nimport sys\nfrom pathlib import Path\n\nimport pytest\n\n\n@pytest.mark.pricy\ndef test_main_script_execution():\n # this should trigger agent_4o_mini on miniwob_tiny_test unless this was\n # reconfigured differently.\n path = Path(__file__).parent.parent / \"main.py\"\n\n sys.path.insert(0, str(path.parent))\n\n # just make sure it's in the right state\n main = __import__(path.stem)\n assert main.benchmark == \"miniwob_tiny_test\"\n assert main.reproducibility_mode == False\n assert main.relaunch == False\n assert main.n_jobs <= 10\n\n result = subprocess.run([\"python\", str(path)], capture_output=True, text=True, timeout=5 * 60)\n assert result.returncode == 0\n\n\nif __name__ == \"__main__\":\n test_main_script_execution()","source_hash":"1c339f46889f59d74a302b20ab4529bc84b49bcb05ef73f1d278f62ac5758348","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.test_ui_assistant","uri":"program://AgentLab/module/tests.test_ui_assistant#L1-L9","kind":"module","name":"tests.test_ui_assistant","path":"tests/test_ui_assistant.py","language":"python","start_line":1,"end_line":9,"context_start_line":1,"context_end_line":9,"code":"from agentlab.ui_assistant import make_exp_args\nfrom agentlab.agents.generic_agent import AGENT_4o\n\n\ndef test_make_exp_args():\n \"\"\"Basic unit test to detect refactoring errors.\"\"\"\n exp_args = make_exp_args(AGENT_4o, \"https://www.google.com\")\n\n assert exp_args.agent_args.flags.action.demo_mode == \"default\"","source_hash":"3a084bf5c64c372102deec1fae59a53a3c7f99ae7b2bb77735960cb40568a15a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.test_ui_assistant.test_make_exp_args","uri":"program://AgentLab/function/tests.test_ui_assistant.test_make_exp_args#L5-L9","kind":"function","name":"test_make_exp_args","path":"tests/test_ui_assistant.py","language":"python","start_line":5,"end_line":9,"context_start_line":1,"context_end_line":9,"code":"from agentlab.ui_assistant import make_exp_args\nfrom agentlab.agents.generic_agent import AGENT_4o\n\n\ndef test_make_exp_args():\n \"\"\"Basic unit test to detect refactoring errors.\"\"\"\n exp_args = make_exp_args(AGENT_4o, \"https://www.google.com\")\n\n assert exp_args.agent_args.flags.action.demo_mode == \"default\"","source_hash":"3a084bf5c64c372102deec1fae59a53a3c7f99ae7b2bb77735960cb40568a15a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic","uri":"program://AgentLab/module/tests.verify_rate_limit_anthropic#L1-L89","kind":"module","name":"tests.verify_rate_limit_anthropic","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":1,"end_line":89,"context_start_line":1,"context_end_line":89,"code":"import os\nimport time\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nimport anthropic\n\nclient = anthropic.Anthropic(api_key=os.environ[\"ANTHROPIC_API_KEY\"])\n\n\ndef make_request(messages):\n response = client.messages.create(\n model=\"claude-3-5-sonnet-20241022\", max_tokens=10, messages=messages\n )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": 
text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:\n del message[\"content\"][0][\"cache_control\"]\n\n\ndef test_rate_limit_single(thread_id):\n # Create ~100k token message that will be cached\n big_text = \"This is a large block of text for caching. \" * 10000 # ~100k tokens\n medium_text = \"This is a large block of text for caching. \" * 2000 # ~10k tokens\n\n print(f\"Thread {thread_id}: Starting rate limit test with cached content...\")\n\n # Rebuild conversation each time (simulating web agent)\n messages = []\n\n # Add all previous conversation turns\n for i in range(5):\n if i == 0:\n messages.append(make_message(big_text))\n t0 = time.time()\n else:\n messages.append(make_message(medium_text))\n add_cache_control(messages[-1])\n try:\n usage = make_request(messages)\n dt = time.time() - t0\n print(f\"{dt:.2f}: Thread {thread_id}: {usage}\")\n except Exception as e:\n print(f\"Thread {thread_id}: Error - {e}\")\n break\n remove_cache_control(messages[-1])\n\n\ndef test_rate_limit_parallel(num_threads=3):\n print(f\"Starting parallel rate limit test with {num_threads} threads...\")\n\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\n futures = [executor.submit(test_rate_limit_single, i) for i in range(num_threads)]\n\n for future in as_completed(futures):\n try:\n future.result()\n except Exception as e:\n print(f\"Thread completed with error: {e}\")\n\n\ndef test_rate_limit():\n # Original single-threaded version\n test_rate_limit_single(0)\n\n\nif __name__ == \"__main__\":\n # Use parallel version to quickly exhaust rate limits\n test_rate_limit_parallel(num_threads=3)\n\n # Or use original single-threaded version\n # test_rate_limit()","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.make_request","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.make_request#L10-L14","kind":"function","name":"make_request","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":10,"end_line":14,"context_start_line":1,"context_end_line":34,"code":"import os\nimport time\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nimport anthropic\n\nclient = anthropic.Anthropic(api_key=os.environ[\"ANTHROPIC_API_KEY\"])\n\n\ndef make_request(messages):\n response = client.messages.create(\n model=\"claude-3-5-sonnet-20241022\", max_tokens=10, messages=messages\n )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false} 
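A usage note on the helpers in tests/verify_rate_limit_anthropic.py: a content block is marked for Anthropic prompt caching by attaching a cache_control entry, which is exactly what add_cache_control and remove_cache_control toggle. A self-contained sketch of one cached request follows, reusing only the message shape and client call shown in this module; the repeated filler text is a placeholder.

import os

import anthropic

client = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])

# One user message with a single text block, the same shape make_message() builds.
message = {
    "role": "user",
    "content": [{"type": "text", "text": "Filler text worth caching. " * 2000}],
}
# Mark the block as a cache point, the same dict add_cache_control() writes.
message["content"][0]["cache_control"] = {"type": "ephemeral"}

response = client.messages.create(
    model="claude-3-5-sonnet-20241022", max_tokens=10, messages=[message]
)
print(response.usage)  # usage is what make_request() returns; it reflects caching on repeat calls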
{"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.make_message","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.make_message#L17-L26","kind":"function","name":"make_message","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":17,"end_line":26,"context_start_line":1,"context_end_line":46,"code":"import os\nimport time\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nimport anthropic\n\nclient = anthropic.Anthropic(api_key=os.environ[\"ANTHROPIC_API_KEY\"])\n\n\ndef make_request(messages):\n response = client.messages.create(\n model=\"claude-3-5-sonnet-20241022\", max_tokens=10, messages=messages\n )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:\n del message[\"content\"][0][\"cache_control\"]\n\n\ndef test_rate_limit_single(thread_id):\n # Create ~100k token message that will be cached\n big_text = \"This is a large block of text for caching. \" * 10000 # ~100k tokens\n medium_text = \"This is a large block of text for caching. \" * 2000 # ~10k tokens\n\n print(f\"Thread {thread_id}: Starting rate limit test with cached content...\")\n\n # Rebuild conversation each time (simulating web agent)\n messages = []","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.add_cache_control","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.add_cache_control#L29-L30","kind":"function","name":"add_cache_control","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":29,"end_line":30,"context_start_line":9,"context_end_line":50,"code":"\ndef make_request(messages):\n response = client.messages.create(\n model=\"claude-3-5-sonnet-20241022\", max_tokens=10, messages=messages\n )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:\n del message[\"content\"][0][\"cache_control\"]\n\n\ndef test_rate_limit_single(thread_id):\n # Create ~100k token message that will be cached\n big_text = \"This is a large block of text for caching. \" * 10000 # ~100k tokens\n medium_text = \"This is a large block of text for caching. 
\" * 2000 # ~10k tokens\n\n print(f\"Thread {thread_id}: Starting rate limit test with cached content...\")\n\n # Rebuild conversation each time (simulating web agent)\n messages = []\n\n # Add all previous conversation turns\n for i in range(5):\n if i == 0:","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.remove_cache_control","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.remove_cache_control#L33-L35","kind":"function","name":"remove_cache_control","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":33,"end_line":35,"context_start_line":13,"context_end_line":55,"code":" )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:\n del message[\"content\"][0][\"cache_control\"]\n\n\ndef test_rate_limit_single(thread_id):\n # Create ~100k token message that will be cached\n big_text = \"This is a large block of text for caching. \" * 10000 # ~100k tokens\n medium_text = \"This is a large block of text for caching. \" * 2000 # ~10k tokens\n\n print(f\"Thread {thread_id}: Starting rate limit test with cached content...\")\n\n # Rebuild conversation each time (simulating web agent)\n messages = []\n\n # Add all previous conversation turns\n for i in range(5):\n if i == 0:\n messages.append(make_message(big_text))\n t0 = time.time()\n else:\n messages.append(make_message(medium_text))\n add_cache_control(messages[-1])","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.test_rate_limit_single","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.test_rate_limit_single#L38-L63","kind":"function","name":"test_rate_limit_single","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":38,"end_line":63,"context_start_line":18,"context_end_line":83,"code":" return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:\n del message[\"content\"][0][\"cache_control\"]\n\n\ndef test_rate_limit_single(thread_id):\n # Create ~100k token message that will be cached\n big_text = \"This is a large block of text for caching. \" * 10000 # ~100k tokens\n medium_text = \"This is a large block of text for caching. 
\" * 2000 # ~10k tokens\n\n print(f\"Thread {thread_id}: Starting rate limit test with cached content...\")\n\n # Rebuild conversation each time (simulating web agent)\n messages = []\n\n # Add all previous conversation turns\n for i in range(5):\n if i == 0:\n messages.append(make_message(big_text))\n t0 = time.time()\n else:\n messages.append(make_message(medium_text))\n add_cache_control(messages[-1])\n try:\n usage = make_request(messages)\n dt = time.time() - t0\n print(f\"{dt:.2f}: Thread {thread_id}: {usage}\")\n except Exception as e:\n print(f\"Thread {thread_id}: Error - {e}\")\n break\n remove_cache_control(messages[-1])\n\n\ndef test_rate_limit_parallel(num_threads=3):\n print(f\"Starting parallel rate limit test with {num_threads} threads...\")\n\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\n futures = [executor.submit(test_rate_limit_single, i) for i in range(num_threads)]\n\n for future in as_completed(futures):\n try:\n future.result()\n except Exception as e:\n print(f\"Thread completed with error: {e}\")\n\n\ndef test_rate_limit():\n # Original single-threaded version\n test_rate_limit_single(0)\n\n","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.test_rate_limit_parallel","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.test_rate_limit_parallel#L66-L76","kind":"function","name":"test_rate_limit_parallel","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":66,"end_line":76,"context_start_line":46,"context_end_line":89,"code":" messages = []\n\n # Add all previous conversation turns\n for i in range(5):\n if i == 0:\n messages.append(make_message(big_text))\n t0 = time.time()\n else:\n messages.append(make_message(medium_text))\n add_cache_control(messages[-1])\n try:\n usage = make_request(messages)\n dt = time.time() - t0\n print(f\"{dt:.2f}: Thread {thread_id}: {usage}\")\n except Exception as e:\n print(f\"Thread {thread_id}: Error - {e}\")\n break\n remove_cache_control(messages[-1])\n\n\ndef test_rate_limit_parallel(num_threads=3):\n print(f\"Starting parallel rate limit test with {num_threads} threads...\")\n\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\n futures = [executor.submit(test_rate_limit_single, i) for i in range(num_threads)]\n\n for future in as_completed(futures):\n try:\n future.result()\n except Exception as e:\n print(f\"Thread completed with error: {e}\")\n\n\ndef test_rate_limit():\n # Original single-threaded version\n test_rate_limit_single(0)\n\n\nif __name__ == \"__main__\":\n # Use parallel version to quickly exhaust rate limits\n test_rate_limit_parallel(num_threads=3)\n\n # Or use original single-threaded version\n # test_rate_limit()","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.test_rate_limit","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.test_rate_limit#L79-L81","kind":"function","name":"test_rate_limit","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":79,"end_line":81,"context_start_line":59,"context_end_line":89,"code":" print(f\"{dt:.2f}: Thread {thread_id}: {usage}\")\n except Exception as e:\n print(f\"Thread {thread_id}: Error - {e}\")\n break\n remove_cache_control(messages[-1])\n\n\ndef test_rate_limit_parallel(num_threads=3):\n print(f\"Starting 
parallel rate limit test with {num_threads} threads...\")\n\n    with ThreadPoolExecutor(max_workers=num_threads) as executor:\n        futures = [executor.submit(test_rate_limit_single, i) for i in range(num_threads)]\n\n        for future in as_completed(futures):\n            try:\n                future.result()\n            except Exception as e:\n                print(f\"Thread completed with error: {e}\")\n\n\ndef test_rate_limit():\n    # Original single-threaded version\n    test_rate_limit_single(0)\n\n\nif __name__ == \"__main__\":\n    # Use parallel version to quickly exhaust rate limits\n    test_rate_limit_parallel(num_threads=3)\n\n    # Or use original single-threaded version\n    # test_rate_limit()","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_overlay_utils","uri":"program://AgentLab/module/tests.analyze.test_overlay_utils#L1-L81","kind":"module","name":"tests.analyze.test_overlay_utils","path":"tests/analyze/test_overlay_utils.py","language":"python","start_line":1,"end_line":81,"context_start_line":1,"context_end_line":81,"code":"from PIL import Image\n\nfrom agentlab.analyze import overlay_utils\n\n\ndef test_parse_function_calls():\n\n    test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nclick('b123', button=\"right\", modifiers=[\"Shift\", \"Control\"])\nselect_option(\"c456\", [\"option1\", \"option2\"])\n\"\"\"\n\n    result = overlay_utils.parse_function_calls(test_code)\n\n    assert result[1].function_name == \"mouse_click\"\n    assert result[1].name == \"y\"\n    assert test_code[result[1].start_index : result[1].stop_index] == \"59\"\n\n    assert result[8].function_name == \"select_option\"\n    assert result[8].name == \"options\"\n    assert test_code[result[8].start_index : result[8].stop_index] == '[\"option1\", \"option2\"]'\n\n\ndef test_filtering_args():\n    test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nmouse_drag_and_drop(34, 59, to_x=100, to_y=200)\ndrag_and_drop(\"a123\", \"b456\")\n\"\"\"\n    result = overlay_utils.parse_function_calls(test_code)\n    args = overlay_utils.find_bids_and_xy_pairs(result)\n\n    assert len(args) == 6  # 6 args: mouse_click xy, fill bid, from_xy + to_xy, and 2 drag_and_drop bids\n\n    assert args[0].function_name == \"mouse_click\"\n    assert args[0].name == \"xy\"\n    assert args[0].value == (34.0, 59.0)\n    assert test_code[args[0].start_index : args[0].stop_index] == \"34, 59\"\n\n    assert args[2].name == \"from_xy\"\n    assert args[3].name == \"to_xy\"\n    assert test_code[args[3].start_index : args[3].stop_index] == \"to_x=100, to_y=200\"\n\n\ndef manual_eval():\n    \"\"\"Manual test function that displays the resulting image.\"\"\"\n    import matplotlib.pyplot as plt\n\n    # Create a white test image\n    img = Image.new(\"RGB\", (400, 300), \"white\")\n\n    # Test action string with multiple function calls\n    action_string = \"\"\"mouse_click(100, 150)\nfill(\"search_box\", \"hello world\")\nclick(\"submit_btn\")\"\"\"\n\n    # Mock properties mapping bids to bounding boxes\n    properties = {\n        \"search_box\": {\"bbox\": (50, 50, 100, 50)},\n        \"submit_btn\": {\"bbox\": (150, 100, 120, 30)},\n    }\n\n    # Annotate the image and get colored HTML\n    html_result = overlay_utils.annotate_action(img, action_string, properties, colormap=\"tab10\")\n\n    # Display result\n    plt.figure(figsize=(10, 6))\n    plt.imshow(img)\n    plt.axis(\"off\")\n    plt.show()\n\n    print(\"HTML with colored arguments:\")\n    print(html_result)\n    print(\"\\nManual test completed!\")\n\n\nif __name__ == \"__main__\":\n    
manual_eval()","source_hash":"9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_overlay_utils.test_parse_function_calls","uri":"program://AgentLab/function/tests.analyze.test_overlay_utils.test_parse_function_calls#L6-L23","kind":"function","name":"test_parse_function_calls","path":"tests/analyze/test_overlay_utils.py","language":"python","start_line":6,"end_line":23,"context_start_line":1,"context_end_line":43,"code":"from PIL import Image\n\nfrom agentlab.analyze import overlay_utils\n\n\ndef test_parse_function_calls():\n\n    test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nclick('b123', button=\"right\", modifiers=[\"Shift\", \"Control\"])\nselect_option(\"c456\", [\"option1\", \"option2\"])\n\"\"\"\n\n    result = overlay_utils.parse_function_calls(test_code)\n\n    assert result[1].function_name == \"mouse_click\"\n    assert result[1].name == \"y\"\n    assert test_code[result[1].start_index : result[1].stop_index] == \"59\"\n\n    assert result[8].function_name == \"select_option\"\n    assert result[8].name == \"options\"\n    assert test_code[result[8].start_index : result[8].stop_index] == '[\"option1\", \"option2\"]'\n\n\ndef test_filtering_args():\n    test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nmouse_drag_and_drop(34, 59, to_x=100, to_y=200)\ndrag_and_drop(\"a123\", \"b456\")\n\"\"\"\n    result = overlay_utils.parse_function_calls(test_code)\n    args = overlay_utils.find_bids_and_xy_pairs(result)\n\n    assert len(args) == 6  # 6 args: mouse_click xy, fill bid, from_xy + to_xy, and 2 drag_and_drop bids\n\n    assert args[0].function_name == \"mouse_click\"\n    assert args[0].name == \"xy\"\n    assert args[0].value == (34.0, 59.0)\n    assert test_code[args[0].start_index : args[0].stop_index] == \"34, 59\"\n\n    assert args[2].name == \"from_xy\"","source_hash":"9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_overlay_utils.test_filtering_args","uri":"program://AgentLab/function/tests.analyze.test_overlay_utils.test_filtering_args#L26-L45","kind":"function","name":"test_filtering_args","path":"tests/analyze/test_overlay_utils.py","language":"python","start_line":26,"end_line":45,"context_start_line":6,"context_end_line":65,"code":"def test_parse_function_calls():\n\n    test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nclick('b123', button=\"right\", modifiers=[\"Shift\", \"Control\"])\nselect_option(\"c456\", [\"option1\", \"option2\"])\n\"\"\"\n\n    result = overlay_utils.parse_function_calls(test_code)\n\n    assert result[1].function_name == \"mouse_click\"\n    assert result[1].name == \"y\"\n    assert test_code[result[1].start_index : result[1].stop_index] == \"59\"\n\n    assert result[8].function_name == \"select_option\"\n    assert result[8].name == \"options\"\n    assert test_code[result[8].start_index : result[8].stop_index] == '[\"option1\", \"option2\"]'\n\n\ndef test_filtering_args():\n    test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nmouse_drag_and_drop(34, 59, to_x=100, to_y=200)\ndrag_and_drop(\"a123\", \"b456\")\n\"\"\"\n    result = overlay_utils.parse_function_calls(test_code)\n    args = overlay_utils.find_bids_and_xy_pairs(result)\n\n    assert len(args) == 6  # 6 args: mouse_click xy, fill bid, from_xy + to_xy, and 2 drag_and_drop bids\n\n    assert args[0].function_name == \"mouse_click\"\n    assert args[0].name == \"xy\"\n    assert args[0].value == (34.0, 59.0)\n    assert test_code[args[0].start_index : 
args[0].stop_index] == \"34, 59\"\n\n    assert args[2].name == \"from_xy\"\n    assert args[3].name == \"to_xy\"\n    assert test_code[args[3].start_index : args[3].stop_index] == \"to_x=100, to_y=200\"\n\n\ndef manual_eval():\n    \"\"\"Manual test function that displays the resulting image.\"\"\"\n    import matplotlib.pyplot as plt\n\n    # Create a white test image\n    img = Image.new(\"RGB\", (400, 300), \"white\")\n\n    # Test action string with multiple function calls\n    action_string = \"\"\"mouse_click(100, 150)\nfill(\"search_box\", \"hello world\")\nclick(\"submit_btn\")\"\"\"\n\n    # Mock properties mapping bids to bounding boxes\n    properties = {\n        \"search_box\": {\"bbox\": (50, 50, 100, 50)},\n        \"submit_btn\": {\"bbox\": (150, 100, 120, 30)},\n    }\n","source_hash":"9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_overlay_utils.manual_eval","uri":"program://AgentLab/function/tests.analyze.test_overlay_utils.manual_eval#L48-L77","kind":"function","name":"manual_eval","path":"tests/analyze/test_overlay_utils.py","language":"python","start_line":48,"end_line":77,"context_start_line":28,"context_end_line":81,"code":"mouse_click(34, 59)\nfill(\"a234\", \"test\")\nmouse_drag_and_drop(34, 59, to_x=100, to_y=200)\ndrag_and_drop(\"a123\", \"b456\")\n\"\"\"\n    result = overlay_utils.parse_function_calls(test_code)\n    args = overlay_utils.find_bids_and_xy_pairs(result)\n\n    assert len(args) == 6  # 6 args: mouse_click xy, fill bid, from_xy + to_xy, and 2 drag_and_drop bids\n\n    assert args[0].function_name == \"mouse_click\"\n    assert args[0].name == \"xy\"\n    assert args[0].value == (34.0, 59.0)\n    assert test_code[args[0].start_index : args[0].stop_index] == \"34, 59\"\n\n    assert args[2].name == \"from_xy\"\n    assert args[3].name == \"to_xy\"\n    assert test_code[args[3].start_index : args[3].stop_index] == \"to_x=100, to_y=200\"\n\n\ndef manual_eval():\n    \"\"\"Manual test function that displays the resulting image.\"\"\"\n    import matplotlib.pyplot as plt\n\n    # Create a white test image\n    img = Image.new(\"RGB\", (400, 300), \"white\")\n\n    # Test action string with multiple function calls\n    action_string = \"\"\"mouse_click(100, 150)\nfill(\"search_box\", \"hello world\")\nclick(\"submit_btn\")\"\"\"\n\n    # Mock properties mapping bids to bounding boxes\n    properties = {\n        \"search_box\": {\"bbox\": (50, 50, 100, 50)},\n        \"submit_btn\": {\"bbox\": (150, 100, 120, 30)},\n    }\n\n    # Annotate the image and get colored HTML\n    html_result = overlay_utils.annotate_action(img, action_string, properties, colormap=\"tab10\")\n\n    # Display result\n    plt.figure(figsize=(10, 6))\n    plt.imshow(img)\n    plt.axis(\"off\")\n    plt.show()\n\n    print(\"HTML with colored arguments:\")\n    print(html_result)\n    print(\"\\nManual test completed!\")\n\n\nif __name__ == \"__main__\":\n    manual_eval()","source_hash":"9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_inspect_results","uri":"program://AgentLab/module/tests.analyze.test_inspect_results#L1-L35","kind":"module","name":"tests.analyze.test_inspect_results","path":"tests/analyze/test_inspect_results.py","language":"python","start_line":1,"end_line":35,"context_start_line":1,"context_end_line":35,"code":"from pathlib import Path\nimport shutil\nimport tempfile\n\nimport pandas as pd\nfrom agentlab.analyze.inspect_results import get_study_summary\n\n\ndef test_get_study_summary():\n\n    with tempfile.TemporaryDirectory() as tmp_dir:\n        
study_dir = Path(tmp_dir) / \"test_study\"\n\n        study_dir_original = Path(__file__).parent.parent / \"data\" / \"test_study\"\n\n        # recursively copy the study to the temp dir using shutil\n        shutil.copytree(study_dir_original, study_dir)\n\n        sentinel = {}\n\n        summary = get_study_summary(study_dir, sentinel=sentinel)\n        assert isinstance(summary, pd.DataFrame)\n        assert sentinel[\"from_cache\"] == False\n\n        summary = get_study_summary(study_dir, sentinel=sentinel)\n        assert isinstance(summary, pd.DataFrame)\n        assert sentinel[\"from_cache\"] == True\n\n        summary = get_study_summary(study_dir, ignore_cache=True, sentinel=sentinel)\n        assert isinstance(summary, pd.DataFrame)\n        assert sentinel[\"from_cache\"] == False\n\n\nif __name__ == \"__main__\":\n    test_get_study_summary()","source_hash":"1b1e25989eb1e81a99bbac77634a76f275aa278a331ac3e97b1ddea85e224bd9","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_inspect_results.test_get_study_summary","uri":"program://AgentLab/function/tests.analyze.test_inspect_results.test_get_study_summary#L9-L31","kind":"function","name":"test_get_study_summary","path":"tests/analyze/test_inspect_results.py","language":"python","start_line":9,"end_line":31,"context_start_line":1,"context_end_line":35,"code":"from pathlib import Path\nimport shutil\nimport tempfile\n\nimport pandas as pd\nfrom agentlab.analyze.inspect_results import get_study_summary\n\n\ndef test_get_study_summary():\n\n    with tempfile.TemporaryDirectory() as tmp_dir:\n        study_dir = Path(tmp_dir) / \"test_study\"\n\n        study_dir_original = Path(__file__).parent.parent / \"data\" / \"test_study\"\n\n        # recursively copy the study to the temp dir using shutil\n        shutil.copytree(study_dir_original, study_dir)\n\n        sentinel = {}\n\n        summary = get_study_summary(study_dir, sentinel=sentinel)\n        assert isinstance(summary, pd.DataFrame)\n        assert sentinel[\"from_cache\"] == False\n\n        summary = get_study_summary(study_dir, sentinel=sentinel)\n        assert isinstance(summary, pd.DataFrame)\n        assert sentinel[\"from_cache\"] == True\n\n        summary = get_study_summary(study_dir, ignore_cache=True, sentinel=sentinel)\n        assert isinstance(summary, pd.DataFrame)\n        assert sentinel[\"from_cache\"] == False\n\n\nif __name__ == \"__main__\":\n    test_get_study_summary()","source_hash":"1b1e25989eb1e81a99bbac77634a76f275aa278a331ac3e97b1ddea85e224bd9","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_huggingface_utils","uri":"program://AgentLab/module/tests.llm.test_huggingface_utils#L1-L32","kind":"module","name":"tests.llm.test_huggingface_utils","path":"tests/llm/test_huggingface_utils.py","language":"python","start_line":1,"end_line":32,"context_start_line":1,"context_end_line":32,"code":"import pytest\n\nfrom agentlab.llm.chat_api import HuggingFaceURLChatModel, make_system_message, make_user_message\nfrom agentlab.llm.llm_utils import download_and_save_model\nfrom agentlab.llm.prompt_templates import STARCHAT_PROMPT_TEMPLATE\n\n# TODO(optimass): figure out a good model for all tests\n\n\n@pytest.mark.skip(reason=\"Requires a local model checkpoint\")\ndef test_CustomLLMChatbot_locally():\n    # model_path = \"google/flan-t5-base\"  # remote model on HuggingFace Hub\n    model_path = \"/mnt/ui_copilot/data_rw/models/starcoderbase-1b-ft\"  # local model in shared volume\n\n    chatbot = HuggingFaceURLChatModel(model_path=model_path, temperature=1e-3)\n\n    messages = [\n        make_system_message(\"Please tell me back the following word: \"),\n        make_user_message(\"bird\"),\n    ]\n\n    answer = chatbot(messages)\n\n    
print(answer.content)\n\n\n@pytest.mark.skip(reason=\"Requires downloading a large model checkpoint to disk\")\ndef test_download_and_save_model():\n    model_path = \"meta-llama/Llama-2-70b-chat\"\n    save_dir = \"test_models\"\n\n    download_and_save_model(model_path, save_dir)","source_hash":"edea9db32c39d22cd6c155ff848d563256bff43e05e0fc6f3282e583888fe9d3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_huggingface_utils.test_CustomLLMChatbot_locally","uri":"program://AgentLab/function/tests.llm.test_huggingface_utils.test_CustomLLMChatbot_locally#L11-L24","kind":"function","name":"test_CustomLLMChatbot_locally","path":"tests/llm/test_huggingface_utils.py","language":"python","start_line":11,"end_line":24,"context_start_line":1,"context_end_line":32,"code":"import pytest\n\nfrom agentlab.llm.chat_api import HuggingFaceURLChatModel, make_system_message, make_user_message\nfrom agentlab.llm.llm_utils import download_and_save_model\nfrom agentlab.llm.prompt_templates import STARCHAT_PROMPT_TEMPLATE\n\n# TODO(optimass): figure out a good model for all tests\n\n\n@pytest.mark.skip(reason=\"Requires a local model checkpoint\")\ndef test_CustomLLMChatbot_locally():\n    # model_path = \"google/flan-t5-base\"  # remote model on HuggingFace Hub\n    model_path = \"/mnt/ui_copilot/data_rw/models/starcoderbase-1b-ft\"  # local model in shared volume\n\n    chatbot = HuggingFaceURLChatModel(model_path=model_path, temperature=1e-3)\n\n    messages = [\n        make_system_message(\"Please tell me back the following word: \"),\n        make_user_message(\"bird\"),\n    ]\n\n    answer = chatbot(messages)\n\n    print(answer.content)\n\n\n@pytest.mark.skip(reason=\"Requires downloading a large model checkpoint to disk\")\ndef test_download_and_save_model():\n    model_path = \"meta-llama/Llama-2-70b-chat\"\n    save_dir = \"test_models\"\n\n    download_and_save_model(model_path, save_dir)","source_hash":"edea9db32c39d22cd6c155ff848d563256bff43e05e0fc6f3282e583888fe9d3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_huggingface_utils.test_download_and_save_model","uri":"program://AgentLab/function/tests.llm.test_huggingface_utils.test_download_and_save_model#L28-L32","kind":"function","name":"test_download_and_save_model","path":"tests/llm/test_huggingface_utils.py","language":"python","start_line":28,"end_line":32,"context_start_line":8,"context_end_line":32,"code":"\n\n@pytest.mark.skip(reason=\"Requires a local model checkpoint\")\ndef test_CustomLLMChatbot_locally():\n    # model_path = \"google/flan-t5-base\"  # remote model on HuggingFace Hub\n    model_path = \"/mnt/ui_copilot/data_rw/models/starcoderbase-1b-ft\"  # local model in shared volume\n\n    chatbot = HuggingFaceURLChatModel(model_path=model_path, temperature=1e-3)\n\n    messages = [\n        make_system_message(\"Please tell me back the following word: \"),\n        make_user_message(\"bird\"),\n    ]\n\n    answer = chatbot(messages)\n\n    print(answer.content)\n\n\n@pytest.mark.skip(reason=\"Requires downloading a large model checkpoint to disk\")\ndef test_download_and_save_model():\n    model_path = \"meta-llama/Llama-2-70b-chat\"\n    save_dir = \"test_models\"\n\n    download_and_save_model(model_path, save_dir)","source_hash":"edea9db32c39d22cd6c155ff848d563256bff43e05e0fc6f3282e583888fe9d3","truncated":false} 
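Running the skipped checkpoint test by hand reduces to the sketch below; the model class and message helpers are the ones imported in tests/llm/test_huggingface_utils.py, and the checkpoint path is a placeholder you must point at a real local model or Hub id.

from agentlab.llm.chat_api import HuggingFaceURLChatModel, make_system_message, make_user_message

# Placeholder path: substitute a real checkpoint before running.
chatbot = HuggingFaceURLChatModel(model_path="/path/to/checkpoint", temperature=1e-3)

messages = [
    make_system_message("Please tell me back the following word: "),
    make_user_message("bird"),
]
answer = chatbot(messages)
print(answer.content)  # the model should echo "bird" back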
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_configs","uri":"program://AgentLab/module/tests.llm.test_llm_configs#L1-L8","kind":"module","name":"tests.llm.test_llm_configs","path":"tests/llm/test_llm_configs.py","language":"python","start_line":1,"end_line":8,"context_start_line":1,"context_end_line":8,"code":"from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.chat_api import BaseModelArgs\n\n\ndef test_llm_configs():\n\n for _, args in CHAT_MODEL_ARGS_DICT.items():\n assert isinstance(args, BaseModelArgs)","source_hash":"c905bf93d4a57f7fd18cc7000a71dbf31077cd1603c5b7f5641bd6000502b880","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_configs.test_llm_configs","uri":"program://AgentLab/function/tests.llm.test_llm_configs.test_llm_configs#L5-L8","kind":"function","name":"test_llm_configs","path":"tests/llm/test_llm_configs.py","language":"python","start_line":5,"end_line":8,"context_start_line":1,"context_end_line":8,"code":"from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.chat_api import BaseModelArgs\n\n\ndef test_llm_configs():\n\n for _, args in CHAT_MODEL_ARGS_DICT.items():\n assert isinstance(args, BaseModelArgs)","source_hash":"c905bf93d4a57f7fd18cc7000a71dbf31077cd1603c5b7f5641bd6000502b880","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking","uri":"program://AgentLab/module/tests.llm.test_tracking#L1-L177","kind":"module","name":"tests.llm.test_tracking","path":"tests/llm/test_tracking.py","language":"python","start_line":1,"end_line":177,"context_start_line":1,"context_end_line":177,"code":"import os\nimport time\nfrom functools import partial\n\nimport pytest\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.chat_api import (\n AzureChatModel,\n OpenAIChatModel,\n OpenRouterChatModel,\n make_system_message,\n make_user_message,\n)\n\n\ndef test_get_action_decorator():\n action, agent_info = tracking.cost_tracker_decorator(lambda x, y: call_llm())(None, None)\n assert action == \"action\"\n assert agent_info[\"stats\"] == {\n \"input_tokens\": 1,\n \"output_tokens\": 1,\n \"cost\": 1.0,\n }\n\n\nOPENROUTER_API_KEY_AVAILABLE = os.environ.get(\"OPENROUTER_API_KEY\") is not None\n\nOPENROUTER_MODELS = (\n \"anthropic/claude-3.5-sonnet\",\n \"meta-llama/llama-3.1-405b-instruct\",\n \"meta-llama/llama-3.1-70b-instruct\",\n \"meta-llama/llama-3.1-8b-instruct\",\n \"google/gemini-pro-1.5\",\n)\n\n\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_get_pricing_openrouter():\n pricing = tracking.get_pricing_openrouter()\n assert isinstance(pricing, dict)\n assert all(isinstance(v, dict) for v in pricing.values())\n for model in OPENROUTER_MODELS:\n assert model in pricing\n assert isinstance(pricing[model], dict)\n assert all(isinstance(v, float) for v in pricing[model].values())\n\n\ndef test_get_pricing_openai():\n pricing = tracking.get_pricing_openai()\n assert isinstance(pricing, dict)\n assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with 
tracking.set_tracker() as tracker:\n        _, _ = call_llm()\n\n    assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n    with tracking.set_tracker() as tracker4:\n        with tracking.set_tracker() as tracker1:\n            _, _ = call_llm()\n        with tracking.set_tracker() as tracker3:\n            _, _ = call_llm()\n            _, _ = call_llm()\n            with tracking.set_tracker() as tracker1bis:\n                _, _ = call_llm()\n\n    assert tracker1.stats[\"cost\"] == 1\n    assert tracker1bis.stats[\"cost\"] == 1\n    assert tracker3.stats[\"cost\"] == 3\n    assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n    \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"\n    import threading\n\n    def thread_1(results=None):\n        with tracking.set_tracker() as tracker:\n            time.sleep(1)\n            _, _ = call_llm()\n            time.sleep(1)\n        results[0] = tracker.stats\n\n    def thread_2(results=None):\n        time.sleep(1)\n        with tracking.set_tracker() as tracker:\n            _, _ = call_llm()\n        results[1] = tracker.stats\n\n    results = [None] * 2\n    threads = [\n        threading.Thread(target=partial(thread_1, results=results)),\n        threading.Thread(target=partial(thread_2, results=results)),\n    ]\n    for thread in threads:\n        thread.start()\n    for thread in threads:\n        thread.join()\n\n    assert all(result[\"cost\"] == 1 for result in results)\n\n\nOPENAI_API_KEY_AVAILABLE = os.environ.get(\"OPENAI_API_KEY\") is not None\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENAI_API_KEY_AVAILABLE, reason=\"OpenAI API key is not available\")\ndef test_openai_chat_model():\n    chat_model = OpenAIChatModel(\"gpt-4o-mini\")\n    assert chat_model.input_cost > 0\n    assert chat_model.output_cost > 0\n\n    messages = [\n        make_system_message(\"You are a helpful virtual assistant\"),\n        make_user_message(\"Give the third prime number\"),\n    ]\n    with tracking.set_tracker() as tracker:\n        answer = chat_model(messages)\n    assert \"5\" in answer.get(\"content\")\n    assert tracker.stats[\"cost\"] > 0\n\n\nAZURE_OPENAI_API_KEY_AVAILABLE = (\n    os.environ.get(\"AZURE_OPENAI_API_KEY\") is not None\n    and os.environ.get(\"AZURE_OPENAI_ENDPOINT\") is not None\n)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(\n    not AZURE_OPENAI_API_KEY_AVAILABLE, reason=\"Azure OpenAI API key is not available\"\n)\ndef test_azure_chat_model():\n    chat_model = AzureChatModel(model_name=\"gpt-4.1-nano\", deployment_name=\"gpt-4.1-nano\")\n    assert chat_model.input_cost > 0\n    assert chat_model.output_cost > 0\n\n    messages = [\n        make_system_message(\"You are a helpful virtual assistant\"),\n        make_user_message(\"Give the third prime number\"),\n    ]\n    with tracking.set_tracker() as tracker:\n        answer = chat_model(messages)\n    assert \"5\" in answer.get(\"content\")\n    assert tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_openrouter_chat_model():\n    chat_model = OpenRouterChatModel(\"openai/gpt-4o-mini\")\n    assert chat_model.input_cost > 0\n    assert chat_model.output_cost > 0\n\n    messages = [\n        make_system_message(\"You are a helpful virtual assistant\"),\n        make_user_message(\"Give the third prime number\"),\n    ]\n    with tracking.set_tracker() as tracker:\n        answer = chat_model(messages)\n    assert \"5\" in answer.get(\"content\")\n    assert tracker.stats[\"cost\"] > 0","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
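The tracker tests above pin down two properties worth stating plainly: set_tracker() contexts nest, and a cost recorded in an inner context also rolls up into every enclosing context (tracker4 accumulates 4 while tracker1 and tracker1bis each see 1). A minimal sketch of that roll-up, inlining the same guarded stub the module's call_llm() uses:

import agentlab.llm.tracking as tracking


def record_one_call():
    # Same guard as call_llm() above: only record when a tracker is active.
    if hasattr(tracking.TRACKER, "instance") and isinstance(
        tracking.TRACKER.instance, tracking.LLMTracker
    ):
        tracking.TRACKER.instance(1, 1, 1)  # 1 input token, 1 output token, cost 1.0


with tracking.set_tracker() as outer:
    record_one_call()
    with tracking.set_tracker() as inner:
        record_one_call()

assert inner.stats["cost"] == 1
assert outer.stats["cost"] == 2  # the inner call rolled up into the outer tracker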
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_get_action_decorator","uri":"program://AgentLab/function/tests.llm.test_tracking.test_get_action_decorator#L17-L24","kind":"function","name":"test_get_action_decorator","path":"tests/llm/test_tracking.py","language":"python","start_line":17,"end_line":24,"context_start_line":1,"context_end_line":44,"code":"import os\nimport time\nfrom functools import partial\n\nimport pytest\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.chat_api import (\n AzureChatModel,\n OpenAIChatModel,\n OpenRouterChatModel,\n make_system_message,\n make_user_message,\n)\n\n\ndef test_get_action_decorator():\n action, agent_info = tracking.cost_tracker_decorator(lambda x, y: call_llm())(None, None)\n assert action == \"action\"\n assert agent_info[\"stats\"] == {\n \"input_tokens\": 1,\n \"output_tokens\": 1,\n \"cost\": 1.0,\n }\n\n\nOPENROUTER_API_KEY_AVAILABLE = os.environ.get(\"OPENROUTER_API_KEY\") is not None\n\nOPENROUTER_MODELS = (\n \"anthropic/claude-3.5-sonnet\",\n \"meta-llama/llama-3.1-405b-instruct\",\n \"meta-llama/llama-3.1-70b-instruct\",\n \"meta-llama/llama-3.1-8b-instruct\",\n \"google/gemini-pro-1.5\",\n)\n\n\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_get_pricing_openrouter():\n pricing = tracking.get_pricing_openrouter()\n assert isinstance(pricing, dict)\n assert all(isinstance(v, dict) for v in pricing.values())\n for model in OPENROUTER_MODELS:\n assert model in pricing","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_get_pricing_openrouter","uri":"program://AgentLab/function/tests.llm.test_tracking.test_get_pricing_openrouter#L39-L46","kind":"function","name":"test_get_pricing_openrouter","path":"tests/llm/test_tracking.py","language":"python","start_line":39,"end_line":46,"context_start_line":19,"context_end_line":66,"code":" assert action == \"action\"\n assert agent_info[\"stats\"] == {\n \"input_tokens\": 1,\n \"output_tokens\": 1,\n \"cost\": 1.0,\n }\n\n\nOPENROUTER_API_KEY_AVAILABLE = os.environ.get(\"OPENROUTER_API_KEY\") is not None\n\nOPENROUTER_MODELS = (\n \"anthropic/claude-3.5-sonnet\",\n \"meta-llama/llama-3.1-405b-instruct\",\n \"meta-llama/llama-3.1-70b-instruct\",\n \"meta-llama/llama-3.1-8b-instruct\",\n \"google/gemini-pro-1.5\",\n)\n\n\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_get_pricing_openrouter():\n pricing = tracking.get_pricing_openrouter()\n assert isinstance(pricing, dict)\n assert all(isinstance(v, dict) for v in pricing.values())\n for model in OPENROUTER_MODELS:\n assert model in pricing\n assert isinstance(pricing[model], dict)\n assert all(isinstance(v, float) for v in pricing[model].values())\n\n\ndef test_get_pricing_openai():\n pricing = tracking.get_pricing_openai()\n assert isinstance(pricing, dict)\n assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with 
tracking.set_tracker() as tracker:","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_get_pricing_openai","uri":"program://AgentLab/function/tests.llm.test_tracking.test_get_pricing_openai#L49-L54","kind":"function","name":"test_get_pricing_openai","path":"tests/llm/test_tracking.py","language":"python","start_line":49,"end_line":54,"context_start_line":29,"context_end_line":74,"code":"OPENROUTER_MODELS = (\n \"anthropic/claude-3.5-sonnet\",\n \"meta-llama/llama-3.1-405b-instruct\",\n \"meta-llama/llama-3.1-70b-instruct\",\n \"meta-llama/llama-3.1-8b-instruct\",\n \"google/gemini-pro-1.5\",\n)\n\n\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_get_pricing_openrouter():\n pricing = tracking.get_pricing_openrouter()\n assert isinstance(pricing, dict)\n assert all(isinstance(v, dict) for v in pricing.values())\n for model in OPENROUTER_MODELS:\n assert model in pricing\n assert isinstance(pricing[model], dict)\n assert all(isinstance(v, float) for v in pricing[model].values())\n\n\ndef test_get_pricing_openai():\n pricing = tracking.get_pricing_openai()\n assert isinstance(pricing, dict)\n assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.call_llm","uri":"program://AgentLab/function/tests.llm.test_tracking.call_llm#L57-L62","kind":"function","name":"call_llm","path":"tests/llm/test_tracking.py","language":"python","start_line":57,"end_line":62,"context_start_line":37,"context_end_line":82,"code":"\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_get_pricing_openrouter():\n pricing = tracking.get_pricing_openrouter()\n assert isinstance(pricing, dict)\n assert all(isinstance(v, dict) for v in pricing.values())\n for model in OPENROUTER_MODELS:\n assert model in pricing\n assert isinstance(pricing[model], dict)\n assert all(isinstance(v, float) for v in pricing[model].values())\n\n\ndef test_get_pricing_openai():\n pricing = tracking.get_pricing_openai()\n assert isinstance(pricing, dict)\n assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with tracking.set_tracker() as tracker:\n _, _ = 
call_llm()\n\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:\n _, _ = call_llm()\n with tracking.set_tracker() as tracker3:\n _, _ = call_llm()\n _, _ = call_llm()\n with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_tracker","uri":"program://AgentLab/function/tests.llm.test_tracking.test_tracker#L65-L69","kind":"function","name":"test_tracker","path":"tests/llm/test_tracking.py","language":"python","start_line":65,"end_line":69,"context_start_line":45,"context_end_line":89,"code":" assert isinstance(pricing[model], dict)\n assert all(isinstance(v, float) for v in pricing[model].values())\n\n\ndef test_get_pricing_openai():\n pricing = tracking.get_pricing_openai()\n assert isinstance(pricing, dict)\n assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:\n _, _ = call_llm()\n with tracking.set_tracker() as tracker3:\n _, _ = call_llm()\n _, _ = call_llm()\n with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1\n assert tracker1bis.stats[\"cost\"] == 1\n assert tracker3.stats[\"cost\"] == 3\n assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_imbricate_trackers","uri":"program://AgentLab/function/tests.llm.test_tracking.test_imbricate_trackers#L72-L85","kind":"function","name":"test_imbricate_trackers","path":"tests/llm/test_tracking.py","language":"python","start_line":72,"end_line":85,"context_start_line":52,"context_end_line":105,"code":" assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:\n _, _ = call_llm()\n with tracking.set_tracker() as tracker3:\n _, _ = call_llm()\n _, _ = call_llm()\n with tracking.set_tracker() as 
tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1\n assert tracker1bis.stats[\"cost\"] == 1\n assert tracker3.stats[\"cost\"] == 3\n assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"\n import threading\n\n def thread_1(results=None):\n with tracking.set_tracker() as tracker:\n time.sleep(1)\n _, _ = call_llm()\n time.sleep(1)\n results[0] = tracker.stats\n\n def thread_2(results=None):\n time.sleep(1)\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n results[1] = tracker.stats\n\n results = [None] * 2","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_threaded_trackers","uri":"program://AgentLab/function/tests.llm.test_tracking.test_threaded_trackers#L88-L115","kind":"function","name":"test_threaded_trackers","path":"tests/llm/test_tracking.py","language":"python","start_line":88,"end_line":115,"context_start_line":68,"context_end_line":135,"code":"\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:\n _, _ = call_llm()\n with tracking.set_tracker() as tracker3:\n _, _ = call_llm()\n _, _ = call_llm()\n with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1\n assert tracker1bis.stats[\"cost\"] == 1\n assert tracker3.stats[\"cost\"] == 3\n assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"\n import threading\n\n def thread_1(results=None):\n with tracking.set_tracker() as tracker:\n time.sleep(1)\n _, _ = call_llm()\n time.sleep(1)\n results[0] = tracker.stats\n\n def thread_2(results=None):\n time.sleep(1)\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n results[1] = tracker.stats\n\n results = [None] * 2\n threads = [\n threading.Thread(target=partial(thread_1, results=results)),\n threading.Thread(target=partial(thread_2, results=results)),\n ]\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n assert all(result[\"cost\"] == 1 for result in results)\n\n\nOPENAI_API_KEY_AVAILABLE = os.environ.get(\"OPENAI_API_KEY\") is not None\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENAI_API_KEY_AVAILABLE, reason=\"OpenAI API key is not available\")\ndef test_openai_chat_model():\n chat_model = OpenAIChatModel(\"gpt-4o-mini\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_openai_chat_model","uri":"program://AgentLab/function/tests.llm.test_tracking.test_openai_chat_model#L123-L135","kind":"function","name":"test_openai_chat_model","path":"tests/llm/test_tracking.py","language":"python","start_line":123,"end_line":135,"context_start_line":103,"context_end_line":155,"code":" results[1] = tracker.stats\n\n results = [None] * 2\n threads = [\n 
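# editor note (inferred from the assertion below): each thread installs its own tracker via set_tracker(), so the two trackers never see each other's calls or cost\n        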
threading.Thread(target=partial(thread_1, results=results)),\n        threading.Thread(target=partial(thread_2, results=results)),\n    ]\n    for thread in threads:\n        thread.start()\n    for thread in threads:\n        thread.join()\n\n    assert all(result[\"cost\"] == 1 for result in results)\n\n\nOPENAI_API_KEY_AVAILABLE = os.environ.get(\"OPENAI_API_KEY\") is not None\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENAI_API_KEY_AVAILABLE, reason=\"OpenAI API key is not available\")\ndef test_openai_chat_model():\n    chat_model = OpenAIChatModel(\"gpt-4o-mini\")\n    assert chat_model.input_cost > 0\n    assert chat_model.output_cost > 0\n\n    messages = [\n        make_system_message(\"You are a helpful virtual assistant\"),\n        make_user_message(\"Give the third prime number\"),\n    ]\n    with tracking.set_tracker() as tracker:\n        answer = chat_model(messages)\n        assert \"5\" in answer.get(\"content\")\n        assert tracker.stats[\"cost\"] > 0\n\n\nAZURE_OPENAI_API_KEY_AVAILABLE = (\n    os.environ.get(\"AZURE_OPENAI_API_KEY\") is not None\n    and os.environ.get(\"AZURE_OPENAI_ENDPOINT\") is not None\n)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(\n    not AZURE_OPENAI_API_KEY_AVAILABLE, reason=\"Azure OpenAI API key is not available\"\n)\ndef test_azure_chat_model():\n    chat_model = AzureChatModel(model_name=\"gpt-4.1-nano\", deployment_name=\"gpt-4.1-nano\")\n    assert chat_model.input_cost > 0\n    assert chat_model.output_cost > 0\n\n    messages = [\n        make_system_message(\"You are a helpful virtual assistant\"),\n        make_user_message(\"Give the third prime number\",","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_azure_chat_model","uri":"program://AgentLab/function/tests.llm.test_tracking.test_azure_chat_model#L148-L160","kind":"function","name":"test_azure_chat_model","path":"tests/llm/test_tracking.py","language":"python","start_line":148,"end_line":160,"context_start_line":128,"context_end_line":177,"code":"    messages = [\n        make_system_message(\"You are a helpful virtual assistant\"),\n        make_user_message(\"Give the third prime number\"),\n    ]\n    with tracking.set_tracker() as tracker:\n        answer = chat_model(messages)\n        assert \"5\" in answer.get(\"content\")\n        assert tracker.stats[\"cost\"] > 0\n\n\nAZURE_OPENAI_API_KEY_AVAILABLE = (\n    os.environ.get(\"AZURE_OPENAI_API_KEY\") is not None\n    and os.environ.get(\"AZURE_OPENAI_ENDPOINT\") is not None\n)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(\n    not AZURE_OPENAI_API_KEY_AVAILABLE, reason=\"Azure OpenAI API key is not available\"\n)\ndef test_azure_chat_model():\n    chat_model = AzureChatModel(model_name=\"gpt-4.1-nano\", deployment_name=\"gpt-4.1-nano\")\n    assert chat_model.input_cost > 0\n    assert chat_model.output_cost > 0\n\n    messages = [\n        make_system_message(\"You are a helpful virtual assistant\"),\n        make_user_message(\"Give the third prime number\"),\n    ]\n    with tracking.set_tracker() as tracker:\n
        answer = chat_model(messages)\n        assert \"5\" in answer.get(\"content\")\n        assert tracker.stats[\"cost\"] > 0","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_openrouter_chat_model","uri":"program://AgentLab/function/tests.llm.test_tracking.test_openrouter_chat_model#L165-L177","kind":"function","name":"test_openrouter_chat_model","path":"tests/llm/test_tracking.py","language":"python","start_line":165,"end_line":177,"context_start_line":145,"context_end_line":177,"code":"@pytest.mark.skipif(\n    not AZURE_OPENAI_API_KEY_AVAILABLE, reason=\"Azure OpenAI API key is not available\"\n)\ndef test_azure_chat_model():\n    chat_model = AzureChatModel(model_name=\"gpt-4.1-nano\", deployment_name=\"gpt-4.1-nano\")\n    assert chat_model.input_cost > 0\n    assert chat_model.output_cost > 0\n\n    messages = [\n        make_system_message(\"You are a helpful virtual assistant\"),\n        make_user_message(\"Give the third prime number\"),\n    ]\n    with tracking.set_tracker() as tracker:\n        answer = chat_model(messages)\n        assert \"5\" in answer.get(\"content\")\n        assert tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_openrouter_chat_model():\n    chat_model = OpenRouterChatModel(\"openai/gpt-4o-mini\")\n    assert chat_model.input_cost > 0\n    assert chat_model.output_cost > 0\n\n    messages = [\n        make_system_message(\"You are a helpful virtual assistant\"),\n        make_user_message(\"Give the third prime number\"),\n    ]\n    with tracking.set_tracker() as tracker:\n        answer = chat_model(messages)\n        assert \"5\" in answer.get(\"content\")\n        assert tracker.stats[\"cost\"] > 0","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.thread_1","uri":"program://AgentLab/function/tests.llm.test_tracking.thread_1#L92-L97","kind":"function","name":"thread_1","path":"tests/llm/test_tracking.py","language":"python","start_line":92,"end_line":97,"context_start_line":72,"context_end_line":117,"code":"def test_imbricate_trackers():\n    with tracking.set_tracker() as tracker4:\n        with tracking.set_tracker() as tracker1:\n            _, _ = call_llm()\n        with tracking.set_tracker() as tracker3:\n            _, _ = call_llm()\n            _, _ = call_llm()\n            with tracking.set_tracker() as tracker1bis:\n                _, _ = call_llm()\n\n    assert tracker1.stats[\"cost\"] == 1\n    assert tracker1bis.stats[\"cost\"] == 1\n    assert tracker3.stats[\"cost\"] == 3\n    assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n    \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"\n    import threading\n\n    def thread_1(results=None):\n        with tracking.set_tracker() as tracker:\n            time.sleep(1)\n            _, _ = call_llm()\n            time.sleep(1)\n        results[0] = tracker.stats\n\n    def thread_2(results=None):\n        time.sleep(1)\n        with tracking.set_tracker() as tracker:\n            _, _ = call_llm()\n        results[1] = tracker.stats\n\n    results = [None] * 2\n    threads = [\n        threading.Thread(target=partial(thread_1, results=results)),\n        threading.Thread(target=partial(thread_2, results=results)),\n    ]\n    for thread in threads:\n        thread.start()\n    for thread in threads:\n        thread.join()\n\n    assert all(result[\"cost\"] == 1 for result in results)\n\n","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
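The tracking records above all exercise one pattern: wrap the LLM call in `tracking.set_tracker()` and read the accumulated `stats` afterwards. A minimal sketch of that pattern follows, assuming the chat-model helpers live in `agentlab.llm.chat_api` (import path assumed; only `agentlab.llm.tracking` is confirmed by these records):

```python
# Hedged sketch of the pattern exercised by tests/llm/test_tracking.py.
# Assumption: OpenAIChatModel, make_system_message and make_user_message are
# importable from agentlab.llm.chat_api; adjust the path to your install.
from agentlab.llm import tracking
from agentlab.llm.chat_api import OpenAIChatModel, make_system_message, make_user_message

chat_model = OpenAIChatModel("gpt-4o-mini")  # pricing lookup fills input_cost / output_cost

messages = [
    make_system_message("You are a helpful virtual assistant"),
    make_user_message("Give the third prime number"),
]

# Every call made inside the context is charged to this tracker; nested
# set_tracker() contexts roll usage up to the enclosing trackers
# (test_imbricate_trackers), and threads are isolated (test_threaded_trackers).
with tracking.set_tracker() as tracker:
    answer = chat_model(messages)

print(tracker.stats["input_tokens"], tracker.stats["output_tokens"], tracker.stats["cost"])
```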
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.thread_2","uri":"program://AgentLab/function/tests.llm.test_tracking.thread_2#L99-L103","kind":"function","name":"thread_2","path":"tests/llm/test_tracking.py","language":"python","start_line":99,"end_line":103,"context_start_line":79,"context_end_line":123,"code":" with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1\n assert tracker1bis.stats[\"cost\"] == 1\n assert tracker3.stats[\"cost\"] == 3\n assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"\n import threading\n\n def thread_1(results=None):\n with tracking.set_tracker() as tracker:\n time.sleep(1)\n _, _ = call_llm()\n time.sleep(1)\n results[0] = tracker.stats\n\n def thread_2(results=None):\n time.sleep(1)\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n results[1] = tracker.stats\n\n results = [None] * 2\n threads = [\n threading.Thread(target=partial(thread_1, results=results)),\n threading.Thread(target=partial(thread_2, results=results)),\n ]\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n assert all(result[\"cost\"] == 1 for result in results)\n\n\nOPENAI_API_KEY_AVAILABLE = os.environ.get(\"OPENAI_API_KEY\") is not None\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENAI_API_KEY_AVAILABLE, reason=\"OpenAI API key is not available\")\ndef test_openai_chat_model():","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_litellm_api","uri":"program://AgentLab/module/tests.llm.test_litellm_api#L1-L167","kind":"module","name":"tests.llm.test_litellm_api","path":"tests/llm/test_litellm_api.py","language":"python","start_line":1,"end_line":167,"context_start_line":1,"context_end_line":167,"code":"import os\nfrom functools import partial\n\nimport pytest\nfrom agentlab.llm.litellm_api import LiteLLMModelArgs\nfrom agentlab.llm.response_api import APIPayload, LLMOutput\n\nchat_api_tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },\n \"required\": [\"location\"],\n },\n },\n {\n \"type\": \"function\",\n \"name\": \"get_time\",\n \"description\": \"Get the current time in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the time for.\",\n }\n },\n \"required\": [\"location\"],\n },\n },\n]\n\n\n# test_config (setting name, BaseModelArgs, model_name, tools)\ntool_test_configs = [\n (\"gpt-4.1\", LiteLLMModelArgs, \"openai/gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", LiteLLMModelArgs, \"anthropic/claude-3-haiku-20240307\", anthropic_tools), # fails for parallel tool calls\n # (\"claude-3.7\", LiteLLMModelArgs, \"anthropic/claude-3-7-sonnet-20250219\", anthropic_tools), # fails for parallel tool calls\n (\"claude-4-sonnet\", LiteLLMModelArgs, \"anthropic/claude-sonnet-4-20250514\", chat_api_tools),\n # (\"gpt-o3\", LiteLLMModelArgs, 
\"openai/o3-2025-04-16\", chat_api_tools), # fails for parallel tool calls\n # add more models as needed\n]\n\n\ndef add_user_messages(msg_builder):\n return [\n msg_builder.user().add_text(\"What is the weather in Paris and Delhi?\"),\n msg_builder.user().add_text(\"You must call multiple tools to achieve the task.\"),\n ]\n\n\n## Test multiaction\n@pytest.mark.pricy\ndef test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Note: Remove assert and Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n res_df = []\n for tool_choice in [\n # \"none\",\n \"required\", # fails for Responses API\n \"any\", # fails for Responses API\n \"auto\",\n # \"get_weather\", # force a specific tool call\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n row = {\n \"model\": name,\n \"checkpoint\": checkpoint_name,\n \"tool_choice\": tool_choice,\n \"num_tool_calls\": num_tool_calls,\n \"action\": response.action,\n }\n res_df.append(row)\n assert (\n num_tool_calls == 2\n ), f\"Expected 2 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice}\"\n # import pandas as pd\n # print(pd.DataFrame(res_df))\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_single_tool_call():\n \"\"\"\n Test that the LLMOutput contains only one tool call when use_only_first_toolcall is True.\n \"\"\"\n for tool_choice in [\n # 'none',\n \"required\",\n \"any\",\n \"auto\",\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n print(name, \"tool choice:\", tool_choice, \"\\n\", \"**\" * 10)\n llm_class = partial(llm_class, use_only_first_toolcall=True)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n assert (\n num_tool_calls == 1\n ), f\"Expected 1 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice }\"\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_force_tool_call():\n \"\"\"\n Test that the model can produce a specific tool call when requested.\n The user message asks the 'weather' but we force call tool \"get_time\".\n We test if 'get_time' is present in the tool calls.\n Note: Model can have other tool calls as well.\n \"\"\"\n force_call_tool = \"get_time\"\n for name, llm_class, 
checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder) # asks weather in Paris and Delhi\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=force_call_tool)\n )\n called_fn_names = [call.name for call in response.tool_calls] if response.tool_calls else []\n assert response.tool_calls is not None\n assert any(\n fn_name == \"get_time\" for fn_name in called_fn_names\n ), f\"Model:{name},Expected all tool calls to be 'get_time', but got {called_fn_names} with force call {force_call_tool}\"\n\n\nif __name__ == \"__main__\":\n test_multi_action_tool_calls()\n test_force_tool_call()\n test_single_tool_call()","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_litellm_api.add_user_messages","uri":"program://AgentLab/function/tests.llm.test_litellm_api.add_user_messages#L58-L62","kind":"function","name":"add_user_messages","path":"tests/llm/test_litellm_api.py","language":"python","start_line":58,"end_line":62,"context_start_line":38,"context_end_line":82,"code":" \"description\": \"The location to get the time for.\",\n }\n },\n \"required\": [\"location\"],\n },\n },\n]\n\n\n# test_config (setting name, BaseModelArgs, model_name, tools)\ntool_test_configs = [\n (\"gpt-4.1\", LiteLLMModelArgs, \"openai/gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", LiteLLMModelArgs, \"anthropic/claude-3-haiku-20240307\", anthropic_tools), # fails for parallel tool calls\n # (\"claude-3.7\", LiteLLMModelArgs, \"anthropic/claude-3-7-sonnet-20250219\", anthropic_tools), # fails for parallel tool calls\n (\"claude-4-sonnet\", LiteLLMModelArgs, \"anthropic/claude-sonnet-4-20250514\", chat_api_tools),\n # (\"gpt-o3\", LiteLLMModelArgs, \"openai/o3-2025-04-16\", chat_api_tools), # fails for parallel tool calls\n # add more models as needed\n]\n\n\ndef add_user_messages(msg_builder):\n return [\n msg_builder.user().add_text(\"What is the weather in Paris and Delhi?\"),\n msg_builder.user().add_text(\"You must call multiple tools to achieve the task.\"),\n ]\n\n\n## Test multiaction\n@pytest.mark.pricy\ndef test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Note: Remove assert and Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n res_df = []\n for tool_choice in [\n # \"none\",\n \"required\", # fails for Responses API\n \"any\", # fails for Responses API\n \"auto\",\n # \"get_weather\", # force a specific tool call\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_litellm_api.test_multi_action_tool_calls","uri":"program://AgentLab/function/tests.llm.test_litellm_api.test_multi_action_tool_calls#L67-L103","kind":"function","name":"test_multi_action_tool_calls","path":"tests/llm/test_litellm_api.py","language":"python","start_line":67,"end_line":103,"context_start_line":47,"context_end_line":123,"code":"# test_config (setting name, 
BaseModelArgs, model_name, tools)\ntool_test_configs = [\n (\"gpt-4.1\", LiteLLMModelArgs, \"openai/gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", LiteLLMModelArgs, \"anthropic/claude-3-haiku-20240307\", anthropic_tools), # fails for parallel tool calls\n # (\"claude-3.7\", LiteLLMModelArgs, \"anthropic/claude-3-7-sonnet-20250219\", anthropic_tools), # fails for parallel tool calls\n (\"claude-4-sonnet\", LiteLLMModelArgs, \"anthropic/claude-sonnet-4-20250514\", chat_api_tools),\n # (\"gpt-o3\", LiteLLMModelArgs, \"openai/o3-2025-04-16\", chat_api_tools), # fails for parallel tool calls\n # add more models as needed\n]\n\n\ndef add_user_messages(msg_builder):\n return [\n msg_builder.user().add_text(\"What is the weather in Paris and Delhi?\"),\n msg_builder.user().add_text(\"You must call multiple tools to achieve the task.\"),\n ]\n\n\n## Test multiaction\n@pytest.mark.pricy\ndef test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Note: Remove assert and Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n res_df = []\n for tool_choice in [\n # \"none\",\n \"required\", # fails for Responses API\n \"any\", # fails for Responses API\n \"auto\",\n # \"get_weather\", # force a specific tool call\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n row = {\n \"model\": name,\n \"checkpoint\": checkpoint_name,\n \"tool_choice\": tool_choice,\n \"num_tool_calls\": num_tool_calls,\n \"action\": response.action,\n }\n res_df.append(row)\n assert (\n num_tool_calls == 2\n ), f\"Expected 2 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice}\"\n # import pandas as pd\n # print(pd.DataFrame(res_df))\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_single_tool_call():\n \"\"\"\n Test that the LLMOutput contains only one tool call when use_only_first_toolcall is True.\n \"\"\"\n for tool_choice in [\n # 'none',\n \"required\",\n \"any\",\n \"auto\",\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n print(name, \"tool choice:\", tool_choice, \"\\n\", \"**\" * 10)\n llm_class = partial(llm_class, use_only_first_toolcall=True)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_litellm_api.test_single_tool_call","uri":"program://AgentLab/function/tests.llm.test_litellm_api.test_single_tool_call#L110-L137","kind":"function","name":"test_single_tool_call","path":"tests/llm/test_litellm_api.py","language":"python","start_line":110,"end_line":137,"context_start_line":90,"context_end_line":157,"code":" APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = 
len(response.tool_calls) if response.tool_calls else 0\n row = {\n \"model\": name,\n \"checkpoint\": checkpoint_name,\n \"tool_choice\": tool_choice,\n \"num_tool_calls\": num_tool_calls,\n \"action\": response.action,\n }\n res_df.append(row)\n assert (\n num_tool_calls == 2\n ), f\"Expected 2 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice}\"\n # import pandas as pd\n # print(pd.DataFrame(res_df))\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_single_tool_call():\n \"\"\"\n Test that the LLMOutput contains only one tool call when use_only_first_toolcall is True.\n \"\"\"\n for tool_choice in [\n # 'none',\n \"required\",\n \"any\",\n \"auto\",\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n print(name, \"tool choice:\", tool_choice, \"\\n\", \"**\" * 10)\n llm_class = partial(llm_class, use_only_first_toolcall=True)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n assert (\n num_tool_calls == 1\n ), f\"Expected 1 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice }\"\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_force_tool_call():\n \"\"\"\n Test that the model can produce a specific tool call when requested.\n The user message asks the 'weather' but we force call tool \"get_time\".\n We test if 'get_time' is present in the tool calls.\n Note: Model can have other tool calls as well.\n \"\"\"\n force_call_tool = \"get_time\"\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder) # asks weather in Paris and Delhi\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=force_call_tool)\n )\n called_fn_names = [call.name for call in response.tool_calls] if response.tool_calls else []","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_litellm_api.test_force_tool_call","uri":"program://AgentLab/function/tests.llm.test_litellm_api.test_force_tool_call#L142-L161","kind":"function","name":"test_force_tool_call","path":"tests/llm/test_litellm_api.py","language":"python","start_line":142,"end_line":161,"context_start_line":122,"context_end_line":167,"code":" llm_class = partial(llm_class, use_only_first_toolcall=True)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, 
force_call_tool=tool_choice)\n                )\n            else:\n                response: LLMOutput = llm(\n                    APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n                )\n            num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n            assert (\n                num_tool_calls == 1\n            ), f\"Expected 1 tool call, but got {num_tool_calls} for {name} with tool choice {tool_choice}\"\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_force_tool_call():\n    \"\"\"\n    Test that the model can produce a specific tool call when requested.\n    The user message asks for the weather, but we force a call to the tool \"get_time\".\n    We test that 'get_time' is present in the tool calls.\n    Note: the model may make other tool calls as well.\n    \"\"\"\n    force_call_tool = \"get_time\"\n    for name, llm_class, checkpoint_name, tools in tool_test_configs:\n        model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n        llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n        messages = add_user_messages(msg_builder) # asks weather in Paris and Delhi\n        response: LLMOutput = llm(\n            APIPayload(messages=messages, tools=tools, force_call_tool=force_call_tool)\n        )\n        called_fn_names = [call.name for call in response.tool_calls] if response.tool_calls else []\n        assert response.tool_calls is not None\n        assert any(\n            fn_name == \"get_time\" for fn_name in called_fn_names\n        ), f\"Model: {name}, expected 'get_time' among the tool calls, but got {called_fn_names} with force call {force_call_tool}\"\n\n\nif __name__ == \"__main__\":\n    test_multi_action_tool_calls()\n    test_force_tool_call()\n    test_single_tool_call()","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api","uri":"program://AgentLab/module/tests.llm.test_response_api#L1-L803","kind":"module","name":"tests.llm.test_response_api","path":"tests/llm/test_response_api.py","language":"python","start_line":1,"end_line":803,"context_start_line":1,"context_end_line":803,"code":"import os\nfrom typing import Any, Dict, List, Optional\nfrom unittest.mock import MagicMock, patch\n\nimport anthropic\nimport openai\nimport pytest\n\nfrom agentlab.llm import tracking\nfrom agentlab.llm.response_api import (\n    AnthropicAPIMessageBuilder,\n    APIPayload,\n    ClaudeResponseModelArgs,\n    LLMOutput,\n    OpenAIChatCompletionAPIMessageBuilder,\n    OpenAIChatModelArgs,\n    OpenAIResponseAPIMessageBuilder,\n    OpenAIResponseModelArgs,\n)\n\n\n# Helper to create a mock OpenAI ChatCompletion response\ndef create_mock_openai_chat_completion(\n    content=None, tool_calls=None, prompt_tokens=10, completion_tokens=20\n):\n    completion = MagicMock(spec=openai.types.chat.ChatCompletion)\n    choice = MagicMock()\n    message = MagicMock(spec=openai.types.chat.ChatCompletionMessage)\n    message.content = content\n    message.tool_calls = None\n    if tool_calls:\n        message.tool_calls = []\n        for tc in tool_calls:\n            tool_call_mock = MagicMock(\n                spec=openai.types.chat.chat_completion_message_tool_call.ChatCompletionMessageToolCall\n            )\n            tool_call_mock.id = tc[\"id\"]\n            tool_call_mock.type = tc[\"type\"]\n            tool_call_mock.function = MagicMock()\n            tool_call_mock.function.name = tc[\"function\"][\"name\"]\n            tool_call_mock.function.arguments = tc[\"function\"][\"arguments\"]\n            message.tool_calls.append(tool_call_mock)\n\n    choice.message = message\n    completion.choices = [choice]\n\n    completion.usage = MagicMock()\n    # Explicitly set the attributes 
that get_tokens_counts_from_response will try first.\n # These are the generic names.\n completion.usage.input_tokens = prompt_tokens\n completion.usage.output_tokens = completion_tokens\n\n # Also set the OpenAI-specific names if any other part of the code might look for them directly,\n # or if get_tokens_counts_from_response had different fallback logic.\n completion.usage.prompt_tokens = prompt_tokens\n completion.usage.completion_tokens = completion_tokens\n prompt_tokens_details_mock = MagicMock()\n prompt_tokens_details_mock.cached_tokens = 0\n completion.usage.prompt_tokens_details = prompt_tokens_details_mock\n\n completion.model_dump.return_value = {\n \"id\": \"chatcmpl-xxxx\",\n \"choices\": [\n {\"message\": {\"role\": \"assistant\", \"content\": content, \"tool_calls\": tool_calls}}\n ],\n # Ensure the usage dict in model_dump also reflects the token counts accurately.\n # The get_tokens_counts_from_response also has a path for dict style.\n \"usage\": {\n \"input_tokens\": prompt_tokens, # Generic name\n \"output_tokens\": completion_tokens, # Generic name\n \"prompt_tokens\": prompt_tokens, # OpenAI specific\n \"completion_tokens\": completion_tokens, # OpenAI specific\n \"prompt_tokens_details\": {\"cached_tokens\": 0},\n },\n }\n message.to_dict.return_value = {\n \"role\": \"assistant\",\n \"content\": content,\n \"tool_calls\": tool_calls,\n }\n return completion\n\n\nresponses_api_tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n]\n\nchat_api_tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n]\nanthropic_tools = [\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n]\n\n\n# Helper to create a mock Anthropic response\ndef create_mock_anthropic_response(\n text_content=None, tool_use=None, input_tokens=15, output_tokens=25\n):\n\n response = MagicMock(spec=anthropic.types.Message)\n response.type = \"message\" # Explicitly set the type attribute\n response.content = []\n response.content = []\n if text_content:\n text_block = MagicMock(spec=anthropic.types.TextBlock)\n text_block.type = \"text\"\n text_block.text = text_content\n response.content.append(text_block)\n if tool_use:\n tool_use_block = MagicMock(spec=anthropic.types.ToolUseBlock)\n tool_use_block.type = \"tool_use\"\n tool_use_block.id = tool_use[\"id\"]\n tool_use_block.name = tool_use[\"name\"]\n tool_use_block.input = tool_use[\"input\"]\n 
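# append the fake tool-use block, then stub the token-usage fields that the cost tracking reads (editor note, inferred from the tracking tests above)\n        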
response.content.append(tool_use_block)\n response.usage = MagicMock()\n response.usage.input_tokens = input_tokens\n response.usage.output_tokens = output_tokens\n response.usage.cache_input_tokens = 0\n response.usage.cache_creation_input_tokens = 0\n return response\n\n\ndef create_mock_openai_responses_api_response(\n outputs: Optional[List[Dict[str, Any]]] = None, input_tokens: int = 10, output_tokens: int = 20\n) -> MagicMock:\n \"\"\"\n Helper to create a mock response object similar to what\n openai.resources.Responses.create() would return.\n Compatible with OpenAIResponseModel and TrackAPIPricingMixin.\n \"\"\"\n\n response_mock = MagicMock(spec=openai.types.responses.response.Response)\n response_mock.type = \"response\"\n response_mock.output = []\n\n if outputs:\n for out_data in outputs:\n output_item_mock = MagicMock()\n output_item_mock.type = out_data.get(\"type\")\n\n if output_item_mock.type == \"function_call\":\n # You can adapt this depending on your expected object structure\n output_item_mock.name = out_data.get(\"name\")\n output_item_mock.arguments = out_data.get(\"arguments\")\n output_item_mock.call_id = out_data.get(\"call_id\")\n elif output_item_mock.type == \"reasoning\":\n output_item_mock.summary = []\n for text_content in out_data.get(\"summary\", []):\n summary_text_mock = MagicMock()\n summary_text_mock.text = text_content\n output_item_mock.summary.append(summary_text_mock)\n\n response_mock.output.append(output_item_mock)\n\n # Token usage for pricing tracking\n response_mock.usage = MagicMock(spec=openai.types.responses.response.ResponseUsage)\n response_mock.usage.input_tokens = input_tokens\n response_mock.usage.output_tokens = output_tokens\n response_mock.usage.prompt_tokens = input_tokens\n response_mock.usage.completion_tokens = output_tokens\n input_tokens_details_mock = MagicMock()\n input_tokens_details_mock.cached_tokens = 0\n response_mock.usage.input_tokens_details = input_tokens_details_mock\n\n return response_mock\n\n\n# --- Test MessageBuilders ---\n\n\ndef test_openai_response_api_message_builder_text():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_text(\"Hello, world!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"input_text\", \"text\": \"Hello, world!\"}]\n\n\ndef test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"input_image\", \"image_url\": \"data:image/png;base64,SIMPLEBASE64STRING\"}\n ]\n\n\ndef test_anthropic_api_message_builder_text():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_text(\"Hello, Anthropic!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]\n\n\ndef test_anthropic_api_message_builder_image():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,ANTHROPICBASE64\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == 
\"image\"\n assert image_content[\"source\"][\"type\"] == \"base64\"\n assert image_content[\"source\"][\"media_type\"] == \"image/png\"\n assert image_content[\"source\"][\"data\"] == \"ANTHROPICBASE64\" # Base64 prefix should be stripped\n\n\ndef test_openai_chat_completion_api_message_builder_text():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, ChatCompletion!\"}]\n\n\ndef test_openai_chat_completion_api_message_builder_image():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_image(\"data:image/jpeg;base64,CHATCOMPLETIONBASE64\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"image_url\", \"image_url\": {\"url\": \"data:image/jpeg;base64,CHATCOMPLETIONBASE64\"}}\n ]\n\n\ndef test_openai_chat_completion_model_parse_and_cost():\n args = OpenAIChatModelArgs(model_name=\"gpt-3.5-turbo\")\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()\n\n mock_response = create_mock_openai_chat_completion(\n content=\"This is a test thought.\",\n tool_calls=[\n {\n \"id\": \"call_123\",\n \"type\": \"function\",\n \"function\": {\"name\": \"get_weather\", \"arguments\": '{\"location\": \"Paris\"}'},\n }\n ],\n prompt_tokens=50,\n completion_tokens=30,\n )\n\n with patch.object(\n model.client.chat.completions, \"create\", return_value=mock_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\n \"What's the weather in Paris?\"\n )\n ]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n assert parsed_output.raw_response.choices[0].message.content == \"This is a test thought.\"\n assert parsed_output.action == \"\"\"get_weather(location='Paris')\"\"\"\n assert parsed_output.raw_response.choices[0].message.tool_calls[0].id == \"call_123\"\n # Check cost tracking (token counts)\n assert global_tracker.stats[\"input_tokens\"] == 50\n assert global_tracker.stats[\"output_tokens\"] == 30\n assert global_tracker.stats[\"cost\"] > 0\n\n\ndef test_claude_response_model_parse_and_cost():\n args = ClaudeResponseModelArgs(model_name=\"claude-3-haiku-20240307\")\n model = args.make_model()\n\n mock_anthropic_api_response = create_mock_anthropic_response(\n text_content=\"Thinking about the request.\",\n tool_use={\"id\": \"tool_abc\", \"name\": \"search_web\", \"input\": {\"query\": \"latest news\"}},\n input_tokens=40,\n output_tokens=20,\n )\n\n with patch.object(\n model.client.messages, \"create\", return_value=mock_anthropic_api_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"Search for latest news\")]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n fn_call = next(iter(parsed_output.tool_calls))\n\n assert \"Thinking about the request.\" in parsed_output.think\n assert parsed_output.action == \"\"\"search_web(query='latest news')\"\"\"\n assert fn_call.name == \"search_web\"\n assert 
global_tracker.stats[\"input_tokens\"] == 40\n assert global_tracker.stats[\"output_tokens\"] == 20\n\n\ndef test_openai_response_model_parse_and_cost():\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\")\n\n mock_function_call_output = {\n \"type\": \"function_call\",\n \"name\": \"get_current_weather\",\n \"arguments\": '{\"location\": \"Boston, MA\", \"unit\": \"celsius\"}',\n \"call_id\": \"call_abc123\",\n }\n\n mock_api_resp = create_mock_openai_responses_api_response(\n outputs=[mock_function_call_output],\n input_tokens=70,\n output_tokens=40,\n )\n\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()\n\n with patch.object(\n model.client.responses, \"create\", return_value=mock_api_resp\n ) as mock_create_method:\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What's the weather in Boston?\")\n ]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create_method.assert_called_once()\n fn_calls = [\n content\n for content in parsed_output.tool_calls.raw_calls.output\n if content.type == \"function_call\"\n ]\n assert parsed_output.action == \"get_current_weather(location='Boston, MA', unit='celsius')\"\n assert fn_calls[0].call_id == \"call_abc123\"\n assert parsed_output.raw_response == mock_api_resp\n assert global_tracker.stats[\"input_tokens\"] == 70\n assert global_tracker.stats[\"output_tokens\"] == 40\n\n\n# --- Test Response Models (Pricy - require API keys and actual calls) ---\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_chat_completion_model_pricy_call():\n \"\"\"Tests OpenAIChatCompletionModel with a real API call.\"\"\"\n args = OpenAIChatModelArgs(\n model_name=\"gpt-4.1\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n\n tools = chat_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\" Expected get_weather(location='Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"ANTHROPIC_API_KEY not set\")\ndef test_claude_response_model_pricy_call():\n \"\"\"Tests ClaudeResponseModel with a real API call.\"\"\"\n\n args = ClaudeResponseModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n tools = anthropic_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\"Expected get_weather('Paris') but got {parsed_output.action}\"\"\"\n assert 
global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_pricy_call():\n \"\"\"\n Tests OpenAIResponseModel output parsing and cost tracking with both\n function_call and reasoning outputs.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"\"\"get_weather(location='Paris', unit='celsius')\"\"\"\n ), f\"\"\" Expected get_weather(location='Paris', unit='celsius') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_with_multiple_messages_and_cost_tracking():\n \"\"\"\n Test OpenAIResponseModel's output parsing and cost tracking\n with a tool-using assistant and follow-up interaction.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n builder = args.get_message_builder()\n\n messages = [builder.user().add_text(\"What is the weather in Paris?\")]\n\n with tracking.set_tracker() as tracker:\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n prev_input = tracker.stats[\"input_tokens\"]\n prev_output = tracker.stats[\"output_tokens\"]\n prev_cost = tracker.stats[\"cost\"]\n\n assert parsed.tool_calls, \"Expected tool calls in the response\"\n # Set tool responses\n for tool_call in parsed.tool_calls:\n tool_call.response_text(\"Its sunny! 25°C\")\n # Simulate tool execution and user follow-up\n messages += [\n builder.add_responded_tool_calls(parsed.tool_calls),\n builder.user().add_text(\"What is the weather in Delhi?\"),\n ]\n\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n\n delta_input = tracker.stats[\"input_tokens\"] - prev_input\n delta_output = tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0\n assert prev_output > 0\n assert prev_cost > 0\n assert parsed.raw_response is not None\n assert (\n parsed.action == \"\"\"get_weather(location='Delhi', unit='celsius')\"\"\"\n ), f\n# ... 
truncated ...","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":true} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.create_mock_openai_chat_completion","uri":"program://AgentLab/function/tests.llm.test_response_api.create_mock_openai_chat_completion#L23-L81","kind":"function","name":"create_mock_openai_chat_completion","path":"tests/llm/test_response_api.py","language":"python","start_line":23,"end_line":81,"context_start_line":3,"context_end_line":101,"code":"from unittest.mock import MagicMock, patch\n\nimport anthropic\nimport openai\nimport pytest\n\nfrom agentlab.llm import tracking\nfrom agentlab.llm.response_api import (\n AnthropicAPIMessageBuilder,\n APIPayload,\n ClaudeResponseModelArgs,\n LLMOutput,\n OpenAIChatCompletionAPIMessageBuilder,\n OpenAIChatModelArgs,\n OpenAIResponseAPIMessageBuilder,\n OpenAIResponseModelArgs,\n)\n\n\n# Helper to create a mock OpenAI ChatCompletion response\ndef create_mock_openai_chat_completion(\n content=None, tool_calls=None, prompt_tokens=10, completion_tokens=20\n):\n completion = MagicMock(spec=openai.types.chat.ChatCompletion)\n choice = MagicMock()\n message = MagicMock(spec=openai.types.chat.ChatCompletionMessage)\n message.content = content\n message.tool_calls = None\n if tool_calls:\n message.tool_calls = []\n for tc in tool_calls:\n tool_call_mock = MagicMock(\n spec=openai.types.chat.chat_completion_message_tool_call.ChatCompletionMessageToolCall\n )\n tool_call_mock.id = tc[\"id\"]\n tool_call_mock.type = tc[\"type\"]\n tool_call_mock.function = MagicMock()\n tool_call_mock.function.name = tc[\"function\"][\"name\"]\n tool_call_mock.function.arguments = tc[\"function\"][\"arguments\"]\n message.tool_calls.append(tool_call_mock)\n\n choice.message = message\n completion.choices = [choice]\n\n completion.usage = MagicMock()\n # Explicitly set the attributes that get_tokens_counts_from_response will try first.\n # These are the generic names.\n completion.usage.input_tokens = prompt_tokens\n completion.usage.output_tokens = completion_tokens\n\n # Also set the OpenAI-specific names if any other part of the code might look for them directly,\n # or if get_tokens_counts_from_response had different fallback logic.\n completion.usage.prompt_tokens = prompt_tokens\n completion.usage.completion_tokens = completion_tokens\n prompt_tokens_details_mock = MagicMock()\n prompt_tokens_details_mock.cached_tokens = 0\n completion.usage.prompt_tokens_details = prompt_tokens_details_mock\n\n completion.model_dump.return_value = {\n \"id\": \"chatcmpl-xxxx\",\n \"choices\": [\n {\"message\": {\"role\": \"assistant\", \"content\": content, \"tool_calls\": tool_calls}}\n ],\n # Ensure the usage dict in model_dump also reflects the token counts accurately.\n # The get_tokens_counts_from_response also has a path for dict style.\n \"usage\": {\n \"input_tokens\": prompt_tokens, # Generic name\n \"output_tokens\": completion_tokens, # Generic name\n \"prompt_tokens\": prompt_tokens, # OpenAI specific\n \"completion_tokens\": completion_tokens, # OpenAI specific\n \"prompt_tokens_details\": {\"cached_tokens\": 0},\n },\n }\n message.to_dict.return_value = {\n \"role\": \"assistant\",\n \"content\": content,\n \"tool_calls\": tool_calls,\n }\n return completion\n\n\nresponses_api_tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n 
\"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.create_mock_anthropic_response","uri":"program://AgentLab/function/tests.llm.test_response_api.create_mock_anthropic_response#L148-L173","kind":"function","name":"create_mock_anthropic_response","path":"tests/llm/test_response_api.py","language":"python","start_line":148,"end_line":173,"context_start_line":128,"context_end_line":193,"code":"]\nanthropic_tools = [\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n]\n\n\n# Helper to create a mock Anthropic response\ndef create_mock_anthropic_response(\n text_content=None, tool_use=None, input_tokens=15, output_tokens=25\n):\n\n response = MagicMock(spec=anthropic.types.Message)\n response.type = \"message\" # Explicitly set the type attribute\n response.content = []\n response.content = []\n if text_content:\n text_block = MagicMock(spec=anthropic.types.TextBlock)\n text_block.type = \"text\"\n text_block.text = text_content\n response.content.append(text_block)\n if tool_use:\n tool_use_block = MagicMock(spec=anthropic.types.ToolUseBlock)\n tool_use_block.type = \"tool_use\"\n tool_use_block.id = tool_use[\"id\"]\n tool_use_block.name = tool_use[\"name\"]\n tool_use_block.input = tool_use[\"input\"]\n response.content.append(tool_use_block)\n response.usage = MagicMock()\n response.usage.input_tokens = input_tokens\n response.usage.output_tokens = output_tokens\n response.usage.cache_input_tokens = 0\n response.usage.cache_creation_input_tokens = 0\n return response\n\n\ndef create_mock_openai_responses_api_response(\n outputs: Optional[List[Dict[str, Any]]] = None, input_tokens: int = 10, output_tokens: int = 20\n) -> MagicMock:\n \"\"\"\n Helper to create a mock response object similar to what\n openai.resources.Responses.create() would return.\n Compatible with OpenAIResponseModel and TrackAPIPricingMixin.\n \"\"\"\n\n response_mock = MagicMock(spec=openai.types.responses.response.Response)\n response_mock.type = \"response\"\n response_mock.output = []\n\n if outputs:\n for out_data in outputs:\n output_item_mock = MagicMock()\n output_item_mock.type = out_data.get(\"type\")\n","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.create_mock_openai_responses_api_response","uri":"program://AgentLab/function/tests.llm.test_response_api.create_mock_openai_responses_api_response#L176-L218","kind":"function","name":"create_mock_openai_responses_api_response","path":"tests/llm/test_response_api.py","language":"python","start_line":176,"end_line":218,"context_start_line":156,"context_end_line":238,"code":" if text_content:\n text_block = MagicMock(spec=anthropic.types.TextBlock)\n text_block.type = \"text\"\n text_block.text = text_content\n response.content.append(text_block)\n if tool_use:\n tool_use_block = MagicMock(spec=anthropic.types.ToolUseBlock)\n tool_use_block.type = 
\"tool_use\"\n tool_use_block.id = tool_use[\"id\"]\n tool_use_block.name = tool_use[\"name\"]\n tool_use_block.input = tool_use[\"input\"]\n response.content.append(tool_use_block)\n response.usage = MagicMock()\n response.usage.input_tokens = input_tokens\n response.usage.output_tokens = output_tokens\n response.usage.cache_input_tokens = 0\n response.usage.cache_creation_input_tokens = 0\n return response\n\n\ndef create_mock_openai_responses_api_response(\n outputs: Optional[List[Dict[str, Any]]] = None, input_tokens: int = 10, output_tokens: int = 20\n) -> MagicMock:\n \"\"\"\n Helper to create a mock response object similar to what\n openai.resources.Responses.create() would return.\n Compatible with OpenAIResponseModel and TrackAPIPricingMixin.\n \"\"\"\n\n response_mock = MagicMock(spec=openai.types.responses.response.Response)\n response_mock.type = \"response\"\n response_mock.output = []\n\n if outputs:\n for out_data in outputs:\n output_item_mock = MagicMock()\n output_item_mock.type = out_data.get(\"type\")\n\n if output_item_mock.type == \"function_call\":\n # You can adapt this depending on your expected object structure\n output_item_mock.name = out_data.get(\"name\")\n output_item_mock.arguments = out_data.get(\"arguments\")\n output_item_mock.call_id = out_data.get(\"call_id\")\n elif output_item_mock.type == \"reasoning\":\n output_item_mock.summary = []\n for text_content in out_data.get(\"summary\", []):\n summary_text_mock = MagicMock()\n summary_text_mock.text = text_content\n output_item_mock.summary.append(summary_text_mock)\n\n response_mock.output.append(output_item_mock)\n\n # Token usage for pricing tracking\n response_mock.usage = MagicMock(spec=openai.types.responses.response.ResponseUsage)\n response_mock.usage.input_tokens = input_tokens\n response_mock.usage.output_tokens = output_tokens\n response_mock.usage.prompt_tokens = input_tokens\n response_mock.usage.completion_tokens = output_tokens\n input_tokens_details_mock = MagicMock()\n input_tokens_details_mock.cached_tokens = 0\n response_mock.usage.input_tokens_details = input_tokens_details_mock\n\n return response_mock\n\n\n# --- Test MessageBuilders ---\n\n\ndef test_openai_response_api_message_builder_text():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_text(\"Hello, world!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"input_text\", \"text\": \"Hello, world!\"}]\n\n\ndef test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_response_api_message_builder_text","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_response_api_message_builder_text#L224-L230","kind":"function","name":"test_openai_response_api_message_builder_text","path":"tests/llm/test_response_api.py","language":"python","start_line":224,"end_line":230,"context_start_line":204,"context_end_line":250,"code":" output_item_mock.summary.append(summary_text_mock)\n\n response_mock.output.append(output_item_mock)\n\n # Token usage for pricing tracking\n response_mock.usage = 
MagicMock(spec=openai.types.responses.response.ResponseUsage)\n response_mock.usage.input_tokens = input_tokens\n response_mock.usage.output_tokens = output_tokens\n response_mock.usage.prompt_tokens = input_tokens\n response_mock.usage.completion_tokens = output_tokens\n input_tokens_details_mock = MagicMock()\n input_tokens_details_mock.cached_tokens = 0\n response_mock.usage.input_tokens_details = input_tokens_details_mock\n\n return response_mock\n\n\n# --- Test MessageBuilders ---\n\n\ndef test_openai_response_api_message_builder_text():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_text(\"Hello, world!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"input_text\", \"text\": \"Hello, world!\"}]\n\n\ndef test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"input_image\", \"image_url\": \"data:image/png;base64,SIMPLEBASE64STRING\"}\n ]\n\n\ndef test_anthropic_api_message_builder_text():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_text(\"Hello, Anthropic!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_response_api_message_builder_image","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_response_api_message_builder_image#L233-L241","kind":"function","name":"test_openai_response_api_message_builder_image","path":"tests/llm/test_response_api.py","language":"python","start_line":233,"end_line":241,"context_start_line":213,"context_end_line":261,"code":" response_mock.usage.completion_tokens = output_tokens\n input_tokens_details_mock = MagicMock()\n input_tokens_details_mock.cached_tokens = 0\n response_mock.usage.input_tokens_details = input_tokens_details_mock\n\n return response_mock\n\n\n# --- Test MessageBuilders ---\n\n\ndef test_openai_response_api_message_builder_text():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_text(\"Hello, world!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"input_text\", \"text\": \"Hello, world!\"}]\n\n\ndef test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"input_image\", \"image_url\": \"data:image/png;base64,SIMPLEBASE64STRING\"}\n ]\n\n\ndef test_anthropic_api_message_builder_text():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_text(\"Hello, Anthropic!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, 
Anthropic!\"}]\n\n\ndef test_anthropic_api_message_builder_image():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,ANTHROPICBASE64\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_anthropic_api_message_builder_text","uri":"program://AgentLab/function/tests.llm.test_response_api.test_anthropic_api_message_builder_text#L244-L250","kind":"function","name":"test_anthropic_api_message_builder_text","path":"tests/llm/test_response_api.py","language":"python","start_line":244,"end_line":250,"context_start_line":224,"context_end_line":270,"code":"def test_openai_response_api_message_builder_text():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_text(\"Hello, world!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"input_text\", \"text\": \"Hello, world!\"}]\n\n\ndef test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"input_image\", \"image_url\": \"data:image/png;base64,SIMPLEBASE64STRING\"}\n ]\n\n\ndef test_anthropic_api_message_builder_text():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_text(\"Hello, Anthropic!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]\n\n\ndef test_anthropic_api_message_builder_image():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,ANTHROPICBASE64\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"\n assert image_content[\"source\"][\"type\"] == \"base64\"\n assert image_content[\"source\"][\"media_type\"] == \"image/png\"\n assert image_content[\"source\"][\"data\"] == \"ANTHROPICBASE64\" # Base64 prefix should be stripped\n\n\ndef test_openai_chat_completion_api_message_builder_text():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_anthropic_api_message_builder_image","uri":"program://AgentLab/function/tests.llm.test_response_api.test_anthropic_api_message_builder_image#L253-L264","kind":"function","name":"test_anthropic_api_message_builder_image","path":"tests/llm/test_response_api.py","language":"python","start_line":253,"end_line":264,"context_start_line":233,"context_end_line":284,"code":"def test_openai_response_api_message_builder_image():\n builder = 
OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"input_image\", \"image_url\": \"data:image/png;base64,SIMPLEBASE64STRING\"}\n ]\n\n\ndef test_anthropic_api_message_builder_text():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_text(\"Hello, Anthropic!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]\n\n\ndef test_anthropic_api_message_builder_image():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,ANTHROPICBASE64\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"\n assert image_content[\"source\"][\"type\"] == \"base64\"\n assert image_content[\"source\"][\"media_type\"] == \"image/png\"\n assert image_content[\"source\"][\"data\"] == \"ANTHROPICBASE64\" # Base64 prefix should be stripped\n\n\ndef test_openai_chat_completion_api_message_builder_text():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, ChatCompletion!\"}]\n\n\ndef test_openai_chat_completion_api_message_builder_image():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_image(\"data:image/jpeg;base64,CHATCOMPLETIONBASE64\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_text","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_text#L267-L274","kind":"function","name":"test_openai_chat_completion_api_message_builder_text","path":"tests/llm/test_response_api.py","language":"python","start_line":267,"end_line":274,"context_start_line":247,"context_end_line":294,"code":" messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]\n\n\ndef test_anthropic_api_message_builder_image():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,ANTHROPICBASE64\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"\n assert image_content[\"source\"][\"type\"] == \"base64\"\n assert image_content[\"source\"][\"media_type\"] == \"image/png\"\n assert image_content[\"source\"][\"data\"] == \"ANTHROPICBASE64\" # Base64 prefix should be stripped\n\n\ndef test_openai_chat_completion_api_message_builder_text():\n builder = 
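The builder tests in this stretch of the file pin down how one logical message maps onto each provider's wire format. Side by side, as the assertions establish (the base64 string is a placeholder, not real image data):

```python
# Same data-URL image, three provider-specific content shapes.
data_url = "data:image/png;base64,SIMPLEBASE64STRING"

b = OpenAIResponseAPIMessageBuilder.user()
b.add_image(data_url)
# -> {"type": "input_image", "image_url": data_url}

b = AnthropicAPIMessageBuilder.user()
b.add_image(data_url)
# -> {"type": "image", "source": {"type": "base64", "media_type": "image/png",
#     "data": "SIMPLEBASE64STRING"}}   # the data-URL prefix is stripped

b = OpenAIChatCompletionAPIMessageBuilder.user()
b.add_image(data_url)
# -> {"type": "image_url", "image_url": {"url": data_url}}
```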
OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, ChatCompletion!\"}]\n\n\ndef test_openai_chat_completion_api_message_builder_image():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_image(\"data:image/jpeg;base64,CHATCOMPLETIONBASE64\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"image_url\", \"image_url\": {\"url\": \"data:image/jpeg;base64,CHATCOMPLETIONBASE64\"}}\n ]\n\n\ndef test_openai_chat_completion_model_parse_and_cost():\n args = OpenAIChatModelArgs(model_name=\"gpt-3.5-turbo\")\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_image","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_image#L277-L286","kind":"function","name":"test_openai_chat_completion_api_message_builder_image","path":"tests/llm/test_response_api.py","language":"python","start_line":277,"end_line":286,"context_start_line":257,"context_end_line":306,"code":" assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"\n assert image_content[\"source\"][\"type\"] == \"base64\"\n assert image_content[\"source\"][\"media_type\"] == \"image/png\"\n assert image_content[\"source\"][\"data\"] == \"ANTHROPICBASE64\" # Base64 prefix should be stripped\n\n\ndef test_openai_chat_completion_api_message_builder_text():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, ChatCompletion!\"}]\n\n\ndef test_openai_chat_completion_api_message_builder_image():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_image(\"data:image/jpeg;base64,CHATCOMPLETIONBASE64\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"image_url\", \"image_url\": {\"url\": \"data:image/jpeg;base64,CHATCOMPLETIONBASE64\"}}\n ]\n\n\ndef test_openai_chat_completion_model_parse_and_cost():\n args = OpenAIChatModelArgs(model_name=\"gpt-3.5-turbo\")\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()\n\n mock_response = create_mock_openai_chat_completion(\n content=\"This is a test thought.\",\n tool_calls=[\n {\n \"id\": \"call_123\",\n \"type\": \"function\",\n \"function\": {\"name\": \"get_weather\", \"arguments\": '{\"location\": \"Paris\"}'},\n }\n ],\n prompt_tokens=50,\n 
completion_tokens=30,","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_chat_completion_model_parse_and_cost","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_model_parse_and_cost#L289-L328","kind":"function","name":"test_openai_chat_completion_model_parse_and_cost","path":"tests/llm/test_response_api.py","language":"python","start_line":289,"end_line":328,"context_start_line":269,"context_end_line":348,"code":" builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, ChatCompletion!\"}]\n\n\ndef test_openai_chat_completion_api_message_builder_image():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_image(\"data:image/jpeg;base64,CHATCOMPLETIONBASE64\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"image_url\", \"image_url\": {\"url\": \"data:image/jpeg;base64,CHATCOMPLETIONBASE64\"}}\n ]\n\n\ndef test_openai_chat_completion_model_parse_and_cost():\n args = OpenAIChatModelArgs(model_name=\"gpt-3.5-turbo\")\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()\n\n mock_response = create_mock_openai_chat_completion(\n content=\"This is a test thought.\",\n tool_calls=[\n {\n \"id\": \"call_123\",\n \"type\": \"function\",\n \"function\": {\"name\": \"get_weather\", \"arguments\": '{\"location\": \"Paris\"}'},\n }\n ],\n prompt_tokens=50,\n completion_tokens=30,\n )\n\n with patch.object(\n model.client.chat.completions, \"create\", return_value=mock_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\n \"What's the weather in Paris?\"\n )\n ]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n assert parsed_output.raw_response.choices[0].message.content == \"This is a test thought.\"\n assert parsed_output.action == \"\"\"get_weather(location='Paris')\"\"\"\n assert parsed_output.raw_response.choices[0].message.tool_calls[0].id == \"call_123\"\n # Check cost tracking (token counts)\n assert global_tracker.stats[\"input_tokens\"] == 50\n assert global_tracker.stats[\"output_tokens\"] == 30\n assert global_tracker.stats[\"cost\"] > 0\n\n\ndef test_claude_response_model_parse_and_cost():\n args = ClaudeResponseModelArgs(model_name=\"claude-3-haiku-20240307\")\n model = args.make_model()\n\n mock_anthropic_api_response = create_mock_anthropic_response(\n text_content=\"Thinking about the request.\",\n tool_use={\"id\": \"tool_abc\", \"name\": \"search_web\", \"input\": {\"query\": \"latest news\"}},\n input_tokens=40,\n output_tokens=20,\n )\n\n with patch.object(\n model.client.messages, \"create\", return_value=mock_anthropic_api_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"Search for latest news\")]\n payload = APIPayload(messages=messages)\n parsed_output = 
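Each parse-and-cost test that follows uses one pattern: patch the provider client's create method so nothing leaves the process, run the model inside tracking.set_tracker(), and check the tracker against the token counts baked into the mock. Distilled below as a sketch; `model` is assumed to be built from OpenAIChatModelArgs under a patched client exactly as in the test above, and the helper's keyword arguments are the ones shown there.

```python
# Sketch of the shared mock-and-track pattern used by the parse/cost tests.
mock_response = create_mock_openai_chat_completion(
    content="ok",
    tool_calls=[
        {
            "id": "call_123",
            "type": "function",
            "function": {"name": "get_weather", "arguments": '{"location": "Paris"}'},
        }
    ],
    prompt_tokens=50,
    completion_tokens=30,
)
with patch.object(model.client.chat.completions, "create", return_value=mock_response):
    with tracking.set_tracker() as tracker:
        messages = [OpenAIChatCompletionAPIMessageBuilder.user().add_text("Hi")]
        model(APIPayload(messages=messages))
assert tracker.stats["input_tokens"] == 50   # mirrors prompt_tokens on the mock
assert tracker.stats["output_tokens"] == 30  # mirrors completion_tokens
```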
model(payload)","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_claude_response_model_parse_and_cost","uri":"program://AgentLab/function/tests.llm.test_response_api.test_claude_response_model_parse_and_cost#L331-L357","kind":"function","name":"test_claude_response_model_parse_and_cost","path":"tests/llm/test_response_api.py","language":"python","start_line":331,"end_line":357,"context_start_line":311,"context_end_line":377,"code":" ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\n \"What's the weather in Paris?\"\n )\n ]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n assert parsed_output.raw_response.choices[0].message.content == \"This is a test thought.\"\n assert parsed_output.action == \"\"\"get_weather(location='Paris')\"\"\"\n assert parsed_output.raw_response.choices[0].message.tool_calls[0].id == \"call_123\"\n # Check cost tracking (token counts)\n assert global_tracker.stats[\"input_tokens\"] == 50\n assert global_tracker.stats[\"output_tokens\"] == 30\n assert global_tracker.stats[\"cost\"] > 0\n\n\ndef test_claude_response_model_parse_and_cost():\n args = ClaudeResponseModelArgs(model_name=\"claude-3-haiku-20240307\")\n model = args.make_model()\n\n mock_anthropic_api_response = create_mock_anthropic_response(\n text_content=\"Thinking about the request.\",\n tool_use={\"id\": \"tool_abc\", \"name\": \"search_web\", \"input\": {\"query\": \"latest news\"}},\n input_tokens=40,\n output_tokens=20,\n )\n\n with patch.object(\n model.client.messages, \"create\", return_value=mock_anthropic_api_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"Search for latest news\")]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n fn_call = next(iter(parsed_output.tool_calls))\n\n assert \"Thinking about the request.\" in parsed_output.think\n assert parsed_output.action == \"\"\"search_web(query='latest news')\"\"\"\n assert fn_call.name == \"search_web\"\n assert global_tracker.stats[\"input_tokens\"] == 40\n assert global_tracker.stats[\"output_tokens\"] == 20\n\n\ndef test_openai_response_model_parse_and_cost():\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\")\n\n mock_function_call_output = {\n \"type\": \"function_call\",\n \"name\": \"get_current_weather\",\n \"arguments\": '{\"location\": \"Boston, MA\", \"unit\": \"celsius\"}',\n \"call_id\": \"call_abc123\",\n }\n\n mock_api_resp = create_mock_openai_responses_api_response(\n outputs=[mock_function_call_output],\n input_tokens=70,\n output_tokens=40,\n )\n\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_response_model_parse_and_cost","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_response_model_parse_and_cost#L360-L401","kind":"function","name":"test_openai_response_model_parse_and_cost","path":"tests/llm/test_response_api.py","language":"python","start_line":360,"end_line":401,"context_start_line":340,"context_end_line":421,"code":" )\n\n with 
patch.object(\n model.client.messages, \"create\", return_value=mock_anthropic_api_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"Search for latest news\")]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n fn_call = next(iter(parsed_output.tool_calls))\n\n assert \"Thinking about the request.\" in parsed_output.think\n assert parsed_output.action == \"\"\"search_web(query='latest news')\"\"\"\n assert fn_call.name == \"search_web\"\n assert global_tracker.stats[\"input_tokens\"] == 40\n assert global_tracker.stats[\"output_tokens\"] == 20\n\n\ndef test_openai_response_model_parse_and_cost():\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\")\n\n mock_function_call_output = {\n \"type\": \"function_call\",\n \"name\": \"get_current_weather\",\n \"arguments\": '{\"location\": \"Boston, MA\", \"unit\": \"celsius\"}',\n \"call_id\": \"call_abc123\",\n }\n\n mock_api_resp = create_mock_openai_responses_api_response(\n outputs=[mock_function_call_output],\n input_tokens=70,\n output_tokens=40,\n )\n\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()\n\n with patch.object(\n model.client.responses, \"create\", return_value=mock_api_resp\n ) as mock_create_method:\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What's the weather in Boston?\")\n ]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create_method.assert_called_once()\n fn_calls = [\n content\n for content in parsed_output.tool_calls.raw_calls.output\n if content.type == \"function_call\"\n ]\n assert parsed_output.action == \"get_current_weather(location='Boston, MA', unit='celsius')\"\n assert fn_calls[0].call_id == \"call_abc123\"\n assert parsed_output.raw_response == mock_api_resp\n assert global_tracker.stats[\"input_tokens\"] == 70\n assert global_tracker.stats[\"output_tokens\"] == 40\n\n\n# --- Test Response Models (Pricy - require API keys and actual calls) ---\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_chat_completion_model_pricy_call():\n \"\"\"Tests OpenAIChatCompletionModel with a real API call.\"\"\"\n args = OpenAIChatModelArgs(\n model_name=\"gpt-4.1\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n\n tools = chat_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_chat_completion_model_pricy_call","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_model_pricy_call#L409-L433","kind":"function","name":"test_openai_chat_completion_model_pricy_call","path":"tests/llm/test_response_api.py","language":"python","start_line":409,"end_line":433,"context_start_line":389,"context_end_line":453,"code":" parsed_output = model(payload)\n\n mock_create_method.assert_called_once()\n fn_calls = [\n content\n for content in parsed_output.tool_calls.raw_calls.output\n if content.type == \"function_call\"\n ]\n assert parsed_output.action == \"get_current_weather(location='Boston, MA', 
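test_openai_response_model_parse_and_cost pulls the function calls out of the raw Responses API output by filtering on item type. That idiom in isolation, assuming `parsed_output` is the LLMOutput returned by model(payload):

```python
# Filtering idiom from the test above; `parsed_output` is the LLMOutput
# returned by model(payload).
fn_calls = [
    item
    for item in parsed_output.tool_calls.raw_calls.output
    if item.type == "function_call"
]
assert fn_calls, "expected at least one function_call output item"
print(fn_calls[0].name, fn_calls[0].call_id, fn_calls[0].arguments)
```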
unit='celsius')\"\n assert fn_calls[0].call_id == \"call_abc123\"\n assert parsed_output.raw_response == mock_api_resp\n assert global_tracker.stats[\"input_tokens\"] == 70\n assert global_tracker.stats[\"output_tokens\"] == 40\n\n\n# --- Test Response Models (Pricy - require API keys and actual calls) ---\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_chat_completion_model_pricy_call():\n \"\"\"Tests OpenAIChatCompletionModel with a real API call.\"\"\"\n args = OpenAIChatModelArgs(\n model_name=\"gpt-4.1\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n\n tools = chat_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\" Expected get_weather(location='Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"ANTHROPIC_API_KEY not set\")\ndef test_claude_response_model_pricy_call():\n \"\"\"Tests ClaudeResponseModel with a real API call.\"\"\"\n\n args = ClaudeResponseModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n tools = anthropic_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_claude_response_model_pricy_call","uri":"program://AgentLab/function/tests.llm.test_response_api.test_claude_response_model_pricy_call#L438-L460","kind":"function","name":"test_claude_response_model_pricy_call","path":"tests/llm/test_response_api.py","language":"python","start_line":438,"end_line":460,"context_start_line":418,"context_end_line":480,"code":" model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\" Expected get_weather(location='Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"ANTHROPIC_API_KEY not set\")\ndef test_claude_response_model_pricy_call():\n \"\"\"Tests ClaudeResponseModel with a real API call.\"\"\"\n\n args = ClaudeResponseModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n tools = anthropic_tools\n model 
= args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\"Expected get_weather('Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_pricy_call():\n \"\"\"\n Tests OpenAIResponseModel output parsing and cost tracking with both\n function_call and reasoning outputs.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_response_model_pricy_call","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_response_model_pricy_call#L465-L488","kind":"function","name":"test_openai_response_model_pricy_call","path":"tests/llm/test_response_api.py","language":"python","start_line":465,"end_line":488,"context_start_line":445,"context_end_line":508,"code":" )\n tools = anthropic_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\"Expected get_weather('Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_pricy_call():\n \"\"\"\n Tests OpenAIResponseModel output parsing and cost tracking with both\n function_call and reasoning outputs.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"\"\"get_weather(location='Paris', unit='celsius')\"\"\"\n ), f\"\"\" Expected get_weather(location='Paris', unit='celsius') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 
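Every live-API test from here on is gated twice: the pricy marker, so paid calls can be deselected (e.g. with pytest -m "not pricy"), and a skipif on the provider key. The recurring skeleton, with an illustrative test name and the body elided:

```python
import os

import pytest


@pytest.mark.pricy  # deselect paid tests with: pytest -m "not pricy"
@pytest.mark.skipif(not os.getenv("OPENAI_API_KEY"), reason="OPENAI_API_KEY not set")
def test_some_pricy_call():
    ...  # real request, then assertions on parsed output and tracker stats
```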
0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_with_multiple_messages_and_cost_tracking():\n \"\"\"\n Test OpenAIResponseModel's output parsing and cost tracking\n with a tool-using assistant and follow-up interaction.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n builder = args.get_message_builder()\n\n messages = [builder.user().add_text(\"What is the weather in Paris?\")]\n\n with tracking.set_tracker() as tracker:\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_response_model_with_multiple_messages_and_cost_tracking","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_response_model_with_multiple_messages_and_cost_tracking#L493-L542","kind":"function","name":"test_openai_response_model_with_multiple_messages_and_cost_tracking","path":"tests/llm/test_response_api.py","language":"python","start_line":493,"end_line":542,"context_start_line":473,"context_end_line":562,"code":" model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"\"\"get_weather(location='Paris', unit='celsius')\"\"\"\n ), f\"\"\" Expected get_weather(location='Paris', unit='celsius') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_with_multiple_messages_and_cost_tracking():\n \"\"\"\n Test OpenAIResponseModel's output parsing and cost tracking\n with a tool-using assistant and follow-up interaction.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n builder = args.get_message_builder()\n\n messages = [builder.user().add_text(\"What is the weather in Paris?\")]\n\n with tracking.set_tracker() as tracker:\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n prev_input = tracker.stats[\"input_tokens\"]\n prev_output = tracker.stats[\"output_tokens\"]\n prev_cost = tracker.stats[\"cost\"]\n\n assert parsed.tool_calls, \"Expected tool calls in the response\"\n # Set tool responses\n for tool_call in parsed.tool_calls:\n tool_call.response_text(\"It's sunny! 
25°C\")\n # Simulate tool execution and user follow-up\n messages += [\n builder.add_responded_tool_calls(parsed.tool_calls),\n builder.user().add_text(\"What is the weather in Delhi?\"),\n ]\n\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n\n delta_input = tracker.stats[\"input_tokens\"] - prev_input\n delta_output = tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0\n assert prev_output > 0\n assert prev_cost > 0\n assert parsed.raw_response is not None\n assert (\n parsed.action == \"\"\"get_weather(location='Delhi', unit='celsius')\"\"\"\n ), f\"Unexpected action: {parsed.action}\"\n assert delta_input > 0\n assert delta_output > 0\n assert delta_cost > 0\n assert tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking():\n \"\"\"\n Test OpenAIChatCompletionModel's output parsing and cost tracking\n with a tool-using assistant and follow-up interaction.\n \"\"\"\n args = OpenAIChatModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking#L547-L617","kind":"function","name":"test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking","path":"tests/llm/test_response_api.py","language":"python","start_line":547,"end_line":617,"context_start_line":527,"context_end_line":637,"code":" delta_output = tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0\n assert prev_output > 0\n assert prev_cost > 0\n assert parsed.raw_response is not None\n assert (\n parsed.action == \"\"\"get_weather(location='Delhi', unit='celsius')\"\"\"\n ), f\"Unexpected action: {parsed.action}\"\n assert delta_input > 0\n assert delta_output > 0\n assert delta_cost > 0\n assert tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking():\n \"\"\"\n Test OpenAIChatCompletionModel's output parsing and cost tracking\n with a tool-using assistant and follow-up interaction.\n \"\"\"\n args = OpenAIChatModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n 
\"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n ]\n\n model = args.make_model()\n builder = args.get_message_builder()\n\n messages = [builder.user().add_text(\"What is the weather in Paris?\")]\n\n with tracking.set_tracker() as tracker:\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n prev_input = tracker.stats[\"input_tokens\"]\n prev_output = tracker.stats[\"output_tokens\"]\n prev_cost = tracker.stats[\"cost\"]\n\n # Set tool responses\n for tool_call in parsed.tool_calls:\n tool_call.response_text(\"It's sunny! 25°C\")\n # Simulate tool execution and user follow-up\n messages += [\n builder.add_responded_tool_calls(parsed.tool_calls),\n builder.user().add_text(\"What is the weather in Delhi?\"),\n ]\n\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n\n delta_input = tracker.stats[\"input_tokens\"] - prev_input\n delta_output = tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0\n assert prev_output > 0\n assert prev_cost > 0\n assert parsed.raw_response is not None\n assert (\n parsed.action == \"\"\"get_weather(location='Delhi')\"\"\"\n ), f\"Unexpected action: {parsed.action}\"\n assert delta_input > 0\n assert delta_output > 0\n assert delta_cost > 0\n assert tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"ANTHROPIC_API_KEY not set\")\ndef test_claude_model_with_multiple_messages_pricy_call():\n model_factory = ClaudeResponseModelArgs(\n model_name=\"claude-3-haiku-20240307\", temperature=1e-5, max_new_tokens=100\n )\n tools = [\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_claude_model_with_multiple_messages_pricy_call","uri":"program://AgentLab/function/tests.llm.test_response_api.test_claude_model_with_multiple_messages_pricy_call#L622-L684","kind":"function","name":"test_claude_model_with_multiple_messages_pricy_call","path":"tests/llm/test_response_api.py","language":"python","start_line":622,"end_line":684,"context_start_line":602,"context_end_line":704,"code":" delta_output = tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0\n assert prev_output > 0\n assert prev_cost > 0\n assert parsed.raw_response is not None\n assert (\n parsed.action == \"\"\"get_weather(location='Delhi')\"\"\"\n ), f\"Unexpected action: {parsed.action}\"\n assert delta_input > 0\n assert delta_output > 0\n assert delta_cost > 0\n assert tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert 
tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"ANTHROPIC_API_KEY not set\")\ndef test_claude_model_with_multiple_messages_pricy_call():\n model_factory = ClaudeResponseModelArgs(\n model_name=\"claude-3-haiku-20240307\", temperature=1e-5, max_new_tokens=100\n )\n tools = [\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n ]\n model = model_factory.make_model()\n msg_builder = model_factory.get_message_builder()\n messages = []\n\n messages.append(msg_builder.user().add_text(\"What is the weather in Paris?\"))\n with tracking.set_tracker() as global_tracker:\n payload = APIPayload(messages=messages, tools=tools)\n llm_output1 = model(payload)\n\n prev_input = global_tracker.stats[\"input_tokens\"]\n prev_output = global_tracker.stats[\"output_tokens\"]\n prev_cost = global_tracker.stats[\"cost\"]\n\n for tool_call in llm_output1.tool_calls:\n tool_call.response_text(\"It's sunny! 25°C\")\n messages += [\n msg_builder.add_responded_tool_calls(llm_output1.tool_calls),\n msg_builder.user().add_text(\"What is the weather in Delhi?\"),\n ]\n payload = APIPayload(messages=messages, tools=tools)\n llm_output2 = model(payload)\n delta_input = global_tracker.stats[\"input_tokens\"] - prev_input\n delta_output = global_tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = global_tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0, \"Expected previous input tokens to be greater than 0\"\n assert prev_output > 0, \"Expected previous output tokens to be greater than 0\"\n assert prev_cost > 0, \"Expected previous cost value to be greater than 0\"\n assert llm_output2.raw_response is not None\n assert (\n llm_output2.action == \"\"\"get_weather(location='Delhi', unit='celsius')\"\"\"\n ), f\"\"\"Expected get_weather('Delhi') but got {llm_output2.action}\"\"\"\n assert delta_input > 0, \"Expected new input tokens to be greater than 0\"\n assert delta_output > 0, \"Expected new output tokens to be greater than 0\"\n assert delta_cost > 0, \"Expected new cost value to be greater than 0\"\n assert global_tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert global_tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert global_tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n## Test multiaction\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n # test_config (setting name, BaseModelArgs, model_name, tools)\n tool_test_configs = [\n (\n \"gpt-4.1-responses API\",\n OpenAIResponseModelArgs,\n \"gpt-4.1-2025-04-14\",\n responses_api_tools,\n ),\n (\"gpt-4.1-chat Completions API\", OpenAIChatModelArgs, \"gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", ClaudeResponseModelArgs, 
\"claude-3-haiku-20240307\", anthropic_tools), # fails","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_multi_action_tool_calls","uri":"program://AgentLab/function/tests.llm.test_response_api.test_multi_action_tool_calls#L690-L750","kind":"function","name":"test_multi_action_tool_calls","path":"tests/llm/test_response_api.py","language":"python","start_line":690,"end_line":750,"context_start_line":670,"context_end_line":770,"code":" delta_cost = global_tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0, \"Expected previous input tokens to be greater than 0\"\n assert prev_output > 0, \"Expected previous output tokens to be greater than 0\"\n assert prev_cost > 0, \"Expected previous cost value to be greater than 0\"\n assert llm_output2.raw_response is not None\n assert (\n llm_output2.action == \"\"\"get_weather(location='Delhi', unit='celsius')\"\"\"\n ), f\"\"\"Expected get_weather('Delhi') but got {llm_output2.action}\"\"\"\n assert delta_input > 0, \"Expected new input tokens to be greater than 0\"\n assert delta_output > 0, \"Expected new output tokens to be greater than 0\"\n assert delta_cost > 0, \"Expected new cost value to be greater than 0\"\n assert global_tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert global_tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert global_tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n## Test multiaction\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n # test_config (setting name, BaseModelArgs, model_name, tools)\n tool_test_configs = [\n (\n \"gpt-4.1-responses API\",\n OpenAIResponseModelArgs,\n \"gpt-4.1-2025-04-14\",\n responses_api_tools,\n ),\n (\"gpt-4.1-chat Completions API\", OpenAIChatModelArgs, \"gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", ClaudeResponseModelArgs, \"claude-3-haiku-20240307\", anthropic_tools), # fails\n # (\"claude-3.7\", ClaudeResponseModelArgs, \"claude-3-7-sonnet-20250219\", anthropic_tools), # fails\n (\"claude-4-sonnet\", ClaudeResponseModelArgs, \"claude-sonnet-4-20250514\", anthropic_tools),\n # add more models as needed\n ]\n\n def add_user_messages(msg_builder):\n return [\n msg_builder.user().add_text(\"What is the weather in Paris and Delhi?\"),\n msg_builder.user().add_text(\"You must call multiple tools to achieve the task.\"),\n ]\n\n res_df = []\n\n for tool_choice in [\n # 'none',\n # 'required', # fails for Responses API\n # 'any', # fails for Responses API\n \"auto\",\n # 'get_weather'\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n print(name, \"tool choice:\", tool_choice, \"\\n\", \"**\" * 10)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n 
num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n res_df.append(\n {\n \"model\": name,\n \"checkpoint\": checkpoint_name,\n \"tool_choice\": tool_choice,\n \"num_tool_calls\": num_tool_calls,\n \"action\": response.action,\n }\n )\n assert (\n num_tool_calls == 2\n ), f\"Expected 2 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice}\"\n # import pandas as pd\n # print(pd.DataFrame(res_df))\n\n\nEDGE_CASES = [\n # 1. Empty kwargs dict\n (\"valid_function\", {}, \"valid_function()\"),\n # 2. Kwargs with problematic string values (quotes, escapes, unicode)\n (\n \"send_message\",\n {\n \"text\": 'He said \"Hello!\" and used a backslash: \\\\',\n \"unicode\": \"Café naïve résumé 🚀\",\n \"newlines\": \"Line1\\nLine2\\tTabbed\",\n },\n \"send_message(text='He said \\\"Hello!\\\" and used a backslash: \\\\\\\\', unicode='Café naïve résumé 🚀', newlines='Line1\\\\nLine2\\\\tTabbed')\",\n ),\n # 3. Mixed types including problematic float values\n (\n \"complex_call\",","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_tool_call_to_python_code","uri":"program://AgentLab/function/tests.llm.test_response_api.test_tool_call_to_python_code#L791-L798","kind":"function","name":"test_tool_call_to_python_code","path":"tests/llm/test_response_api.py","language":"python","start_line":791,"end_line":798,"context_start_line":771,"context_end_line":803,"code":" {\n \"infinity\": float(\"inf\"),\n \"nan\": float(\"nan\"),\n \"negative_zero\": -0.0,\n \"scientific\": 1.23e-45,\n },\n \"complex_call(infinity=inf, nan=nan, negative_zero=-0.0, scientific=1.23e-45)\",\n ),\n # 4. Deeply nested structures that could stress repr()\n (\n \"process_data\",\n {\n \"nested\": {\"level1\": {\"level2\": {\"level3\": [1, 2, {\"deep\": True}]}}},\n \"circular_ref_like\": {\"a\": {\"b\": {\"c\": \"back_to_start\"}}},\n },\n \"process_data(nested={'level1': {'level2': {'level3': [1, 2, {'deep': True}]}}}, circular_ref_like={'a': {'b': {'c': 'back_to_start'}}})\",\n ),\n]\n\n\ndef test_tool_call_to_python_code():\n from agentlab.llm.response_api import tool_call_to_python_code\n\n for edge_case in EDGE_CASES:\n func_name, kwargs, expected = edge_case\n result = tool_call_to_python_code(func_name, kwargs)\n print(result)\n assert result == expected, f\"Expected {expected} but got {result}\"\n\n\nif __name__ == \"__main__\":\n test_tool_call_to_python_code()\n # test_openai_chat_completion_model_parse_and_cost()","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.add_user_messages","uri":"program://AgentLab/function/tests.llm.test_response_api.add_user_messages#L710-L714","kind":"function","name":"add_user_messages","path":"tests/llm/test_response_api.py","language":"python","start_line":710,"end_line":714,"context_start_line":690,"context_end_line":734,"code":"def test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n # test_config (setting name, BaseModelArgs, model_name, tools)\n tool_test_configs = [\n (\n \"gpt-4.1-responses API\",\n OpenAIResponseModelArgs,\n \"gpt-4.1-2025-04-14\",\n responses_api_tools,\n ),\n (\"gpt-4.1-chat Completions API\", OpenAIChatModelArgs, \"gpt-4.1-2025-04-14\", 
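The EDGE_CASES table above fixes the exact strings tool_call_to_python_code must produce, including repr-hostile values. Two of the table's entries, runnable on their own:

```python
# Mirrors entries 1 and 3 of EDGE_CASES above.
from agentlab.llm.response_api import tool_call_to_python_code

assert tool_call_to_python_code("valid_function", {}) == "valid_function()"
assert tool_call_to_python_code(
    "complex_call",
    {
        "infinity": float("inf"),
        "nan": float("nan"),
        "negative_zero": -0.0,
        "scientific": 1.23e-45,
    },
) == "complex_call(infinity=inf, nan=nan, negative_zero=-0.0, scientific=1.23e-45)"
```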
chat_api_tools),\n # (\"claude-3\", ClaudeResponseModelArgs, \"claude-3-haiku-20240307\", anthropic_tools), # fails\n # (\"claude-3.7\", ClaudeResponseModelArgs, \"claude-3-7-sonnet-20250219\", anthropic_tools), # fails\n (\"claude-4-sonnet\", ClaudeResponseModelArgs, \"claude-sonnet-4-20250514\", anthropic_tools),\n # add more models as needed\n ]\n\n def add_user_messages(msg_builder):\n return [\n msg_builder.user().add_text(\"What is the weather in Paris and Delhi?\"),\n msg_builder.user().add_text(\"You must call multiple tools to achieve the task.\"),\n ]\n\n res_df = []\n\n for tool_choice in [\n # 'none',\n # 'required', # fails for Responses API\n # 'any', # fails for Responses API\n \"auto\",\n # 'get_weather'\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n print(name, \"tool choice:\", tool_choice, \"\\n\", \"**\" * 10)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_chat_api","uri":"program://AgentLab/module/tests.llm.test_chat_api#L1-L93","kind":"module","name":"tests.llm.test_chat_api","path":"tests/llm/test_chat_api.py","language":"python","start_line":1,"end_line":93,"context_start_line":1,"context_end_line":93,"code":"import os\n\nimport pytest\n\nfrom agentlab.llm.chat_api import (\n AnthropicModelArgs,\n AzureModelArgs,\n OpenAIModelArgs,\n make_system_message,\n make_user_message,\n)\n\n# TODO(optimass): figure out a good model for all tests\n\n\nif \"AGENTLAB_LOCAL_TEST\" in os.environ:\n skip_tests = os.environ[\"AGENTLAB_LOCAL_TEST\"] != \"1\"\nelse:\n skip_tests = False\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Azure is pricy\")\n@pytest.mark.skipif(\n not os.getenv(\"AZURE_OPENAI_API_KEY\"), reason=\"Skipping as Azure API key not set\"\n)\ndef test_api_model_args_azure():\n model_args = AzureModelArgs(\n model_name=\"gpt-4.1-nano\",\n deployment_name=\"gpt-4.1-nano\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Azure is pricy\")\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_api_model_args_openai():\n model_args = OpenAIModelArgs(\n model_name=\"gpt-4o-mini\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Anthropic is pricy\")\n@pytest.mark.skipif(\n not 
os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"Skipping as Anthropic API key not set\"\n)\ndef test_api_model_args_anthropic():\n model_args = AnthropicModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number. Just the number, no explanation.\"),\n ]\n answer = model(messages)\n assert \"5\" in answer.get(\"content\")\n\n\nif __name__ == \"__main__\":\n test_api_model_args_anthropic()","source_hash":"1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_chat_api.test_api_model_args_azure","uri":"program://AgentLab/function/tests.llm.test_chat_api.test_api_model_args_azure#L27-L44","kind":"function","name":"test_api_model_args_azure","path":"tests/llm/test_chat_api.py","language":"python","start_line":27,"end_line":44,"context_start_line":7,"context_end_line":64,"code":" AzureModelArgs,\n OpenAIModelArgs,\n make_system_message,\n make_user_message,\n)\n\n# TODO(optimass): figure out a good model for all tests\n\n\nif \"AGENTLAB_LOCAL_TEST\" in os.environ:\n skip_tests = os.environ[\"AGENTLAB_LOCAL_TEST\"] != \"1\"\nelse:\n skip_tests = False\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Azure is pricy\")\n@pytest.mark.skipif(\n not os.getenv(\"AZURE_OPENAI_API_KEY\"), reason=\"Skipping as Azure API key not set\"\n)\ndef test_api_model_args_azure():\n model_args = AzureModelArgs(\n model_name=\"gpt-4.1-nano\",\n deployment_name=\"gpt-4.1-nano\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Azure is pricy\")\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_api_model_args_openai():\n model_args = OpenAIModelArgs(\n model_name=\"gpt-4o-mini\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)","source_hash":"1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_chat_api.test_api_model_args_openai","uri":"program://AgentLab/function/tests.llm.test_chat_api.test_api_model_args_openai#L50-L66","kind":"function","name":"test_api_model_args_openai","path":"tests/llm/test_chat_api.py","language":"python","start_line":50,"end_line":66,"context_start_line":30,"context_end_line":86,"code":" deployment_name=\"gpt-4.1-nano\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in 
answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Azure is pricy\")\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_api_model_args_openai():\n model_args = OpenAIModelArgs(\n model_name=\"gpt-4o-mini\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Anthropic is pricy\")\n@pytest.mark.skipif(\n not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"Skipping as Anthropic API key not set\"\n)\ndef test_api_model_args_anthropic():\n model_args = AnthropicModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number. Just the number, no explanation.\"),","source_hash":"1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_chat_api.test_api_model_args_anthropic","uri":"program://AgentLab/function/tests.llm.test_chat_api.test_api_model_args_anthropic#L74-L89","kind":"function","name":"test_api_model_args_anthropic","path":"tests/llm/test_chat_api.py","language":"python","start_line":74,"end_line":89,"context_start_line":54,"context_end_line":93,"code":" max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Anthropic is pricy\")\n@pytest.mark.skipif(\n not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"Skipping as Anthropic API key not set\"\n)\ndef test_api_model_args_anthropic():\n model_args = AnthropicModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number. 
Just the number, no explanation.\"),\n ]\n answer = model(messages)\n assert \"5\" in answer.get(\"content\")\n\n\nif __name__ == \"__main__\":\n test_api_model_args_anthropic()","source_hash":"1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils","uri":"program://AgentLab/module/tests.llm.test_llm_utils#L1-L280","kind":"module","name":"tests.llm.test_llm_utils","path":"tests/llm/test_llm_utils.py","language":"python","start_line":1,"end_line":280,"context_start_line":1,"context_end_line":280,"code":"import warnings\nfrom typing import Literal\nfrom unittest.mock import Mock\n\nimport httpx\nimport pytest\nfrom openai import RateLimitError\n\nfrom agentlab.llm import llm_utils\nfrom agentlab.llm.chat_api import make_system_message\n\nyaml_str = \"\"\"Analysis:\nThis is the analysis\n\nSummary: This is the summary\n\nConfidence Score: 7\n\"\"\"\n\n\ndef test_yaml_parser():\n ans, _, _ = llm_utils.yaml_parser(yaml_str)\n print(ans)\n assert ans[\"Analysis\"] == \"This is the analysis\"\n assert ans[\"Summary\"] == \"This is the summary\"\n assert ans[\"Confidence Score\"] == 7\n\n\ndef test_truncate_tokens():\n text = \"This is a simple test.\"\n truncated = llm_utils.truncate_tokens(text, max_tokens=3)\n assert truncated == \"This is a\"\n\n\ndef test_count_tokens():\n text = \"This is a simple test.\"\n assert llm_utils.count_tokens(text) == 6\n\n\ndef test_json_parser():\n # Testing valid JSON\n message = '{\"test\": \"Hello, World!\"}'\n\n # deactivate warnings\n warnings.filterwarnings(\"ignore\")\n\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {\"test\": \"Hello, World!\"}\n assert valid == True\n assert retry_message == \"\"\n\n # Testing invalid JSON\n message = '{\"test\": \"Hello, World!\"' # missing closing brace\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {}\n assert valid == False\n assert len(retry_message) > 3\n\n # reactivate warnings\n warnings.filterwarnings(\"default\")\n\n\ndef test_compress_string():\n text = \"\"\"\nThis is a test\nfor paragraph.\n\nThis is a second test.\nhola\nThis is a second test.\n\nThis is a test\nfor paragraph.\n\"\"\"\n\n expected_output = \"\"\"\\\n\n§-0:\nThis is a test\nfor paragraph.\n¶-0:\nThis is a second test.\n\n§-0\n¶-0\nhola\n¶-0\n§-0\"\"\"\n\n compressed_text = llm_utils.compress_string(text)\n assert compressed_text == expected_output\n\n\n# Mock ChatOpenAI class\nclass MockChatOpenAI:\n def call(self, messages):\n return \"mocked response\"\n\n def __call__(self, messages):\n return self.call(messages)\n\n\ndef mock_parser(answer):\n if answer == \"correct content\":\n return \"Parsed value\"\n else:\n raise llm_utils.ParseError(\"Retry message\")\n\n\ndef mock_rate_limit_error(message: str, status_code: Literal[429] = 429) -> RateLimitError:\n \"\"\"\n Create a mocked instantiation of RateLimitError with a specified message and status code.\n\n Args:\n message (str): The error message.\n status_code (Literal[429]): The HTTP status code, default is 429 for rate limiting.\n\n Returns:\n RateLimitError: A mocked RateLimitError instance.\n \"\"\"\n mock_response = Mock(spec=httpx.Response)\n mock_response.status_code = status_code\n mock_response.json.return_value = {\"error\": {\"message\": message}}\n mock_response.headers = {\"x-request-id\": \"test-request-id\"} # Add headers attribute\n\n return RateLimitError(message, response=mock_response, 
body=mock_response.json())\n\n\n# Test to ensure function stops retrying after reaching the max wait time\n# def test_rate_limit_max_wait_time():\n# mock_chat = MockChatOpenAI()\n# mock_chat.call = Mock(\n# side_effect=mock_rate_limit_error(\"Rate limit reached. Please try again in 2s.\")\n# )\n\n# with pytest.raises(RateLimitError):\n# llm_utils.retry(\n# mock_chat,\n# [],\n# n_retry=4,\n# parser=mock_parser,\n# rate_limit_max_wait_time=6,\n# min_retry_wait_time=1,\n# )\n\n# # The function should stop retrying after 2 attempts (6s each time, 12s total which is greater than the 10s max wait time)\n# assert mock_chat.call.call_count == 3\n\n\n# def test_rate_limit_success():\n# mock_chat = MockChatOpenAI()\n# mock_chat.call = Mock(\n# side_effect=[\n# mock_rate_limit_error(\"Rate limit reached. Please try again in 2s.\"),\n# make_system_message(\"correct content\"),\n# ]\n# )\n\n# result = llm_utils.retry(\n# mock_chat,\n# [],\n# n_retry=4,\n# parser=mock_parser,\n# rate_limit_max_wait_time=6,\n# min_retry_wait_time=1,\n# )\n\n# assert result == \"Parsed value\"\n# assert mock_chat.call.call_count == 2\n\n\n# Mock a successful parser response to test function exit before max retries\ndef test_successful_parse_before_max_retries():\n mock_chat = MockChatOpenAI()\n\n # mock a chat that returns the wrong content the first 2 time, but the right\n # content on the 3rd time\n mock_chat.call = Mock(\n side_effect=[\n make_system_message(\"wrong content\"),\n make_system_message(\"wrong content\"),\n make_system_message(\"correct content\"),\n ]\n )\n\n result = llm_utils.retry(mock_chat, llm_utils.Discussion(), 5, mock_parser)\n\n assert result == \"Parsed value\"\n assert mock_chat.call.call_count == 3\n\n\ndef test_unsuccessful_parse_before_max_retries():\n mock_chat = MockChatOpenAI()\n\n # mock a chat that returns the wrong content the first 2 time, but the right\n # content on the 3rd time\n mock_chat.call = Mock(\n side_effect=[\n make_system_message(\"wrong content\"),\n make_system_message(\"wrong content\"),\n make_system_message(\"correct content\"),\n ]\n )\n with pytest.raises(llm_utils.ParseError):\n result = llm_utils.retry(mock_chat, llm_utils.Discussion(), 2, mock_parser)\n\n assert mock_chat.call.call_count == 2\n\n\ndef test_retry_parse_raises():\n mock_chat = MockChatOpenAI()\n mock_chat.call = Mock(return_value=make_system_message(\"mocked response\"))\n parser_raises = Mock(side_effect=ValueError(\"Parser error\"))\n\n with pytest.raises(ValueError):\n llm_utils.retry(mock_chat, llm_utils.Discussion(), 3, parser_raises)\n\n\ndef test_extract_code_blocks():\n text = \"\"\"\\\nThis is some text.\n```python\ndef hello_world():\n print(\"Hello, world!\")\n```\nSome more text.\n```\nMore code without a language.\n```\nAnother block of code:\n```javascript\nconsole.log(\"Hello, world!\");\n```\nAn inline code block ```click()```\n\"\"\"\n\n expected_output = [\n (\"python\", 'def hello_world():\\n print(\"Hello, world!\")'),\n (\"\", \"More code without a language.\"),\n (\"javascript\", 'console.log(\"Hello, world!\");'),\n (\"\", \"click()\"),\n ]\n\n assert llm_utils.extract_code_blocks(text) == expected_output\n\n\ndef test_message_merge_only_text():\n content = [\n {\"type\": \"text\", \"text\": \"Hello, world!\"},\n {\"type\": \"text\", \"text\": \"This is a test.\"},\n ]\n message = llm_utils.BaseMessage(role=\"system\", content=content)\n message.merge()\n assert message[\"content\"] == \"Hello, world!\\nThis is a test.\"\n\n\ndef test_message_merge_text_image():\n content = 
[\n {\"type\": \"text\", \"text\": \"Hello, world!\"},\n {\"type\": \"text\", \"text\": \"This is a test.\"},\n {\"type\": \"image_url\", \"image_url\": \"this is a base64 image\"},\n {\"type\": \"text\", \"text\": \"This is another test.\"},\n {\"type\": \"text\", \"text\": \"Goodbye, world!\"},\n ]\n message = llm_utils.BaseMessage(role=\"system\", content=content)\n message.merge()\n assert len(message[\"content\"]) == 3\n assert message[\"content\"][0][\"text\"] == \"Hello, world!\\nThis is a test.\"\n assert message[\"content\"][1][\"image_url\"] == \"this is a base64 image\"\n assert message[\"content\"][2][\"text\"] == \"This is another test.\\nGoodbye, world!\"\n\n\nif __name__ == \"__main__\":\n # test_retry_parallel()\n # test_rate_limit_max_wait_time()\n # test_successful_parse_before_max_retries()\n # test_unsuccessful_parse_before_max_retries()\n # test_extract_code_blocks()\n # test_message_merge_only_text()\n test_message_merge_text_image()","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_yaml_parser","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_yaml_parser#L21-L26","kind":"function","name":"test_yaml_parser","path":"tests/llm/test_llm_utils.py","language":"python","start_line":21,"end_line":26,"context_start_line":1,"context_end_line":46,"code":"import warnings\nfrom typing import Literal\nfrom unittest.mock import Mock\n\nimport httpx\nimport pytest\nfrom openai import RateLimitError\n\nfrom agentlab.llm import llm_utils\nfrom agentlab.llm.chat_api import make_system_message\n\nyaml_str = \"\"\"Analysis:\nThis is the analysis\n\nSummary: This is the summary\n\nConfidence Score: 7\n\"\"\"\n\n\ndef test_yaml_parser():\n ans, _, _ = llm_utils.yaml_parser(yaml_str)\n print(ans)\n assert ans[\"Analysis\"] == \"This is the analysis\"\n assert ans[\"Summary\"] == \"This is the summary\"\n assert ans[\"Confidence Score\"] == 7\n\n\ndef test_truncate_tokens():\n text = \"This is a simple test.\"\n truncated = llm_utils.truncate_tokens(text, max_tokens=3)\n assert truncated == \"This is a\"\n\n\ndef test_count_tokens():\n text = \"This is a simple test.\"\n assert llm_utils.count_tokens(text) == 6\n\n\ndef test_json_parser():\n # Testing valid JSON\n message = '{\"test\": \"Hello, World!\"}'\n\n # deactivate warnings\n warnings.filterwarnings(\"ignore\")\n","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_truncate_tokens","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_truncate_tokens#L29-L32","kind":"function","name":"test_truncate_tokens","path":"tests/llm/test_llm_utils.py","language":"python","start_line":29,"end_line":32,"context_start_line":9,"context_end_line":52,"code":"from agentlab.llm import llm_utils\nfrom agentlab.llm.chat_api import make_system_message\n\nyaml_str = \"\"\"Analysis:\nThis is the analysis\n\nSummary: This is the summary\n\nConfidence Score: 7\n\"\"\"\n\n\ndef test_yaml_parser():\n ans, _, _ = llm_utils.yaml_parser(yaml_str)\n print(ans)\n assert ans[\"Analysis\"] == \"This is the analysis\"\n assert ans[\"Summary\"] == \"This is the summary\"\n assert ans[\"Confidence Score\"] == 7\n\n\ndef test_truncate_tokens():\n text = \"This is a simple test.\"\n truncated = llm_utils.truncate_tokens(text, max_tokens=3)\n assert truncated == \"This is a\"\n\n\ndef test_count_tokens():\n text = 
\"This is a simple test.\"\n assert llm_utils.count_tokens(text) == 6\n\n\ndef test_json_parser():\n # Testing valid JSON\n message = '{\"test\": \"Hello, World!\"}'\n\n # deactivate warnings\n warnings.filterwarnings(\"ignore\")\n\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {\"test\": \"Hello, World!\"}\n assert valid == True\n assert retry_message == \"\"\n\n # Testing invalid JSON","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_count_tokens","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_count_tokens#L35-L37","kind":"function","name":"test_count_tokens","path":"tests/llm/test_llm_utils.py","language":"python","start_line":35,"end_line":37,"context_start_line":15,"context_end_line":57,"code":"Summary: This is the summary\n\nConfidence Score: 7\n\"\"\"\n\n\ndef test_yaml_parser():\n ans, _, _ = llm_utils.yaml_parser(yaml_str)\n print(ans)\n assert ans[\"Analysis\"] == \"This is the analysis\"\n assert ans[\"Summary\"] == \"This is the summary\"\n assert ans[\"Confidence Score\"] == 7\n\n\ndef test_truncate_tokens():\n text = \"This is a simple test.\"\n truncated = llm_utils.truncate_tokens(text, max_tokens=3)\n assert truncated == \"This is a\"\n\n\ndef test_count_tokens():\n text = \"This is a simple test.\"\n assert llm_utils.count_tokens(text) == 6\n\n\ndef test_json_parser():\n # Testing valid JSON\n message = '{\"test\": \"Hello, World!\"}'\n\n # deactivate warnings\n warnings.filterwarnings(\"ignore\")\n\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {\"test\": \"Hello, World!\"}\n assert valid == True\n assert retry_message == \"\"\n\n # Testing invalid JSON\n message = '{\"test\": \"Hello, World!\"' # missing closing brace\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {}\n assert valid == False\n assert len(retry_message) > 3","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_json_parser","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_json_parser#L40-L60","kind":"function","name":"test_json_parser","path":"tests/llm/test_llm_utils.py","language":"python","start_line":40,"end_line":60,"context_start_line":20,"context_end_line":80,"code":"\ndef test_yaml_parser():\n ans, _, _ = llm_utils.yaml_parser(yaml_str)\n print(ans)\n assert ans[\"Analysis\"] == \"This is the analysis\"\n assert ans[\"Summary\"] == \"This is the summary\"\n assert ans[\"Confidence Score\"] == 7\n\n\ndef test_truncate_tokens():\n text = \"This is a simple test.\"\n truncated = llm_utils.truncate_tokens(text, max_tokens=3)\n assert truncated == \"This is a\"\n\n\ndef test_count_tokens():\n text = \"This is a simple test.\"\n assert llm_utils.count_tokens(text) == 6\n\n\ndef test_json_parser():\n # Testing valid JSON\n message = '{\"test\": \"Hello, World!\"}'\n\n # deactivate warnings\n warnings.filterwarnings(\"ignore\")\n\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {\"test\": \"Hello, World!\"}\n assert valid == True\n assert retry_message == \"\"\n\n # Testing invalid JSON\n message = '{\"test\": \"Hello, World!\"' # missing closing brace\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {}\n assert valid == False\n assert len(retry_message) > 3\n\n # reactivate 
warnings\n warnings.filterwarnings(\"default\")\n\n\ndef test_compress_string():\n text = \"\"\"\nThis is a test\nfor paragraph.\n\nThis is a second test.\nhola\nThis is a second test.\n\nThis is a test\nfor paragraph.\n\"\"\"\n\n expected_output = \"\"\"\\\n\n§-0:\nThis is a test\nfor paragraph.","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_compress_string","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_compress_string#L63-L91","kind":"function","name":"test_compress_string","path":"tests/llm/test_llm_utils.py","language":"python","start_line":63,"end_line":91,"context_start_line":43,"context_end_line":111,"code":"\n # deactivate warnings\n warnings.filterwarnings(\"ignore\")\n\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {\"test\": \"Hello, World!\"}\n assert valid == True\n assert retry_message == \"\"\n\n # Testing invalid JSON\n message = '{\"test\": \"Hello, World!\"' # missing closing brace\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {}\n assert valid == False\n assert len(retry_message) > 3\n\n # reactivate warnings\n warnings.filterwarnings(\"default\")\n\n\ndef test_compress_string():\n text = \"\"\"\nThis is a test\nfor paragraph.\n\nThis is a second test.\nhola\nThis is a second test.\n\nThis is a test\nfor paragraph.\n\"\"\"\n\n expected_output = \"\"\"\\\n\n§-0:\nThis is a test\nfor paragraph.\n¶-0:\nThis is a second test.\n\n§-0\n¶-0\nhola\n¶-0\n§-0\"\"\"\n\n compressed_text = llm_utils.compress_string(text)\n assert compressed_text == expected_output\n\n\n# Mock ChatOpenAI class\nclass MockChatOpenAI:\n def call(self, messages):\n return \"mocked response\"\n\n def __call__(self, messages):\n return self.call(messages)\n\n\ndef mock_parser(answer):\n if answer == \"correct content\":\n return \"Parsed value\"\n else:\n raise llm_utils.ParseError(\"Retry message\")\n\n\ndef mock_rate_limit_error(message: str, status_code: Literal[429] = 429) -> RateLimitError:\n \"\"\"","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.MockChatOpenAI","uri":"program://AgentLab/class/tests.llm.test_llm_utils.MockChatOpenAI#L95-L100","kind":"class","name":"MockChatOpenAI","path":"tests/llm/test_llm_utils.py","language":"python","start_line":95,"end_line":100,"context_start_line":75,"context_end_line":120,"code":"\n expected_output = \"\"\"\\\n\n§-0:\nThis is a test\nfor paragraph.\n¶-0:\nThis is a second test.\n\n§-0\n¶-0\nhola\n¶-0\n§-0\"\"\"\n\n compressed_text = llm_utils.compress_string(text)\n assert compressed_text == expected_output\n\n\n# Mock ChatOpenAI class\nclass MockChatOpenAI:\n def call(self, messages):\n return \"mocked response\"\n\n def __call__(self, messages):\n return self.call(messages)\n\n\ndef mock_parser(answer):\n if answer == \"correct content\":\n return \"Parsed value\"\n else:\n raise llm_utils.ParseError(\"Retry message\")\n\n\ndef mock_rate_limit_error(message: str, status_code: Literal[429] = 429) -> RateLimitError:\n \"\"\"\n Create a mocked instantiation of RateLimitError with a specified message and status code.\n\n Args:\n message (str): The error message.\n status_code (Literal[429]): The HTTP status code, default is 429 for rate limiting.\n\n Returns:\n RateLimitError: A mocked RateLimitError instance.\n 
\"\"\"","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.mock_parser","uri":"program://AgentLab/function/tests.llm.test_llm_utils.mock_parser#L103-L107","kind":"function","name":"mock_parser","path":"tests/llm/test_llm_utils.py","language":"python","start_line":103,"end_line":107,"context_start_line":83,"context_end_line":127,"code":"\n§-0\n¶-0\nhola\n¶-0\n§-0\"\"\"\n\n compressed_text = llm_utils.compress_string(text)\n assert compressed_text == expected_output\n\n\n# Mock ChatOpenAI class\nclass MockChatOpenAI:\n def call(self, messages):\n return \"mocked response\"\n\n def __call__(self, messages):\n return self.call(messages)\n\n\ndef mock_parser(answer):\n if answer == \"correct content\":\n return \"Parsed value\"\n else:\n raise llm_utils.ParseError(\"Retry message\")\n\n\ndef mock_rate_limit_error(message: str, status_code: Literal[429] = 429) -> RateLimitError:\n \"\"\"\n Create a mocked instantiation of RateLimitError with a specified message and status code.\n\n Args:\n message (str): The error message.\n status_code (Literal[429]): The HTTP status code, default is 429 for rate limiting.\n\n Returns:\n RateLimitError: A mocked RateLimitError instance.\n \"\"\"\n mock_response = Mock(spec=httpx.Response)\n mock_response.status_code = status_code\n mock_response.json.return_value = {\"error\": {\"message\": message}}\n mock_response.headers = {\"x-request-id\": \"test-request-id\"} # Add headers attribute\n\n return RateLimitError(message, response=mock_response, body=mock_response.json())\n","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.mock_rate_limit_error","uri":"program://AgentLab/function/tests.llm.test_llm_utils.mock_rate_limit_error#L110-L126","kind":"function","name":"mock_rate_limit_error","path":"tests/llm/test_llm_utils.py","language":"python","start_line":110,"end_line":126,"context_start_line":90,"context_end_line":146,"code":" compressed_text = llm_utils.compress_string(text)\n assert compressed_text == expected_output\n\n\n# Mock ChatOpenAI class\nclass MockChatOpenAI:\n def call(self, messages):\n return \"mocked response\"\n\n def __call__(self, messages):\n return self.call(messages)\n\n\ndef mock_parser(answer):\n if answer == \"correct content\":\n return \"Parsed value\"\n else:\n raise llm_utils.ParseError(\"Retry message\")\n\n\ndef mock_rate_limit_error(message: str, status_code: Literal[429] = 429) -> RateLimitError:\n \"\"\"\n Create a mocked instantiation of RateLimitError with a specified message and status code.\n\n Args:\n message (str): The error message.\n status_code (Literal[429]): The HTTP status code, default is 429 for rate limiting.\n\n Returns:\n RateLimitError: A mocked RateLimitError instance.\n \"\"\"\n mock_response = Mock(spec=httpx.Response)\n mock_response.status_code = status_code\n mock_response.json.return_value = {\"error\": {\"message\": message}}\n mock_response.headers = {\"x-request-id\": \"test-request-id\"} # Add headers attribute\n\n return RateLimitError(message, response=mock_response, body=mock_response.json())\n\n\n# Test to ensure function stops retrying after reaching the max wait time\n# def test_rate_limit_max_wait_time():\n# mock_chat = MockChatOpenAI()\n# mock_chat.call = Mock(\n# side_effect=mock_rate_limit_error(\"Rate limit reached. 
Please try again in 2s.\")\n# )\n\n# with pytest.raises(RateLimitError):\n# llm_utils.retry(\n# mock_chat,\n# [],\n# n_retry=4,\n# parser=mock_parser,\n# rate_limit_max_wait_time=6,\n# min_retry_wait_time=1,\n# )\n\n# # The function should stop retrying after 2 attempts (6s each time, 12s total which is greater than the 10s max wait time)","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_successful_parse_before_max_retries","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_successful_parse_before_max_retries#L173-L189","kind":"function","name":"test_successful_parse_before_max_retries","path":"tests/llm/test_llm_utils.py","language":"python","start_line":173,"end_line":189,"context_start_line":153,"context_end_line":209,"code":"# side_effect=[\n# mock_rate_limit_error(\"Rate limit reached. Please try again in 2s.\"),\n# make_system_message(\"correct content\"),\n# ]\n# )\n\n# result = llm_utils.retry(\n# mock_chat,\n# [],\n# n_retry=4,\n# parser=mock_parser,\n# rate_limit_max_wait_time=6,\n# min_retry_wait_time=1,\n# )\n\n# assert result == \"Parsed value\"\n# assert mock_chat.call.call_count == 2\n\n\n# Mock a successful parser response to test function exit before max retries\ndef test_successful_parse_before_max_retries():\n mock_chat = MockChatOpenAI()\n\n # mock a chat that returns the wrong content the first 2 time, but the right\n # content on the 3rd time\n mock_chat.call = Mock(\n side_effect=[\n make_system_message(\"wrong content\"),\n make_system_message(\"wrong content\"),\n make_system_message(\"correct content\"),\n ]\n )\n\n result = llm_utils.retry(mock_chat, llm_utils.Discussion(), 5, mock_parser)\n\n assert result == \"Parsed value\"\n assert mock_chat.call.call_count == 3\n\n\ndef test_unsuccessful_parse_before_max_retries():\n mock_chat = MockChatOpenAI()\n\n # mock a chat that returns the wrong content the first 2 time, but the right\n # content on the 3rd time\n mock_chat.call = Mock(\n side_effect=[\n make_system_message(\"wrong content\"),\n make_system_message(\"wrong content\"),\n make_system_message(\"correct content\"),\n ]\n )\n with pytest.raises(llm_utils.ParseError):\n result = llm_utils.retry(mock_chat, llm_utils.Discussion(), 2, mock_parser)\n\n assert mock_chat.call.call_count == 2\n\n","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_unsuccessful_parse_before_max_retries","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_unsuccessful_parse_before_max_retries#L192-L207","kind":"function","name":"test_unsuccessful_parse_before_max_retries","path":"tests/llm/test_llm_utils.py","language":"python","start_line":192,"end_line":207,"context_start_line":172,"context_end_line":227,"code":"# Mock a successful parser response to test function exit before max retries\ndef test_successful_parse_before_max_retries():\n mock_chat = MockChatOpenAI()\n\n # mock a chat that returns the wrong content the first 2 time, but the right\n # content on the 3rd time\n mock_chat.call = Mock(\n side_effect=[\n make_system_message(\"wrong content\"),\n make_system_message(\"wrong content\"),\n make_system_message(\"correct content\"),\n ]\n )\n\n result = llm_utils.retry(mock_chat, llm_utils.Discussion(), 5, mock_parser)\n\n assert result == \"Parsed value\"\n assert mock_chat.call.call_count == 3\n\n\ndef 
test_unsuccessful_parse_before_max_retries():\n mock_chat = MockChatOpenAI()\n\n # mock a chat that returns the wrong content the first 2 time, but the right\n # content on the 3rd time\n mock_chat.call = Mock(\n side_effect=[\n make_system_message(\"wrong content\"),\n make_system_message(\"wrong content\"),\n make_system_message(\"correct content\"),\n ]\n )\n with pytest.raises(llm_utils.ParseError):\n result = llm_utils.retry(mock_chat, llm_utils.Discussion(), 2, mock_parser)\n\n assert mock_chat.call.call_count == 2\n\n\ndef test_retry_parse_raises():\n mock_chat = MockChatOpenAI()\n mock_chat.call = Mock(return_value=make_system_message(\"mocked response\"))\n parser_raises = Mock(side_effect=ValueError(\"Parser error\"))\n\n with pytest.raises(ValueError):\n llm_utils.retry(mock_chat, llm_utils.Discussion(), 3, parser_raises)\n\n\ndef test_extract_code_blocks():\n text = \"\"\"\\\nThis is some text.\n```python\ndef hello_world():\n print(\"Hello, world!\")\n```\nSome more text.\n```","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_retry_parse_raises","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_retry_parse_raises#L210-L216","kind":"function","name":"test_retry_parse_raises","path":"tests/llm/test_llm_utils.py","language":"python","start_line":210,"end_line":216,"context_start_line":190,"context_end_line":236,"code":"\n\ndef test_unsuccessful_parse_before_max_retries():\n mock_chat = MockChatOpenAI()\n\n # mock a chat that returns the wrong content the first 2 time, but the right\n # content on the 3rd time\n mock_chat.call = Mock(\n side_effect=[\n make_system_message(\"wrong content\"),\n make_system_message(\"wrong content\"),\n make_system_message(\"correct content\"),\n ]\n )\n with pytest.raises(llm_utils.ParseError):\n result = llm_utils.retry(mock_chat, llm_utils.Discussion(), 2, mock_parser)\n\n assert mock_chat.call.call_count == 2\n\n\ndef test_retry_parse_raises():\n mock_chat = MockChatOpenAI()\n mock_chat.call = Mock(return_value=make_system_message(\"mocked response\"))\n parser_raises = Mock(side_effect=ValueError(\"Parser error\"))\n\n with pytest.raises(ValueError):\n llm_utils.retry(mock_chat, llm_utils.Discussion(), 3, parser_raises)\n\n\ndef test_extract_code_blocks():\n text = \"\"\"\\\nThis is some text.\n```python\ndef hello_world():\n print(\"Hello, world!\")\n```\nSome more text.\n```\nMore code without a language.\n```\nAnother block of code:\n```javascript\nconsole.log(\"Hello, world!\");\n```\nAn inline code block ```click()```\n\"\"\"\n","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_extract_code_blocks","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_extract_code_blocks#L219-L244","kind":"function","name":"test_extract_code_blocks","path":"tests/llm/test_llm_utils.py","language":"python","start_line":219,"end_line":244,"context_start_line":199,"context_end_line":264,"code":" make_system_message(\"wrong content\"),\n make_system_message(\"wrong content\"),\n make_system_message(\"correct content\"),\n ]\n )\n with pytest.raises(llm_utils.ParseError):\n result = llm_utils.retry(mock_chat, llm_utils.Discussion(), 2, mock_parser)\n\n assert mock_chat.call.call_count == 2\n\n\ndef test_retry_parse_raises():\n mock_chat = MockChatOpenAI()\n mock_chat.call = 
Mock(return_value=make_system_message(\"mocked response\"))\n parser_raises = Mock(side_effect=ValueError(\"Parser error\"))\n\n with pytest.raises(ValueError):\n llm_utils.retry(mock_chat, llm_utils.Discussion(), 3, parser_raises)\n\n\ndef test_extract_code_blocks():\n text = \"\"\"\\\nThis is some text.\n```python\ndef hello_world():\n print(\"Hello, world!\")\n```\nSome more text.\n```\nMore code without a language.\n```\nAnother block of code:\n```javascript\nconsole.log(\"Hello, world!\");\n```\nAn inline code block ```click()```\n\"\"\"\n\n expected_output = [\n (\"python\", 'def hello_world():\\n print(\"Hello, world!\")'),\n (\"\", \"More code without a language.\"),\n (\"javascript\", 'console.log(\"Hello, world!\");'),\n (\"\", \"click()\"),\n ]\n\n assert llm_utils.extract_code_blocks(text) == expected_output\n\n\ndef test_message_merge_only_text():\n content = [\n {\"type\": \"text\", \"text\": \"Hello, world!\"},\n {\"type\": \"text\", \"text\": \"This is a test.\"},\n ]\n message = llm_utils.BaseMessage(role=\"system\", content=content)\n message.merge()\n assert message[\"content\"] == \"Hello, world!\\nThis is a test.\"\n\n\ndef test_message_merge_text_image():\n content = [\n {\"type\": \"text\", \"text\": \"Hello, world!\"},\n {\"type\": \"text\", \"text\": \"This is a test.\"},\n {\"type\": \"image_url\", \"image_url\": \"this is a base64 image\"},\n {\"type\": \"text\", \"text\": \"This is another test.\"},\n {\"type\": \"text\", \"text\": \"Goodbye, world!\"},\n ]","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_message_merge_only_text","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_message_merge_only_text#L247-L254","kind":"function","name":"test_message_merge_only_text","path":"tests/llm/test_llm_utils.py","language":"python","start_line":247,"end_line":254,"context_start_line":227,"context_end_line":274,"code":"```\nMore code without a language.\n```\nAnother block of code:\n```javascript\nconsole.log(\"Hello, world!\");\n```\nAn inline code block ```click()```\n\"\"\"\n\n expected_output = [\n (\"python\", 'def hello_world():\\n print(\"Hello, world!\")'),\n (\"\", \"More code without a language.\"),\n (\"javascript\", 'console.log(\"Hello, world!\");'),\n (\"\", \"click()\"),\n ]\n\n assert llm_utils.extract_code_blocks(text) == expected_output\n\n\ndef test_message_merge_only_text():\n content = [\n {\"type\": \"text\", \"text\": \"Hello, world!\"},\n {\"type\": \"text\", \"text\": \"This is a test.\"},\n ]\n message = llm_utils.BaseMessage(role=\"system\", content=content)\n message.merge()\n assert message[\"content\"] == \"Hello, world!\\nThis is a test.\"\n\n\ndef test_message_merge_text_image():\n content = [\n {\"type\": \"text\", \"text\": \"Hello, world!\"},\n {\"type\": \"text\", \"text\": \"This is a test.\"},\n {\"type\": \"image_url\", \"image_url\": \"this is a base64 image\"},\n {\"type\": \"text\", \"text\": \"This is another test.\"},\n {\"type\": \"text\", \"text\": \"Goodbye, world!\"},\n ]\n message = llm_utils.BaseMessage(role=\"system\", content=content)\n message.merge()\n assert len(message[\"content\"]) == 3\n assert message[\"content\"][0][\"text\"] == \"Hello, world!\\nThis is a test.\"\n assert message[\"content\"][1][\"image_url\"] == \"this is a base64 image\"\n assert message[\"content\"][2][\"text\"] == \"This is another test.\\nGoodbye, world!\"\n\n\nif __name__ == \"__main__\":\n # 
test_retry_parallel()","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.test_message_merge_text_image","uri":"program://AgentLab/function/tests.llm.test_llm_utils.test_message_merge_text_image#L257-L270","kind":"function","name":"test_message_merge_text_image","path":"tests/llm/test_llm_utils.py","language":"python","start_line":257,"end_line":270,"context_start_line":237,"context_end_line":280,"code":" expected_output = [\n (\"python\", 'def hello_world():\\n print(\"Hello, world!\")'),\n (\"\", \"More code without a language.\"),\n (\"javascript\", 'console.log(\"Hello, world!\");'),\n (\"\", \"click()\"),\n ]\n\n assert llm_utils.extract_code_blocks(text) == expected_output\n\n\ndef test_message_merge_only_text():\n content = [\n {\"type\": \"text\", \"text\": \"Hello, world!\"},\n {\"type\": \"text\", \"text\": \"This is a test.\"},\n ]\n message = llm_utils.BaseMessage(role=\"system\", content=content)\n message.merge()\n assert message[\"content\"] == \"Hello, world!\\nThis is a test.\"\n\n\ndef test_message_merge_text_image():\n content = [\n {\"type\": \"text\", \"text\": \"Hello, world!\"},\n {\"type\": \"text\", \"text\": \"This is a test.\"},\n {\"type\": \"image_url\", \"image_url\": \"this is a base64 image\"},\n {\"type\": \"text\", \"text\": \"This is another test.\"},\n {\"type\": \"text\", \"text\": \"Goodbye, world!\"},\n ]\n message = llm_utils.BaseMessage(role=\"system\", content=content)\n message.merge()\n assert len(message[\"content\"]) == 3\n assert message[\"content\"][0][\"text\"] == \"Hello, world!\\nThis is a test.\"\n assert message[\"content\"][1][\"image_url\"] == \"this is a base64 image\"\n assert message[\"content\"][2][\"text\"] == \"This is another test.\\nGoodbye, world!\"\n\n\nif __name__ == \"__main__\":\n # test_retry_parallel()\n # test_rate_limit_max_wait_time()\n # test_successful_parse_before_max_retries()\n # test_unsuccessful_parse_before_max_retries()\n # test_extract_code_blocks()\n # test_message_merge_only_text()\n test_message_merge_text_image()","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.call","uri":"program://AgentLab/function/tests.llm.test_llm_utils.call#L96-L97","kind":"function","name":"call","path":"tests/llm/test_llm_utils.py","language":"python","start_line":96,"end_line":97,"context_start_line":76,"context_end_line":117,"code":" expected_output = \"\"\"\\\n\n§-0:\nThis is a test\nfor paragraph.\n¶-0:\nThis is a second test.\n\n§-0\n¶-0\nhola\n¶-0\n§-0\"\"\"\n\n compressed_text = llm_utils.compress_string(text)\n assert compressed_text == expected_output\n\n\n# Mock ChatOpenAI class\nclass MockChatOpenAI:\n def call(self, messages):\n return \"mocked response\"\n\n def __call__(self, messages):\n return self.call(messages)\n\n\ndef mock_parser(answer):\n if answer == \"correct content\":\n return \"Parsed value\"\n else:\n raise llm_utils.ParseError(\"Retry message\")\n\n\ndef mock_rate_limit_error(message: str, status_code: Literal[429] = 429) -> RateLimitError:\n \"\"\"\n Create a mocked instantiation of RateLimitError with a specified message and status code.\n\n Args:\n message (str): The error message.\n status_code (Literal[429]): The HTTP status code, default is 429 for rate limiting.\n","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils.__call__","uri":"program://AgentLab/function/tests.llm.test_llm_utils.__call__#L99-L100","kind":"function","name":"__call__","path":"tests/llm/test_llm_utils.py","language":"python","start_line":99,"end_line":100,"context_start_line":79,"context_end_line":120,"code":"This is a test\nfor paragraph.\n¶-0:\nThis is a second test.\n\n§-0\n¶-0\nhola\n¶-0\n§-0\"\"\"\n\n compressed_text = llm_utils.compress_string(text)\n assert compressed_text == expected_output\n\n\n# Mock ChatOpenAI class\nclass MockChatOpenAI:\n def call(self, messages):\n return \"mocked response\"\n\n def __call__(self, messages):\n return self.call(messages)\n\n\ndef mock_parser(answer):\n if answer == \"correct content\":\n return \"Parsed value\"\n else:\n raise llm_utils.ParseError(\"Retry message\")\n\n\ndef mock_rate_limit_error(message: str, status_code: Literal[429] = 429) -> RateLimitError:\n \"\"\"\n Create a mocked instantiation of RateLimitError with a specified message and status code.\n\n Args:\n message (str): The error message.\n status_code (Literal[429]): The HTTP status code, default is 429 for rate limiting.\n\n Returns:\n RateLimitError: A mocked RateLimitError instance.\n \"\"\"","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_launch_exp","uri":"program://AgentLab/module/tests.experiments.test_launch_exp#L1-L128","kind":"module","name":"tests.experiments.test_launch_exp","path":"tests/experiments/test_launch_exp.py","language":"python","start_line":1,"end_line":128,"context_start_line":1,"context_end_line":128,"code":"import math\nimport tempfile\nfrom pathlib import Path\n\nimport pytest\n\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_3_5, AGENT_4o_MINI\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments.launch_exp import (\n find_incomplete,\n non_dummy_count,\n run_experiments,\n)\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.experiments.study import Study\nfrom agentlab.llm.chat_api import CheatMiniWoBLLMArgs\n\n\ndef test_relaunch_study():\n study_dir = Path(__file__).parent.parent / \"data\" / \"test_study\"\n exp_args_list = find_incomplete(study_dir, include_errors=False)\n\n assert non_dummy_count(exp_args_list) == 1\n assert exp_args_list[0].env_args.task_name == \"miniwob.ascending-numbers\"\n\n exp_args_list = find_incomplete(study_dir, include_errors=True)\n\n assert non_dummy_count(exp_args_list) == 2\n\n\ndef _test_launch_system(backend=\"ray\", cause_timeout=False):\n if cause_timeout:\n wait_time = 10\n avg_step_timeout = 0.5\n else:\n wait_time = 0\n avg_step_timeout = 10\n\n exp_args_list = []\n for seed in range(3):\n exp_args_list.append(\n ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs(wait_time=wait_time),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=seed, max_steps=5),\n )\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n study_dir = Path(tmp_dir) / \"generic_agent_test\"\n run_experiments(\n n_jobs=2,\n exp_args_list=exp_args_list,\n study_dir=study_dir,\n parallel_backend=backend,\n avg_step_timeout=avg_step_timeout,\n )\n\n results_df = inspect_results.load_result_df(study_dir, progress_fn=None)\n assert len(results_df) == len(exp_args_list)\n\n for _, row in 
results_df.iterrows():\n if row.stack_trace is not None:\n print(row.stack_trace)\n if cause_timeout:\n # assert row.err_msg is not None\n assert math.isnan(row.cum_reward) or row.cum_reward == 0\n else:\n assert row.err_msg is None\n assert row.cum_reward == 1.0\n\n study_summary = inspect_results.summarize_study(results_df)\n assert len(study_summary) == 1\n assert study_summary.std_err.iloc[0] == 0\n\n if not cause_timeout:\n assert study_summary.n_completed.iloc[0] == \"3/3\"\n assert study_summary.avg_reward.iloc[0] == 1.0\n\n\ndef test_launch_system_joblib():\n _test_launch_system(backend=\"joblib\")\n\n\ndef test_launch_system_sequntial():\n _test_launch_system(backend=\"sequential\")\n\n\ndef test_launch_system_ray():\n _test_launch_system(backend=\"ray\")\n\n\n@pytest.mark.pricy\ndef test_timeout_ray():\n _test_launch_system(backend=\"ray\", cause_timeout=True)\n\n\n@pytest.mark.pricy\ndef test_4o_mini_on_miniwob_tiny_test():\n \"\"\"Run with `pytest -m pricy`.\"\"\"\n with tempfile.TemporaryDirectory() as tmp_dir:\n study = Study(agent_args=[AGENT_4o_MINI], benchmark=\"miniwob_tiny_test\", dir=tmp_dir)\n\n study.run(n_jobs=4)\n\n results_df = inspect_results.load_result_df(study.dir, progress_fn=None)\n\n for row in results_df.iterrows():\n if row[1].err_msg:\n print(row[1].err_msg)\n print(row[1].stack_trace)\n\n assert len(results_df) == len(study.exp_args_list)\n summary = inspect_results.summarize_study(results_df)\n print(summary)\n assert len(summary) == 1\n reward = summary.avg_reward.iloc[0]\n assert reward == 1.0\n\n\nif __name__ == \"__main__\":\n test_timeout_ray()\n # test_4o_mini_on_miniwob_tiny_test()\n # test_launch_system_ray()\n # test_launch_system_sequntial()","source_hash":"2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_launch_exp.test_relaunch_study","uri":"program://AgentLab/function/tests.experiments.test_launch_exp.test_relaunch_study#L20-L29","kind":"function","name":"test_relaunch_study","path":"tests/experiments/test_launch_exp.py","language":"python","start_line":20,"end_line":29,"context_start_line":1,"context_end_line":49,"code":"import math\nimport tempfile\nfrom pathlib import Path\n\nimport pytest\n\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_3_5, AGENT_4o_MINI\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments.launch_exp import (\n find_incomplete,\n non_dummy_count,\n run_experiments,\n)\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.experiments.study import Study\nfrom agentlab.llm.chat_api import CheatMiniWoBLLMArgs\n\n\ndef test_relaunch_study():\n study_dir = Path(__file__).parent.parent / \"data\" / \"test_study\"\n exp_args_list = find_incomplete(study_dir, include_errors=False)\n\n assert non_dummy_count(exp_args_list) == 1\n assert exp_args_list[0].env_args.task_name == \"miniwob.ascending-numbers\"\n\n exp_args_list = find_incomplete(study_dir, include_errors=True)\n\n assert non_dummy_count(exp_args_list) == 2\n\n\ndef _test_launch_system(backend=\"ray\", cause_timeout=False):\n if cause_timeout:\n wait_time = 10\n avg_step_timeout = 0.5\n else:\n wait_time = 0\n avg_step_timeout = 10\n\n exp_args_list = []\n for seed in range(3):\n exp_args_list.append(\n ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs(wait_time=wait_time),\n flags=FLAGS_GPT_3_5,\n ),\n 
env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=seed, max_steps=5),\n )","source_hash":"2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_launch_exp._test_launch_system","uri":"program://AgentLab/function/tests.experiments.test_launch_exp._test_launch_system#L32-L81","kind":"function","name":"_test_launch_system","path":"tests/experiments/test_launch_exp.py","language":"python","start_line":32,"end_line":81,"context_start_line":12,"context_end_line":101,"code":" non_dummy_count,\n run_experiments,\n)\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.experiments.study import Study\nfrom agentlab.llm.chat_api import CheatMiniWoBLLMArgs\n\n\ndef test_relaunch_study():\n study_dir = Path(__file__).parent.parent / \"data\" / \"test_study\"\n exp_args_list = find_incomplete(study_dir, include_errors=False)\n\n assert non_dummy_count(exp_args_list) == 1\n assert exp_args_list[0].env_args.task_name == \"miniwob.ascending-numbers\"\n\n exp_args_list = find_incomplete(study_dir, include_errors=True)\n\n assert non_dummy_count(exp_args_list) == 2\n\n\ndef _test_launch_system(backend=\"ray\", cause_timeout=False):\n if cause_timeout:\n wait_time = 10\n avg_step_timeout = 0.5\n else:\n wait_time = 0\n avg_step_timeout = 10\n\n exp_args_list = []\n for seed in range(3):\n exp_args_list.append(\n ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs(wait_time=wait_time),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=seed, max_steps=5),\n )\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n study_dir = Path(tmp_dir) / \"generic_agent_test\"\n run_experiments(\n n_jobs=2,\n exp_args_list=exp_args_list,\n study_dir=study_dir,\n parallel_backend=backend,\n avg_step_timeout=avg_step_timeout,\n )\n\n results_df = inspect_results.load_result_df(study_dir, progress_fn=None)\n assert len(results_df) == len(exp_args_list)\n\n for _, row in results_df.iterrows():\n if row.stack_trace is not None:\n print(row.stack_trace)\n if cause_timeout:\n # assert row.err_msg is not None\n assert math.isnan(row.cum_reward) or row.cum_reward == 0\n else:\n assert row.err_msg is None\n assert row.cum_reward == 1.0\n\n study_summary = inspect_results.summarize_study(results_df)\n assert len(study_summary) == 1\n assert study_summary.std_err.iloc[0] == 0\n\n if not cause_timeout:\n assert study_summary.n_completed.iloc[0] == \"3/3\"\n assert study_summary.avg_reward.iloc[0] == 1.0\n\n\ndef test_launch_system_joblib():\n _test_launch_system(backend=\"joblib\")\n\n\ndef test_launch_system_sequntial():\n _test_launch_system(backend=\"sequential\")\n\n\ndef test_launch_system_ray():\n _test_launch_system(backend=\"ray\")\n\n\n@pytest.mark.pricy\ndef test_timeout_ray():\n _test_launch_system(backend=\"ray\", cause_timeout=True)\n\n\n@pytest.mark.pricy","source_hash":"2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_launch_exp.test_launch_system_joblib","uri":"program://AgentLab/function/tests.experiments.test_launch_exp.test_launch_system_joblib#L84-L85","kind":"function","name":"test_launch_system_joblib","path":"tests/experiments/test_launch_exp.py","language":"python","start_line":84,"end_line":85,"context_start_line":64,"context_end_line":105,"code":"\n for _, row in results_df.iterrows():\n if row.stack_trace is not None:\n 
print(row.stack_trace)\n if cause_timeout:\n # assert row.err_msg is not None\n assert math.isnan(row.cum_reward) or row.cum_reward == 0\n else:\n assert row.err_msg is None\n assert row.cum_reward == 1.0\n\n study_summary = inspect_results.summarize_study(results_df)\n assert len(study_summary) == 1\n assert study_summary.std_err.iloc[0] == 0\n\n if not cause_timeout:\n assert study_summary.n_completed.iloc[0] == \"3/3\"\n assert study_summary.avg_reward.iloc[0] == 1.0\n\n\ndef test_launch_system_joblib():\n _test_launch_system(backend=\"joblib\")\n\n\ndef test_launch_system_sequntial():\n _test_launch_system(backend=\"sequential\")\n\n\ndef test_launch_system_ray():\n _test_launch_system(backend=\"ray\")\n\n\n@pytest.mark.pricy\ndef test_timeout_ray():\n _test_launch_system(backend=\"ray\", cause_timeout=True)\n\n\n@pytest.mark.pricy\ndef test_4o_mini_on_miniwob_tiny_test():\n \"\"\"Run with `pytest -m pricy`.\"\"\"\n with tempfile.TemporaryDirectory() as tmp_dir:\n study = Study(agent_args=[AGENT_4o_MINI], benchmark=\"miniwob_tiny_test\", dir=tmp_dir)","source_hash":"2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_launch_exp.test_launch_system_sequntial","uri":"program://AgentLab/function/tests.experiments.test_launch_exp.test_launch_system_sequntial#L88-L89","kind":"function","name":"test_launch_system_sequntial","path":"tests/experiments/test_launch_exp.py","language":"python","start_line":88,"end_line":89,"context_start_line":68,"context_end_line":109,"code":" if cause_timeout:\n # assert row.err_msg is not None\n assert math.isnan(row.cum_reward) or row.cum_reward == 0\n else:\n assert row.err_msg is None\n assert row.cum_reward == 1.0\n\n study_summary = inspect_results.summarize_study(results_df)\n assert len(study_summary) == 1\n assert study_summary.std_err.iloc[0] == 0\n\n if not cause_timeout:\n assert study_summary.n_completed.iloc[0] == \"3/3\"\n assert study_summary.avg_reward.iloc[0] == 1.0\n\n\ndef test_launch_system_joblib():\n _test_launch_system(backend=\"joblib\")\n\n\ndef test_launch_system_sequntial():\n _test_launch_system(backend=\"sequential\")\n\n\ndef test_launch_system_ray():\n _test_launch_system(backend=\"ray\")\n\n\n@pytest.mark.pricy\ndef test_timeout_ray():\n _test_launch_system(backend=\"ray\", cause_timeout=True)\n\n\n@pytest.mark.pricy\ndef test_4o_mini_on_miniwob_tiny_test():\n \"\"\"Run with `pytest -m pricy`.\"\"\"\n with tempfile.TemporaryDirectory() as tmp_dir:\n study = Study(agent_args=[AGENT_4o_MINI], benchmark=\"miniwob_tiny_test\", dir=tmp_dir)\n\n study.run(n_jobs=4)\n\n results_df = inspect_results.load_result_df(study.dir, progress_fn=None)","source_hash":"2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_launch_exp.test_launch_system_ray","uri":"program://AgentLab/function/tests.experiments.test_launch_exp.test_launch_system_ray#L92-L93","kind":"function","name":"test_launch_system_ray","path":"tests/experiments/test_launch_exp.py","language":"python","start_line":92,"end_line":93,"context_start_line":72,"context_end_line":113,"code":" assert row.err_msg is None\n assert row.cum_reward == 1.0\n\n study_summary = inspect_results.summarize_study(results_df)\n assert len(study_summary) == 1\n assert study_summary.std_err.iloc[0] == 0\n\n if not cause_timeout:\n assert study_summary.n_completed.iloc[0] == \"3/3\"\n assert 
study_summary.avg_reward.iloc[0] == 1.0\n\n\ndef test_launch_system_joblib():\n _test_launch_system(backend=\"joblib\")\n\n\ndef test_launch_system_sequntial():\n _test_launch_system(backend=\"sequential\")\n\n\ndef test_launch_system_ray():\n _test_launch_system(backend=\"ray\")\n\n\n@pytest.mark.pricy\ndef test_timeout_ray():\n _test_launch_system(backend=\"ray\", cause_timeout=True)\n\n\n@pytest.mark.pricy\ndef test_4o_mini_on_miniwob_tiny_test():\n \"\"\"Run with `pytest -m pricy`.\"\"\"\n with tempfile.TemporaryDirectory() as tmp_dir:\n study = Study(agent_args=[AGENT_4o_MINI], benchmark=\"miniwob_tiny_test\", dir=tmp_dir)\n\n study.run(n_jobs=4)\n\n results_df = inspect_results.load_result_df(study.dir, progress_fn=None)\n\n for row in results_df.iterrows():\n if row[1].err_msg:\n print(row[1].err_msg)","source_hash":"2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_launch_exp.test_timeout_ray","uri":"program://AgentLab/function/tests.experiments.test_launch_exp.test_timeout_ray#L97-L98","kind":"function","name":"test_timeout_ray","path":"tests/experiments/test_launch_exp.py","language":"python","start_line":97,"end_line":98,"context_start_line":77,"context_end_line":118,"code":" assert study_summary.std_err.iloc[0] == 0\n\n if not cause_timeout:\n assert study_summary.n_completed.iloc[0] == \"3/3\"\n assert study_summary.avg_reward.iloc[0] == 1.0\n\n\ndef test_launch_system_joblib():\n _test_launch_system(backend=\"joblib\")\n\n\ndef test_launch_system_sequntial():\n _test_launch_system(backend=\"sequential\")\n\n\ndef test_launch_system_ray():\n _test_launch_system(backend=\"ray\")\n\n\n@pytest.mark.pricy\ndef test_timeout_ray():\n _test_launch_system(backend=\"ray\", cause_timeout=True)\n\n\n@pytest.mark.pricy\ndef test_4o_mini_on_miniwob_tiny_test():\n \"\"\"Run with `pytest -m pricy`.\"\"\"\n with tempfile.TemporaryDirectory() as tmp_dir:\n study = Study(agent_args=[AGENT_4o_MINI], benchmark=\"miniwob_tiny_test\", dir=tmp_dir)\n\n study.run(n_jobs=4)\n\n results_df = inspect_results.load_result_df(study.dir, progress_fn=None)\n\n for row in results_df.iterrows():\n if row[1].err_msg:\n print(row[1].err_msg)\n print(row[1].stack_trace)\n\n assert len(results_df) == len(study.exp_args_list)\n summary = inspect_results.summarize_study(results_df)\n print(summary)","source_hash":"2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_launch_exp.test_4o_mini_on_miniwob_tiny_test","uri":"program://AgentLab/function/tests.experiments.test_launch_exp.test_4o_mini_on_miniwob_tiny_test#L102-L121","kind":"function","name":"test_4o_mini_on_miniwob_tiny_test","path":"tests/experiments/test_launch_exp.py","language":"python","start_line":102,"end_line":121,"context_start_line":82,"context_end_line":128,"code":"\n\ndef test_launch_system_joblib():\n _test_launch_system(backend=\"joblib\")\n\n\ndef test_launch_system_sequntial():\n _test_launch_system(backend=\"sequential\")\n\n\ndef test_launch_system_ray():\n _test_launch_system(backend=\"ray\")\n\n\n@pytest.mark.pricy\ndef test_timeout_ray():\n _test_launch_system(backend=\"ray\", cause_timeout=True)\n\n\n@pytest.mark.pricy\ndef test_4o_mini_on_miniwob_tiny_test():\n \"\"\"Run with `pytest -m pricy`.\"\"\"\n with tempfile.TemporaryDirectory() as tmp_dir:\n study = Study(agent_args=[AGENT_4o_MINI], benchmark=\"miniwob_tiny_test\", dir=tmp_dir)\n\n 
study.run(n_jobs=4)\n\n results_df = inspect_results.load_result_df(study.dir, progress_fn=None)\n\n for row in results_df.iterrows():\n if row[1].err_msg:\n print(row[1].err_msg)\n print(row[1].stack_trace)\n\n assert len(results_df) == len(study.exp_args_list)\n summary = inspect_results.summarize_study(results_df)\n print(summary)\n assert len(summary) == 1\n reward = summary.avg_reward.iloc[0]\n assert reward == 1.0\n\n\nif __name__ == \"__main__\":\n test_timeout_ray()\n # test_4o_mini_on_miniwob_tiny_test()\n # test_launch_system_ray()\n # test_launch_system_sequntial()","source_hash":"2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_multi_server","uri":"program://AgentLab/module/tests.experiments.test_multi_server#L1-L37","kind":"module","name":"tests.experiments.test_multi_server","path":"tests/experiments/test_multi_server.py","language":"python","start_line":1,"end_line":37,"context_start_line":1,"context_end_line":37,"code":"from agentlab.experiments.multi_server import WebArenaInstanceVars\nfrom browsergym.webarena.instance import WebArenaInstance\n\n\ndef test_webarena_multiserver():\n\n instance_1 = WebArenaInstanceVars(\n base_url=\"http://webarena1.eastus.cloudapp.azure.com\",\n shopping=\"8082/\",\n shopping_admin=\"8083/admin\",\n reddit=\"8080\",\n gitlab=\"9001\",\n wikipedia=\"8081/wikipedia_en_all_maxi_2022-05/A/User:The_other_Kiwix_guy/Landing\",\n map=\"443\",\n homepage=\"80\",\n full_reset=\"7565\",\n module_name=\"webarena\",\n prefix=\"WA_\",\n )\n\n instance_1.init()\n\n bgym_instance = WebArenaInstance()\n base_url_1 = bgym_instance.urls[\"reddit\"].rsplit(\":\", 1)[0]\n assert base_url_1 == instance_1.base_url\n\n instance_2 = instance_1.clone()\n instance_2.base_url = \"http://webarena2.eastus.cloudapp.azure.com\"\n instance_2.init()\n\n bgym_instance = WebArenaInstance()\n base_url_2 = bgym_instance.urls[\"reddit\"].rsplit(\":\", 1)[0]\n assert base_url_2 == instance_2.base_url\n\n\nif __name__ == \"__main__\":\n test_webarena_multiserver()","source_hash":"a2e6320bd23ca7f9804927c7f751ae02a9d2ebc372b66404a1cc1c46ccf4d86f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_multi_server.test_webarena_multiserver","uri":"program://AgentLab/function/tests.experiments.test_multi_server.test_webarena_multiserver#L5-L33","kind":"function","name":"test_webarena_multiserver","path":"tests/experiments/test_multi_server.py","language":"python","start_line":5,"end_line":33,"context_start_line":1,"context_end_line":37,"code":"from agentlab.experiments.multi_server import WebArenaInstanceVars\nfrom browsergym.webarena.instance import WebArenaInstance\n\n\ndef test_webarena_multiserver():\n\n instance_1 = WebArenaInstanceVars(\n base_url=\"http://webarena1.eastus.cloudapp.azure.com\",\n shopping=\"8082/\",\n shopping_admin=\"8083/admin\",\n reddit=\"8080\",\n gitlab=\"9001\",\n wikipedia=\"8081/wikipedia_en_all_maxi_2022-05/A/User:The_other_Kiwix_guy/Landing\",\n map=\"443\",\n homepage=\"80\",\n full_reset=\"7565\",\n module_name=\"webarena\",\n prefix=\"WA_\",\n )\n\n instance_1.init()\n\n bgym_instance = WebArenaInstance()\n base_url_1 = bgym_instance.urls[\"reddit\"].rsplit(\":\", 1)[0]\n assert base_url_1 == instance_1.base_url\n\n instance_2 = instance_1.clone()\n instance_2.base_url = \"http://webarena2.eastus.cloudapp.azure.com\"\n instance_2.init()\n\n bgym_instance = WebArenaInstance()\n base_url_2 = 
bgym_instance.urls[\"reddit\"].rsplit(\":\", 1)[0]\n assert base_url_2 == instance_2.base_url\n\n\nif __name__ == \"__main__\":\n test_webarena_multiserver()","source_hash":"a2e6320bd23ca7f9804927c7f751ae02a9d2ebc372b66404a1cc1c46ccf4d86f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_ray","uri":"program://AgentLab/module/tests.experiments.test_ray#L1-L80","kind":"module","name":"tests.experiments.test_ray","path":"tests/experiments/test_ray.py","language":"python","start_line":1,"end_line":80,"context_start_line":1,"context_end_line":80,"code":"import bgym\nimport pytest\nimport ray\nfrom flaky import flaky\n\nfrom agentlab.experiments.exp_utils import MockedExpArgs, add_dependencies\nfrom agentlab.experiments.graph_execution_ray import execute_task_graph\n\nTASK_TIME = 3\n\n\n@flaky(max_runs=3, min_passes=1)\ndef test_execute_task_graph():\n # Define a list of ExpArgs with dependencies\n exp_args_list = [\n MockedExpArgs(exp_id=\"task1\", depends_on=[]),\n MockedExpArgs(exp_id=\"task2\", depends_on=[\"task1\"]),\n MockedExpArgs(exp_id=\"task3\", depends_on=[\"task1\"]),\n MockedExpArgs(exp_id=\"task4\", depends_on=[\"task2\", \"task3\"]),\n ]\n\n ray.init(num_cpus=4)\n results = execute_task_graph(exp_args_list)\n ray.shutdown()\n\n exp_args_list = [results[task_id] for task_id in [\"task1\", \"task2\", \"task3\", \"task4\"]]\n\n # Verify that all tasks were executed in the proper order\n assert exp_args_list[0].start_time < exp_args_list[1].start_time\n assert exp_args_list[0].start_time < exp_args_list[2].start_time\n assert exp_args_list[1].end_time < exp_args_list[3].start_time\n assert exp_args_list[2].end_time < exp_args_list[3].start_time\n\n # Verify that parallel tasks (task2 and task3) started within a short time of each other\n # TODO: replace with non flaky check\n # parallel_start_diff = abs(exp_args_list[1].start_time - exp_args_list[2].start_time)\n # print(f\"parallel_start_diff: {parallel_start_diff}\")\n # assert parallel_start_diff < 2, \"Parallel tasks should start within 2 seconds of each other\"\n\n # Ensure that the entire task graph took the expected amount of time\n # TODO: replace with non flaky check\n # total_time = exp_args_list[-1].end_time - exp_args_list[0].start_time\n # # Since the critical path involves at least 1.5 seconds of work\n # assert total_time >= TASK_TIME * 3, \"Total time should be at least 3 times the task time\"\n\n\ndef test_add_dependencies():\n # Prepare a simple list of ExpArgs\n\n def make_exp_args(task_name, exp_id):\n return bgym.ExpArgs(\n agent_args=None, env_args=bgym.EnvArgs(task_name=task_name), exp_id=exp_id\n )\n\n exp_args_list = [\n make_exp_args(\"task1\", \"1\"),\n make_exp_args(\"task2\", \"2\"),\n make_exp_args(\"task3\", \"3\"),\n ]\n\n # Define simple task_dependencies\n task_dependencies = {\"task1\": [\"task2\"], \"task2\": [], \"task3\": [\"task1\"]}\n\n # Call the function\n modified_list = add_dependencies(exp_args_list, task_dependencies)\n\n # Verify dependencies\n assert modified_list[0].depends_on == (\"2\",) # task1 depends on task2\n assert modified_list[1].depends_on == () # task2 has no dependencies\n assert modified_list[2].depends_on == (\"1\",) # task3 depends on task1\n\n # assert raise if task_dependencies is wrong\n task_dependencies = {\"task1\": [\"task2\"], \"task2\": [], \"task4\": [\"task3\"]}\n with pytest.raises(ValueError):\n add_dependencies(exp_args_list, task_dependencies)\n\n\nif __name__ == \"__main__\":\n test_execute_task_graph()\n # 
test_add_dependencies()","source_hash":"cf7a13d302b1156c30cdf415e6286413e1c5d07dc70e8f5cb8b04cc3ec89cae1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_ray.test_execute_task_graph","uri":"program://AgentLab/function/tests.experiments.test_ray.test_execute_task_graph#L13-L32","kind":"function","name":"test_execute_task_graph","path":"tests/experiments/test_ray.py","language":"python","start_line":13,"end_line":32,"context_start_line":1,"context_end_line":52,"code":"import bgym\nimport pytest\nimport ray\nfrom flaky import flaky\n\nfrom agentlab.experiments.exp_utils import MockedExpArgs, add_dependencies\nfrom agentlab.experiments.graph_execution_ray import execute_task_graph\n\nTASK_TIME = 3\n\n\n@flaky(max_runs=3, min_passes=1)\ndef test_execute_task_graph():\n # Define a list of ExpArgs with dependencies\n exp_args_list = [\n MockedExpArgs(exp_id=\"task1\", depends_on=[]),\n MockedExpArgs(exp_id=\"task2\", depends_on=[\"task1\"]),\n MockedExpArgs(exp_id=\"task3\", depends_on=[\"task1\"]),\n MockedExpArgs(exp_id=\"task4\", depends_on=[\"task2\", \"task3\"]),\n ]\n\n ray.init(num_cpus=4)\n results = execute_task_graph(exp_args_list)\n ray.shutdown()\n\n exp_args_list = [results[task_id] for task_id in [\"task1\", \"task2\", \"task3\", \"task4\"]]\n\n # Verify that all tasks were executed in the proper order\n assert exp_args_list[0].start_time < exp_args_list[1].start_time\n assert exp_args_list[0].start_time < exp_args_list[2].start_time\n assert exp_args_list[1].end_time < exp_args_list[3].start_time\n assert exp_args_list[2].end_time < exp_args_list[3].start_time\n\n # Verify that parallel tasks (task2 and task3) started within a short time of each other\n # TODO: replace with non flaky check\n # parallel_start_diff = abs(exp_args_list[1].start_time - exp_args_list[2].start_time)\n # print(f\"parallel_start_diff: {parallel_start_diff}\")\n # assert parallel_start_diff < 2, \"Parallel tasks should start within 2 seconds of each other\"\n\n # Ensure that the entire task graph took the expected amount of time\n # TODO: replace with non flaky check\n # total_time = exp_args_list[-1].end_time - exp_args_list[0].start_time\n # # Since the critical path involves at least 1.5 seconds of work\n # assert total_time >= TASK_TIME * 3, \"Total time should be at least 3 times the task time\"\n\n\ndef test_add_dependencies():\n # Prepare a simple list of ExpArgs\n\n def make_exp_args(task_name, exp_id):\n return bgym.ExpArgs(\n agent_args=None, env_args=bgym.EnvArgs(task_name=task_name), exp_id=exp_id","source_hash":"cf7a13d302b1156c30cdf415e6286413e1c5d07dc70e8f5cb8b04cc3ec89cae1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_ray.test_add_dependencies","uri":"program://AgentLab/function/tests.experiments.test_ray.test_add_dependencies#L47-L75","kind":"function","name":"test_add_dependencies","path":"tests/experiments/test_ray.py","language":"python","start_line":47,"end_line":75,"context_start_line":27,"context_end_line":80,"code":"\n # Verify that all tasks were executed in the proper order\n assert exp_args_list[0].start_time < exp_args_list[1].start_time\n assert exp_args_list[0].start_time < exp_args_list[2].start_time\n assert exp_args_list[1].end_time < exp_args_list[3].start_time\n assert exp_args_list[2].end_time < exp_args_list[3].start_time\n\n # Verify that parallel tasks (task2 and task3) started within a short time of each other\n # TODO: replace with non flaky check\n # parallel_start_diff = 
abs(exp_args_list[1].start_time - exp_args_list[2].start_time)\n # print(f\"parallel_start_diff: {parallel_start_diff}\")\n # assert parallel_start_diff < 2, \"Parallel tasks should start within 2 seconds of each other\"\n\n # Ensure that the entire task graph took the expected amount of time\n # TODO: replace with non flaky check\n # total_time = exp_args_list[-1].end_time - exp_args_list[0].start_time\n # # Since the critical path involves at least 1.5 seconds of work\n # assert total_time >= TASK_TIME * 3, \"Total time should be at least 3 times the task time\"\n\n\ndef test_add_dependencies():\n # Prepare a simple list of ExpArgs\n\n def make_exp_args(task_name, exp_id):\n return bgym.ExpArgs(\n agent_args=None, env_args=bgym.EnvArgs(task_name=task_name), exp_id=exp_id\n )\n\n exp_args_list = [\n make_exp_args(\"task1\", \"1\"),\n make_exp_args(\"task2\", \"2\"),\n make_exp_args(\"task3\", \"3\"),\n ]\n\n # Define simple task_dependencies\n task_dependencies = {\"task1\": [\"task2\"], \"task2\": [], \"task3\": [\"task1\"]}\n\n # Call the function\n modified_list = add_dependencies(exp_args_list, task_dependencies)\n\n # Verify dependencies\n assert modified_list[0].depends_on == (\"2\",) # task1 depends on task2\n assert modified_list[1].depends_on == () # task2 has no dependencies\n assert modified_list[2].depends_on == (\"1\",) # task3 depends on task1\n\n # assert raise if task_dependencies is wrong\n task_dependencies = {\"task1\": [\"task2\"], \"task2\": [], \"task4\": [\"task3\"]}\n with pytest.raises(ValueError):\n add_dependencies(exp_args_list, task_dependencies)\n\n\nif __name__ == \"__main__\":\n test_execute_task_graph()\n # test_add_dependencies()","source_hash":"cf7a13d302b1156c30cdf415e6286413e1c5d07dc70e8f5cb8b04cc3ec89cae1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_ray.make_exp_args","uri":"program://AgentLab/function/tests.experiments.test_ray.make_exp_args#L50-L53","kind":"function","name":"make_exp_args","path":"tests/experiments/test_ray.py","language":"python","start_line":50,"end_line":53,"context_start_line":30,"context_end_line":73,"code":" assert exp_args_list[0].start_time < exp_args_list[2].start_time\n assert exp_args_list[1].end_time < exp_args_list[3].start_time\n assert exp_args_list[2].end_time < exp_args_list[3].start_time\n\n # Verify that parallel tasks (task2 and task3) started within a short time of each other\n # TODO: replace with non flaky check\n # parallel_start_diff = abs(exp_args_list[1].start_time - exp_args_list[2].start_time)\n # print(f\"parallel_start_diff: {parallel_start_diff}\")\n # assert parallel_start_diff < 2, \"Parallel tasks should start within 2 seconds of each other\"\n\n # Ensure that the entire task graph took the expected amount of time\n # TODO: replace with non flaky check\n # total_time = exp_args_list[-1].end_time - exp_args_list[0].start_time\n # # Since the critical path involves at least 1.5 seconds of work\n # assert total_time >= TASK_TIME * 3, \"Total time should be at least 3 times the task time\"\n\n\ndef test_add_dependencies():\n # Prepare a simple list of ExpArgs\n\n def make_exp_args(task_name, exp_id):\n return bgym.ExpArgs(\n agent_args=None, env_args=bgym.EnvArgs(task_name=task_name), exp_id=exp_id\n )\n\n exp_args_list = [\n make_exp_args(\"task1\", \"1\"),\n make_exp_args(\"task2\", \"2\"),\n make_exp_args(\"task3\", \"3\"),\n ]\n\n # Define simple task_dependencies\n task_dependencies = {\"task1\": [\"task2\"], \"task2\": [], \"task3\": [\"task1\"]}\n\n # Call the 
function\n modified_list = add_dependencies(exp_args_list, task_dependencies)\n\n # Verify dependencies\n assert modified_list[0].depends_on == (\"2\",) # task1 depends on task2\n assert modified_list[1].depends_on == () # task2 has no dependencies\n assert modified_list[2].depends_on == (\"1\",) # task3 depends on task1\n\n # assert raise if task_dependencies is wrong\n task_dependencies = {\"task1\": [\"task2\"], \"task2\": [], \"task4\": [\"task3\"]}","source_hash":"cf7a13d302b1156c30cdf415e6286413e1c5d07dc70e8f5cb8b04cc3ec89cae1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_args","uri":"program://AgentLab/module/tests.experiments.test_args#L1-L159","kind":"module","name":"tests.experiments.test_args","path":"tests/experiments/test_args.py","language":"python","start_line":1,"end_line":159,"context_start_line":1,"context_end_line":159,"code":"from ast import mod\nfrom dataclasses import dataclass\nfrom agentlab.experiments.args import (\n expand_cross_product,\n CrossProd,\n Choice,\n make_progression_study,\n sample_args,\n make_ablation_study,\n)\n\n\n@dataclass\nclass LLMArgsTest:\n model_name: str = \"model1\"\n temperature: float = 0.1\n\n\n@dataclass\nclass ExpArgsTest:\n llm_args: LLMArgsTest\n task_name: str = \"task1\"\n n_episode: int = 10\n\n\ndef test_cross_product():\n exp_args = ExpArgsTest(\n n_episode=CrossProd([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n expanded_args_list = expand_cross_product(exp_args)\n assert len(expanded_args_list) == 6\n\n variables = [(args.n_episode, args.llm_args.model_name) for args in expanded_args_list]\n variables.sort()\n assert variables == [\n (1, \"model1\"),\n (1, \"model2\"),\n (2, \"model1\"),\n (2, \"model2\"),\n (3, \"model1\"),\n (3, \"model2\"),\n ]\n\n\ndef test_cross_product_dict():\n exp_args_dict = dict(\n n_episode=CrossProd([1, 2, 3]),\n llm_args=dict(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n expanded_args_list = expand_cross_product(exp_args_dict)\n assert len(expanded_args_list) == 6\n\n variables = [(args[\"n_episode\"], args[\"llm_args\"][\"model_name\"]) for args in expanded_args_list]\n variables.sort()\n assert variables == [\n (1, \"model1\"),\n (1, \"model2\"),\n (2, \"model1\"),\n (2, \"model2\"),\n (3, \"model1\"),\n (3, \"model2\"),\n ]\n\n\ndef test_sample():\n exp_args = ExpArgsTest(\n n_episode=Choice([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=Choice([\"model1\", \"model2\"]),\n ),\n )\n exp_args_sample = sample_args(exp_args, 3)\n assert len(exp_args_sample) == 3\n for exp_args in exp_args_sample:\n assert exp_args.n_episode in [1, 2, 3]\n assert exp_args.llm_args.model_name in [\"model1\", \"model2\"]\n assert exp_args.llm_args.temperature == 0.1\n\n\ndef test_sample_and_cross_prod():\n exp_args = ExpArgsTest(\n n_episode=Choice([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n def assert_ok(exp_args_list):\n assert len(exp_args_list) == 6\n for exp_args in exp_args_list:\n assert exp_args.n_episode in [1, 2, 3]\n assert exp_args.llm_args.model_name in [\"model1\", \"model2\"]\n assert exp_args.llm_args.temperature == 0.1\n\n exp_args_list_1 = expand_cross_product(sample_args(exp_args, 3))\n assert_ok(exp_args_list_1)\n exp_args_list_2 = sample_args(expand_cross_product(exp_args), 3)\n assert_ok(exp_args_list_2)\n\n\ndef test_make_progression_study():\n ablation = make_progression_study(\n start_point=LLMArgsTest(\n model_name=\"model1\",\n 
temperature=0.1,\n ),\n changes=[\n (\"model_name\", \"model2\"),\n (\"temperature\", 0.2),\n ],\n )\n\n configs = expand_cross_product(\n ExpArgsTest(\n n_episode=CrossProd([1, 2]),\n llm_args=ablation,\n )\n )\n\n assert len(configs) == 6\n params = [(config.llm_args.model_name, config.llm_args.temperature) for config in configs]\n params = list(set(params))\n params.sort()\n\n assert params == [(\"model1\", 0.1), (\"model2\", 0.1), (\"model2\", 0.2)]\n\n\ndef test_make_ablation_study():\n ablation = make_ablation_study(\n start_point=LLMArgsTest(\n model_name=\"model1\",\n temperature=0.1,\n ),\n changes=[\n (\"model_name\", \"model2\"),\n (\"temperature\", 0.2),\n ],\n )\n\n configs = expand_cross_product(\n ExpArgsTest(\n n_episode=CrossProd([1, 2]),\n llm_args=ablation,\n )\n )\n\n assert len(configs) == 6\n params = [(config.llm_args.model_name, config.llm_args.temperature) for config in configs]\n params = list(set(params))\n params.sort()\n\n assert params == [(\"model1\", 0.1), (\"model1\", 0.2), (\"model2\", 0.1)]","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_args.LLMArgsTest","uri":"program://AgentLab/class/tests.experiments.test_args.LLMArgsTest#L14-L16","kind":"class","name":"LLMArgsTest","path":"tests/experiments/test_args.py","language":"python","start_line":14,"end_line":16,"context_start_line":1,"context_end_line":36,"code":"from ast import mod\nfrom dataclasses import dataclass\nfrom agentlab.experiments.args import (\n expand_cross_product,\n CrossProd,\n Choice,\n make_progression_study,\n sample_args,\n make_ablation_study,\n)\n\n\n@dataclass\nclass LLMArgsTest:\n model_name: str = \"model1\"\n temperature: float = 0.1\n\n\n@dataclass\nclass ExpArgsTest:\n llm_args: LLMArgsTest\n task_name: str = \"task1\"\n n_episode: int = 10\n\n\ndef test_cross_product():\n exp_args = ExpArgsTest(\n n_episode=CrossProd([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n expanded_args_list = expand_cross_product(exp_args)\n assert len(expanded_args_list) == 6\n","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_args.ExpArgsTest","uri":"program://AgentLab/class/tests.experiments.test_args.ExpArgsTest#L20-L23","kind":"class","name":"ExpArgsTest","path":"tests/experiments/test_args.py","language":"python","start_line":20,"end_line":23,"context_start_line":1,"context_end_line":43,"code":"from ast import mod\nfrom dataclasses import dataclass\nfrom agentlab.experiments.args import (\n expand_cross_product,\n CrossProd,\n Choice,\n make_progression_study,\n sample_args,\n make_ablation_study,\n)\n\n\n@dataclass\nclass LLMArgsTest:\n model_name: str = \"model1\"\n temperature: float = 0.1\n\n\n@dataclass\nclass ExpArgsTest:\n llm_args: LLMArgsTest\n task_name: str = \"task1\"\n n_episode: int = 10\n\n\ndef test_cross_product():\n exp_args = ExpArgsTest(\n n_episode=CrossProd([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n expanded_args_list = expand_cross_product(exp_args)\n assert len(expanded_args_list) == 6\n\n variables = [(args.n_episode, args.llm_args.model_name) for args in expanded_args_list]\n variables.sort()\n assert variables == [\n (1, \"model1\"),\n (1, \"model2\"),\n (2, \"model1\"),\n (2, 
\"model2\"),","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_args.test_cross_product","uri":"program://AgentLab/function/tests.experiments.test_args.test_cross_product#L26-L46","kind":"function","name":"test_cross_product","path":"tests/experiments/test_args.py","language":"python","start_line":26,"end_line":46,"context_start_line":6,"context_end_line":66,"code":" Choice,\n make_progression_study,\n sample_args,\n make_ablation_study,\n)\n\n\n@dataclass\nclass LLMArgsTest:\n model_name: str = \"model1\"\n temperature: float = 0.1\n\n\n@dataclass\nclass ExpArgsTest:\n llm_args: LLMArgsTest\n task_name: str = \"task1\"\n n_episode: int = 10\n\n\ndef test_cross_product():\n exp_args = ExpArgsTest(\n n_episode=CrossProd([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n expanded_args_list = expand_cross_product(exp_args)\n assert len(expanded_args_list) == 6\n\n variables = [(args.n_episode, args.llm_args.model_name) for args in expanded_args_list]\n variables.sort()\n assert variables == [\n (1, \"model1\"),\n (1, \"model2\"),\n (2, \"model1\"),\n (2, \"model2\"),\n (3, \"model1\"),\n (3, \"model2\"),\n ]\n\n\ndef test_cross_product_dict():\n exp_args_dict = dict(\n n_episode=CrossProd([1, 2, 3]),\n llm_args=dict(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n expanded_args_list = expand_cross_product(exp_args_dict)\n assert len(expanded_args_list) == 6\n\n variables = [(args[\"n_episode\"], args[\"llm_args\"][\"model_name\"]) for args in expanded_args_list]\n variables.sort()\n assert variables == [\n (1, \"model1\"),\n (1, \"model2\"),\n (2, \"model1\"),\n (2, \"model2\"),","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_args.test_cross_product_dict","uri":"program://AgentLab/function/tests.experiments.test_args.test_cross_product_dict#L49-L69","kind":"function","name":"test_cross_product_dict","path":"tests/experiments/test_args.py","language":"python","start_line":49,"end_line":69,"context_start_line":29,"context_end_line":89,"code":" llm_args=LLMArgsTest(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n expanded_args_list = expand_cross_product(exp_args)\n assert len(expanded_args_list) == 6\n\n variables = [(args.n_episode, args.llm_args.model_name) for args in expanded_args_list]\n variables.sort()\n assert variables == [\n (1, \"model1\"),\n (1, \"model2\"),\n (2, \"model1\"),\n (2, \"model2\"),\n (3, \"model1\"),\n (3, \"model2\"),\n ]\n\n\ndef test_cross_product_dict():\n exp_args_dict = dict(\n n_episode=CrossProd([1, 2, 3]),\n llm_args=dict(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n expanded_args_list = expand_cross_product(exp_args_dict)\n assert len(expanded_args_list) == 6\n\n variables = [(args[\"n_episode\"], args[\"llm_args\"][\"model_name\"]) for args in expanded_args_list]\n variables.sort()\n assert variables == [\n (1, \"model1\"),\n (1, \"model2\"),\n (2, \"model1\"),\n (2, \"model2\"),\n (3, \"model1\"),\n (3, \"model2\"),\n ]\n\n\ndef test_sample():\n exp_args = ExpArgsTest(\n n_episode=Choice([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=Choice([\"model1\", \"model2\"]),\n ),\n )\n exp_args_sample = sample_args(exp_args, 3)\n assert len(exp_args_sample) == 3\n for exp_args in exp_args_sample:\n assert exp_args.n_episode in [1, 2, 3]\n assert 
exp_args.llm_args.model_name in [\"model1\", \"model2\"]\n assert exp_args.llm_args.temperature == 0.1\n\n\ndef test_sample_and_cross_prod():\n exp_args = ExpArgsTest(\n n_episode=Choice([1, 2, 3]),","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_args.test_sample","uri":"program://AgentLab/function/tests.experiments.test_args.test_sample#L72-L84","kind":"function","name":"test_sample","path":"tests/experiments/test_args.py","language":"python","start_line":72,"end_line":84,"context_start_line":52,"context_end_line":104,"code":" llm_args=dict(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n expanded_args_list = expand_cross_product(exp_args_dict)\n assert len(expanded_args_list) == 6\n\n variables = [(args[\"n_episode\"], args[\"llm_args\"][\"model_name\"]) for args in expanded_args_list]\n variables.sort()\n assert variables == [\n (1, \"model1\"),\n (1, \"model2\"),\n (2, \"model1\"),\n (2, \"model2\"),\n (3, \"model1\"),\n (3, \"model2\"),\n ]\n\n\ndef test_sample():\n exp_args = ExpArgsTest(\n n_episode=Choice([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=Choice([\"model1\", \"model2\"]),\n ),\n )\n exp_args_sample = sample_args(exp_args, 3)\n assert len(exp_args_sample) == 3\n for exp_args in exp_args_sample:\n assert exp_args.n_episode in [1, 2, 3]\n assert exp_args.llm_args.model_name in [\"model1\", \"model2\"]\n assert exp_args.llm_args.temperature == 0.1\n\n\ndef test_sample_and_cross_prod():\n exp_args = ExpArgsTest(\n n_episode=Choice([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n def assert_ok(exp_args_list):\n assert len(exp_args_list) == 6\n for exp_args in exp_args_list:\n assert exp_args.n_episode in [1, 2, 3]\n assert exp_args.llm_args.model_name in [\"model1\", \"model2\"]\n assert exp_args.llm_args.temperature == 0.1\n\n exp_args_list_1 = expand_cross_product(sample_args(exp_args, 3))\n assert_ok(exp_args_list_1)\n exp_args_list_2 = sample_args(expand_cross_product(exp_args), 3)","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_args.test_sample_and_cross_prod","uri":"program://AgentLab/function/tests.experiments.test_args.test_sample_and_cross_prod#L87-L105","kind":"function","name":"test_sample_and_cross_prod","path":"tests/experiments/test_args.py","language":"python","start_line":87,"end_line":105,"context_start_line":67,"context_end_line":125,"code":" (3, \"model1\"),\n (3, \"model2\"),\n ]\n\n\ndef test_sample():\n exp_args = ExpArgsTest(\n n_episode=Choice([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=Choice([\"model1\", \"model2\"]),\n ),\n )\n exp_args_sample = sample_args(exp_args, 3)\n assert len(exp_args_sample) == 3\n for exp_args in exp_args_sample:\n assert exp_args.n_episode in [1, 2, 3]\n assert exp_args.llm_args.model_name in [\"model1\", \"model2\"]\n assert exp_args.llm_args.temperature == 0.1\n\n\ndef test_sample_and_cross_prod():\n exp_args = ExpArgsTest(\n n_episode=Choice([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n def assert_ok(exp_args_list):\n assert len(exp_args_list) == 6\n for exp_args in exp_args_list:\n assert exp_args.n_episode in [1, 2, 3]\n assert exp_args.llm_args.model_name in [\"model1\", \"model2\"]\n assert exp_args.llm_args.temperature == 0.1\n\n exp_args_list_1 = 
expand_cross_product(sample_args(exp_args, 3))\n assert_ok(exp_args_list_1)\n exp_args_list_2 = sample_args(expand_cross_product(exp_args), 3)\n assert_ok(exp_args_list_2)\n\n\ndef test_make_progression_study():\n ablation = make_progression_study(\n start_point=LLMArgsTest(\n model_name=\"model1\",\n temperature=0.1,\n ),\n changes=[\n (\"model_name\", \"model2\"),\n (\"temperature\", 0.2),\n ],\n )\n\n configs = expand_cross_product(\n ExpArgsTest(\n n_episode=CrossProd([1, 2]),\n llm_args=ablation,\n )\n )","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_args.test_make_progression_study","uri":"program://AgentLab/function/tests.experiments.test_args.test_make_progression_study#L108-L132","kind":"function","name":"test_make_progression_study","path":"tests/experiments/test_args.py","language":"python","start_line":108,"end_line":132,"context_start_line":88,"context_end_line":152,"code":" exp_args = ExpArgsTest(\n n_episode=Choice([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n def assert_ok(exp_args_list):\n assert len(exp_args_list) == 6\n for exp_args in exp_args_list:\n assert exp_args.n_episode in [1, 2, 3]\n assert exp_args.llm_args.model_name in [\"model1\", \"model2\"]\n assert exp_args.llm_args.temperature == 0.1\n\n exp_args_list_1 = expand_cross_product(sample_args(exp_args, 3))\n assert_ok(exp_args_list_1)\n exp_args_list_2 = sample_args(expand_cross_product(exp_args), 3)\n assert_ok(exp_args_list_2)\n\n\ndef test_make_progression_study():\n ablation = make_progression_study(\n start_point=LLMArgsTest(\n model_name=\"model1\",\n temperature=0.1,\n ),\n changes=[\n (\"model_name\", \"model2\"),\n (\"temperature\", 0.2),\n ],\n )\n\n configs = expand_cross_product(\n ExpArgsTest(\n n_episode=CrossProd([1, 2]),\n llm_args=ablation,\n )\n )\n\n assert len(configs) == 6\n params = [(config.llm_args.model_name, config.llm_args.temperature) for config in configs]\n params = list(set(params))\n params.sort()\n\n assert params == [(\"model1\", 0.1), (\"model2\", 0.1), (\"model2\", 0.2)]\n\n\ndef test_make_ablation_study():\n ablation = make_ablation_study(\n start_point=LLMArgsTest(\n model_name=\"model1\",\n temperature=0.1,\n ),\n changes=[\n (\"model_name\", \"model2\"),\n (\"temperature\", 0.2),\n ],\n )\n\n configs = expand_cross_product(\n ExpArgsTest(\n n_episode=CrossProd([1, 2]),\n llm_args=ablation,\n )\n )","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_args.test_make_ablation_study","uri":"program://AgentLab/function/tests.experiments.test_args.test_make_ablation_study#L135-L159","kind":"function","name":"test_make_ablation_study","path":"tests/experiments/test_args.py","language":"python","start_line":135,"end_line":159,"context_start_line":115,"context_end_line":159,"code":" (\"model_name\", \"model2\"),\n (\"temperature\", 0.2),\n ],\n )\n\n configs = expand_cross_product(\n ExpArgsTest(\n n_episode=CrossProd([1, 2]),\n llm_args=ablation,\n )\n )\n\n assert len(configs) == 6\n params = [(config.llm_args.model_name, config.llm_args.temperature) for config in configs]\n params = list(set(params))\n params.sort()\n\n assert params == [(\"model1\", 0.1), (\"model2\", 0.1), (\"model2\", 0.2)]\n\n\ndef test_make_ablation_study():\n ablation = make_ablation_study(\n start_point=LLMArgsTest(\n 
model_name=\"model1\",\n temperature=0.1,\n ),\n changes=[\n (\"model_name\", \"model2\"),\n (\"temperature\", 0.2),\n ],\n )\n\n configs = expand_cross_product(\n ExpArgsTest(\n n_episode=CrossProd([1, 2]),\n llm_args=ablation,\n )\n )\n\n assert len(configs) == 6\n params = [(config.llm_args.model_name, config.llm_args.temperature) for config in configs]\n params = list(set(params))\n params.sort()\n\n assert params == [(\"model1\", 0.1), (\"model1\", 0.2), (\"model2\", 0.1)]","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_args.assert_ok","uri":"program://AgentLab/function/tests.experiments.test_args.assert_ok#L95-L100","kind":"function","name":"assert_ok","path":"tests/experiments/test_args.py","language":"python","start_line":95,"end_line":100,"context_start_line":75,"context_end_line":120,"code":" llm_args=LLMArgsTest(\n model_name=Choice([\"model1\", \"model2\"]),\n ),\n )\n exp_args_sample = sample_args(exp_args, 3)\n assert len(exp_args_sample) == 3\n for exp_args in exp_args_sample:\n assert exp_args.n_episode in [1, 2, 3]\n assert exp_args.llm_args.model_name in [\"model1\", \"model2\"]\n assert exp_args.llm_args.temperature == 0.1\n\n\ndef test_sample_and_cross_prod():\n exp_args = ExpArgsTest(\n n_episode=Choice([1, 2, 3]),\n llm_args=LLMArgsTest(\n model_name=CrossProd([\"model1\", \"model2\"]),\n ),\n )\n\n def assert_ok(exp_args_list):\n assert len(exp_args_list) == 6\n for exp_args in exp_args_list:\n assert exp_args.n_episode in [1, 2, 3]\n assert exp_args.llm_args.model_name in [\"model1\", \"model2\"]\n assert exp_args.llm_args.temperature == 0.1\n\n exp_args_list_1 = expand_cross_product(sample_args(exp_args, 3))\n assert_ok(exp_args_list_1)\n exp_args_list_2 = sample_args(expand_cross_product(exp_args), 3)\n assert_ok(exp_args_list_2)\n\n\ndef test_make_progression_study():\n ablation = make_progression_study(\n start_point=LLMArgsTest(\n model_name=\"model1\",\n temperature=0.1,\n ),\n changes=[\n (\"model_name\", \"model2\"),\n (\"temperature\", 0.2),\n ],\n )\n\n configs = expand_cross_product(","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_reproducibility_util","uri":"program://AgentLab/module/tests.experiments.test_reproducibility_util#L1-L106","kind":"module","name":"tests.experiments.test_reproducibility_util","path":"tests/experiments/test_reproducibility_util.py","language":"python","start_line":1,"end_line":106,"context_start_line":1,"context_end_line":106,"code":"import json\nimport tempfile\nimport time\nfrom pathlib import Path\n\nimport bgym\nimport pytest\nfrom bgym import DEFAULT_BENCHMARKS\n\nfrom agentlab.agents.generic_agent import AGENT_4o_MINI\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments import reproducibility_util\n\n\n@pytest.mark.parametrize(\n \"benchmark_name\",\n [\"miniwob\", \"workarena_l1\", \"webarena\", \"visualwebarena\"],\n)\ndef test_get_reproducibility_info(benchmark_name):\n\n benchmark = DEFAULT_BENCHMARKS[benchmark_name]()\n\n info = reproducibility_util.get_reproducibility_info(\n \"test_agent\", benchmark, \"test_id\", ignore_changes=True\n )\n\n print(\"reproducibility info:\")\n print(json.dumps(info, indent=4))\n\n # assert keys in info\n assert \"git_user\" in info\n assert \"benchmark\" in info\n assert \"benchmark_version\" in info\n assert \"agentlab_version\" in info\n 
assert \"agentlab_git_hash\" in info\n assert \"agentlab__local_modifications\" in info\n assert \"browsergym_version\" in info\n assert \"browsergym_git_hash\" in info\n assert \"browsergym__local_modifications\" in info\n\n\n# def test_save_reproducibility_info():\n# with tempfile.TemporaryDirectory() as tmp_dir:\n# tmp_dir = Path(tmp_dir)\n\n# info1 = reproducibility_util.save_reproducibility_info(\n# study_dir=tmp_dir,\n# info=reproducibility_util.get_reproducibility_info(\n# agents_args=\"GenericAgent\",\n# benchmark_name=\"miniwob\",\n# ignore_changes=True,\n# ),\n# )\n# time.sleep(1) # make sure the date changes by at least 1s\n\n# # this should overwrite the previous info since they are the same beside\n# # the date\n# info2 = reproducibility_util.save_reproducibility_info(\n# study_dir=tmp_dir,\n# info=reproducibility_util.get_reproducibility_info(\n# agents_args=\"GenericAgent\",\n# benchmark_name=\"miniwob\",\n# ignore_changes=True,\n# ),\n# )\n\n# reproducibility_util.assert_compatible(info1, info2)\n\n# # this should not overwrite info2 as the agent name is different, it\n# # should raise an error\n# with pytest.raises(ValueError):\n# reproducibility_util.save_reproducibility_info(\n# study_dir=tmp_dir,\n# info=reproducibility_util.get_reproducibility_info(\n# agents_args=\"GenericAgent_alt\",\n# benchmark_name=\"miniwob\",\n# ignore_changes=True,\n# ),\n# )\n\n# # load json\n# info3 = reproducibility_util.load_reproducibility_info(tmp_dir)\n\n# assert info2 == info3\n# assert info1 != info3\n\n# test_study_dir = Path(__file__).parent.parent / \"data\" / \"test_study\"\n# result_df = inspect_results.load_result_df(test_study_dir, progress_fn=None)\n# report_df = inspect_results.summarize_study(result_df)\n\n# with pytest.raises(ValueError):\n# reproducibility_util.append_to_journal(\n# info3, report_df, journal_path=tmp_dir / \"journal.csv\"\n# )\n\n# reproducibility_util.append_to_journal(\n# info3, report_df, journal_path=tmp_dir / \"journal.csv\", strict_reproducibility=False\n# )\n\n# print((tmp_dir / \"journal.csv\").read_text())\n\n\nif __name__ == \"__main__\":\n # test_set_temp()\n test_get_reproducibility_info(\"miniwob\")\n # test_save_reproducibility_info()","source_hash":"dc2db573a132c2279ef58e00f126007efe53c15b26bbc1b860be283c3a20a80d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_reproducibility_util.test_get_reproducibility_info","uri":"program://AgentLab/function/tests.experiments.test_reproducibility_util.test_get_reproducibility_info#L19-L39","kind":"function","name":"test_get_reproducibility_info","path":"tests/experiments/test_reproducibility_util.py","language":"python","start_line":19,"end_line":39,"context_start_line":1,"context_end_line":59,"code":"import json\nimport tempfile\nimport time\nfrom pathlib import Path\n\nimport bgym\nimport pytest\nfrom bgym import DEFAULT_BENCHMARKS\n\nfrom agentlab.agents.generic_agent import AGENT_4o_MINI\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments import reproducibility_util\n\n\n@pytest.mark.parametrize(\n \"benchmark_name\",\n [\"miniwob\", \"workarena_l1\", \"webarena\", \"visualwebarena\"],\n)\ndef test_get_reproducibility_info(benchmark_name):\n\n benchmark = DEFAULT_BENCHMARKS[benchmark_name]()\n\n info = reproducibility_util.get_reproducibility_info(\n \"test_agent\", benchmark, \"test_id\", ignore_changes=True\n )\n\n print(\"reproducibility info:\")\n print(json.dumps(info, indent=4))\n\n # assert keys in info\n assert \"git_user\" in info\n assert 
\"benchmark\" in info\n assert \"benchmark_version\" in info\n assert \"agentlab_version\" in info\n assert \"agentlab_git_hash\" in info\n assert \"agentlab__local_modifications\" in info\n assert \"browsergym_version\" in info\n assert \"browsergym_git_hash\" in info\n assert \"browsergym__local_modifications\" in info\n\n\n# def test_save_reproducibility_info():\n# with tempfile.TemporaryDirectory() as tmp_dir:\n# tmp_dir = Path(tmp_dir)\n\n# info1 = reproducibility_util.save_reproducibility_info(\n# study_dir=tmp_dir,\n# info=reproducibility_util.get_reproducibility_info(\n# agents_args=\"GenericAgent\",\n# benchmark_name=\"miniwob\",\n# ignore_changes=True,\n# ),\n# )\n# time.sleep(1) # make sure the date changes by at least 1s\n\n# # this should overwrite the previous info since they are the same beside\n# # the date\n# info2 = reproducibility_util.save_reproducibility_info(\n# study_dir=tmp_dir,","source_hash":"dc2db573a132c2279ef58e00f126007efe53c15b26bbc1b860be283c3a20a80d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_study","uri":"program://AgentLab/module/tests.experiments.test_study#L1-L68","kind":"module","name":"tests.experiments.test_study","path":"tests/experiments/test_study.py","language":"python","start_line":1,"end_line":68,"context_start_line":1,"context_end_line":68,"code":"import pytest\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_4o\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.llm.chat_api import CheatMiniWoBLLMArgs\nfrom agentlab.experiments.study import ParallelStudies, make_study, Study\nfrom agentlab.experiments.multi_server import WebArenaInstanceVars\nimport logging\n\n\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef _make_agent_args_list():\n # CheatMiniWoB agents won't succeed on WebArena, this is just for testing parallelization\n agent_args_list = []\n for i in range(2):\n agent_args = GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs(),\n flags=FLAGS_GPT_4o,\n )\n\n agent_args.agent_name = agent_args.agent_name + f\"_{i}\"\n agent_args_list.append(agent_args)\n return agent_args_list\n\n\n@pytest.mark.skip(reason=\"This test requires WebArena instances to be running\")\ndef manual_test_launch_parallel_study_webarena():\n agent_args_list = _make_agent_args_list()\n\n server_instance_1 = WebArenaInstanceVars.from_env_vars()\n server_instance_2 = server_instance_1.clone()\n server_instance_2.base_url = \"http://webarena-slow.eastus.cloudapp.azure.com\"\n parallel_servers = [server_instance_1, server_instance_2]\n # parallel_servers = [server_instance_2]\n\n for server in parallel_servers:\n print(server)\n\n study = make_study(\n agent_args_list,\n benchmark=\"webarena_tiny\",\n parallel_servers=parallel_servers,\n ignore_dependencies=True,\n )\n study.override_max_steps(2)\n assert isinstance(study, ParallelStudies)\n\n study.run(n_jobs=4, parallel_backend=\"ray\", n_relaunch=1)\n\n\n@pytest.mark.skip(reason=\"This usecase isnt relevant atm\")\ndef test_launch_parallel_study():\n agent_args_list = _make_agent_args_list()\n\n study = make_study(agent_args_list, benchmark=\"miniwob_tiny_test\", parallel_servers=2)\n assert isinstance(study, ParallelStudies)\n\n study.run(n_jobs=4, parallel_backend=\"ray\", n_relaunch=1)\n _, summary_df, _ = study.get_results()\n assert len(summary_df) == 2\n for n_completed in summary_df[\"n_completed\"]:\n assert n_completed == \"4/4\"\n\n\nif __name__ == \"__main__\":\n # test_launch_parallel_study()\n 
manual_test_launch_parallel_study_webarena()","source_hash":"4b57b012597beb12a266466d11fa45ce8ed5c221029ccf7650893a5f54c0a840","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_study._make_agent_args_list","uri":"program://AgentLab/function/tests.experiments.test_study._make_agent_args_list#L13-L24","kind":"function","name":"_make_agent_args_list","path":"tests/experiments/test_study.py","language":"python","start_line":13,"end_line":24,"context_start_line":1,"context_end_line":44,"code":"import pytest\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_4o\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.llm.chat_api import CheatMiniWoBLLMArgs\nfrom agentlab.experiments.study import ParallelStudies, make_study, Study\nfrom agentlab.experiments.multi_server import WebArenaInstanceVars\nimport logging\n\n\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef _make_agent_args_list():\n # CheatMiniWoB agents won't succeed on WebArena, this is just for testing parallelization\n agent_args_list = []\n for i in range(2):\n agent_args = GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs(),\n flags=FLAGS_GPT_4o,\n )\n\n agent_args.agent_name = agent_args.agent_name + f\"_{i}\"\n agent_args_list.append(agent_args)\n return agent_args_list\n\n\n@pytest.mark.skip(reason=\"This test requires WebArena instances to be running\")\ndef manual_test_launch_parallel_study_webarena():\n agent_args_list = _make_agent_args_list()\n\n server_instance_1 = WebArenaInstanceVars.from_env_vars()\n server_instance_2 = server_instance_1.clone()\n server_instance_2.base_url = \"http://webarena-slow.eastus.cloudapp.azure.com\"\n parallel_servers = [server_instance_1, server_instance_2]\n # parallel_servers = [server_instance_2]\n\n for server in parallel_servers:\n print(server)\n\n study = make_study(\n agent_args_list,\n benchmark=\"webarena_tiny\",\n parallel_servers=parallel_servers,\n ignore_dependencies=True,","source_hash":"4b57b012597beb12a266466d11fa45ce8ed5c221029ccf7650893a5f54c0a840","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_study.manual_test_launch_parallel_study_webarena","uri":"program://AgentLab/function/tests.experiments.test_study.manual_test_launch_parallel_study_webarena#L28-L49","kind":"function","name":"manual_test_launch_parallel_study_webarena","path":"tests/experiments/test_study.py","language":"python","start_line":28,"end_line":49,"context_start_line":8,"context_end_line":68,"code":"\n\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef _make_agent_args_list():\n # CheatMiniWoB agents won't succeed on WebArena, this is just for testing parallelization\n agent_args_list = []\n for i in range(2):\n agent_args = GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs(),\n flags=FLAGS_GPT_4o,\n )\n\n agent_args.agent_name = agent_args.agent_name + f\"_{i}\"\n agent_args_list.append(agent_args)\n return agent_args_list\n\n\n@pytest.mark.skip(reason=\"This test requires WebArena instances to be running\")\ndef manual_test_launch_parallel_study_webarena():\n agent_args_list = _make_agent_args_list()\n\n server_instance_1 = WebArenaInstanceVars.from_env_vars()\n server_instance_2 = server_instance_1.clone()\n server_instance_2.base_url = \"http://webarena-slow.eastus.cloudapp.azure.com\"\n parallel_servers = [server_instance_1, server_instance_2]\n # parallel_servers = [server_instance_2]\n\n for server in parallel_servers:\n print(server)\n\n study = make_study(\n 
agent_args_list,\n benchmark=\"webarena_tiny\",\n parallel_servers=parallel_servers,\n ignore_dependencies=True,\n )\n study.override_max_steps(2)\n assert isinstance(study, ParallelStudies)\n\n study.run(n_jobs=4, parallel_backend=\"ray\", n_relaunch=1)\n\n\n@pytest.mark.skip(reason=\"This usecase isnt relevant atm\")\ndef test_launch_parallel_study():\n agent_args_list = _make_agent_args_list()\n\n study = make_study(agent_args_list, benchmark=\"miniwob_tiny_test\", parallel_servers=2)\n assert isinstance(study, ParallelStudies)\n\n study.run(n_jobs=4, parallel_backend=\"ray\", n_relaunch=1)\n _, summary_df, _ = study.get_results()\n assert len(summary_df) == 2\n for n_completed in summary_df[\"n_completed\"]:\n assert n_completed == \"4/4\"\n\n\nif __name__ == \"__main__\":\n # test_launch_parallel_study()\n manual_test_launch_parallel_study_webarena()","source_hash":"4b57b012597beb12a266466d11fa45ce8ed5c221029ccf7650893a5f54c0a840","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_study.test_launch_parallel_study","uri":"program://AgentLab/function/tests.experiments.test_study.test_launch_parallel_study#L53-L63","kind":"function","name":"test_launch_parallel_study","path":"tests/experiments/test_study.py","language":"python","start_line":53,"end_line":63,"context_start_line":33,"context_end_line":68,"code":" server_instance_2.base_url = \"http://webarena-slow.eastus.cloudapp.azure.com\"\n parallel_servers = [server_instance_1, server_instance_2]\n # parallel_servers = [server_instance_2]\n\n for server in parallel_servers:\n print(server)\n\n study = make_study(\n agent_args_list,\n benchmark=\"webarena_tiny\",\n parallel_servers=parallel_servers,\n ignore_dependencies=True,\n )\n study.override_max_steps(2)\n assert isinstance(study, ParallelStudies)\n\n study.run(n_jobs=4, parallel_backend=\"ray\", n_relaunch=1)\n\n\n@pytest.mark.skip(reason=\"This usecase isnt relevant atm\")\ndef test_launch_parallel_study():\n agent_args_list = _make_agent_args_list()\n\n study = make_study(agent_args_list, benchmark=\"miniwob_tiny_test\", parallel_servers=2)\n assert isinstance(study, ParallelStudies)\n\n study.run(n_jobs=4, parallel_backend=\"ray\", n_relaunch=1)\n _, summary_df, _ = study.get_results()\n assert len(summary_df) == 2\n for n_completed in summary_df[\"n_completed\"]:\n assert n_completed == \"4/4\"\n\n\nif __name__ == \"__main__\":\n # test_launch_parallel_study()\n manual_test_launch_parallel_study_webarena()","source_hash":"4b57b012597beb12a266466d11fa45ce8ed5c221029ccf7650893a5f54c0a840","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.experiments.test_exp_configs","uri":"program://AgentLab/module/tests.experiments.test_exp_configs#L1-L1","kind":"module","name":"tests.experiments.test_exp_configs","path":"tests/experiments/test_exp_configs.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":1,"code":"from agentlab.experiments import study","source_hash":"aed6b5dda43aa03be5153eec7c5a1a91392be82bfdf13500d2cf4ca03cc8c824","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld","uri":"program://AgentLab/module/tests.benchmarks.test_osworld#L1-L297","kind":"module","name":"tests.benchmarks.test_osworld","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":1,"end_line":297,"context_start_line":1,"context_end_line":297,"code":"import importlib.util\nimport tempfile\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nimport pytest\n\nspec 
= importlib.util.find_spec(\"desktop_env\")\nif spec is None:\n DESKTOP_ENV_AVAILABLE = False\n OSWorldActionSet = None\n OsworldEnvArgs = None\n OsworldGym = None\nelse:\n # If desktop_env is available, import the necessary classes\n from agentlab.benchmarks.osworld import (\n OSWorldActionSet,\n OsworldEnvArgs,\n OsworldGym,\n )\n\n DESKTOP_ENV_AVAILABLE = True\n\n\n# Skip the entire module if desktop_env is not available\npytestmark = pytest.mark.skipif(not DESKTOP_ENV_AVAILABLE, reason=\"desktop_env not installed\")\n\n\ndef mock_task_config() -> dict:\n \"\"\"Mock task configuration for testing.\"\"\"\n return {\n \"id\": \"bb5e4c0d-f964-439c-97b6-bdb9747de3f4\",\n \"snapshot\": \"chrome\",\n \"instruction\": \"Can you make Bing the main search thingy when I look stuff up on the internet?\",\n \"source\": \"https://support.google.com/chrome/answer/95426\",\n \"config\": [\n {\n \"type\": \"launch\",\n \"parameters\": {\"command\": [\"google-chrome\", \"--remote-debugging-port=1337\"]},\n }\n ],\n \"trajectory\": \"trajectories/\",\n \"related_apps\": [\"chrome\"],\n \"evaluator\": {\n \"func\": \"match_in_list\",\n \"result\": {\"type\": \"default_search_engine\"},\n \"expected\": {\"type\": \"rule\", \"rules\": {\"expected\": [\"Microsoft Bing\", \"Bing\"]}},\n },\n \"proxy\": False,\n }\n\n\nclass TestOSWorldActionSet:\n \"\"\"Test cases for OSWorld action set functionality.\"\"\"\n\n def test_action_set_creation(self):\n \"\"\"Test basic action set creation.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n assert action_set.action_space == \"computer_13\"\n\n def test_to_tool_description_openai(self):\n \"\"\"Test tool description conversion for OpenAI format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"openai\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the expected structure\n tool = tools[0]\n assert \"type\" in tool\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"parameters\" in tool\n assert tool[\"type\"] == \"function\"\n\n def test_to_tool_description_anthropic(self):\n \"\"\"Test tool description conversion for Anthropic format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"anthropic\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the Anthropic format\n tool = tools[0]\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"input_schema\" in tool\n # Anthropic format doesn't have \"type\" field\n\n def test_unsupported_action_space(self):\n \"\"\"Test that unsupported action spaces raise ValueError.\"\"\"\n action_set = OSWorldActionSet(action_space=\"pyautogui\")\n with pytest.raises(\n ValueError, match=\"Only 'computer_13' action space is currently supported\"\n ):\n action_set.to_tool_description()\n\n\nclass TestOsworldEnvArgs:\n \"\"\"Test cases for OSWorld environment arguments.\"\"\"\n\n def test_env_args_creation(self):\n \"\"\"Test basic environment args creation.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\", max_steps=10)\n\n assert env_args.task == task\n assert env_args.task_name == \"test_task\"\n assert env_args.max_steps == 10\n assert env_args.action_space == \"computer_13\" # default\n assert env_args.provider_name == \"docker\" # default\n\n def test_env_args_custom_config(self):\n \"\"\"Test environment args with custom 
configuration.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(\n task=task,\n task_name=\"custom_task\",\n action_space=\"computer_13\",\n provider_name=\"vmware\",\n headless=True,\n screen_size=(1280, 720),\n max_steps=25,\n )\n\n assert env_args.action_space == \"computer_13\"\n assert env_args.provider_name == \"vmware\"\n assert env_args.headless is True\n assert env_args.screen_size == (1280, 720)\n assert env_args.max_steps == 25\n\n @patch(\"agentlab.benchmarks.osworld.OsworldGym\")\n def test_make_env(self, mock_gym_class):\n \"\"\"Test environment creation from args.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\")\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n env_args.make_env(exp_dir)\n\n # Verify that OsworldGym was called with correct arguments\n mock_gym_class.assert_called_once()\n call_args = mock_gym_class.call_args[1]\n assert call_args[\"task\"] == task\n assert call_args[\"exp_dir\"] == exp_dir\n\n\nclass TestOsworldGym:\n \"\"\"Test cases for OSWorld gym functionality.\"\"\"\n\n def test_gym_action_parsing(self):\n \"\"\"Test gym action parsing functionality.\"\"\"\n\n from agentlab.benchmarks.osworld import OsworldGym\n\n # Test various action strings including edge cases\n test_cases = [\n # Basic actions\n (\"wait()\", (\"wait\", [], {})),\n (\"done()\", (\"done\", [], {})),\n (\"move_to(x=100, y=200)\", (\"move_to\", [], {\"x\": 100, \"y\": 200})),\n ('typing(text=\"hello world\")', (\"typing\", [], {\"text\": \"hello world\"})),\n (\"hotkey(keys=['ctrl', 'c'])\", (\"hotkey\", [], {\"keys\": [\"ctrl\", \"c\"]})),\n # Edge cases with strings\n ('typing(text=\"\")', (\"typing\", [], {\"text\": \"\"})), # Empty string\n ('typing(text=\"line1\\\\nline2\")', (\"typing\", [], {\"text\": \"line1\\nline2\"})), # Newlines\n ('typing(text=\"tab\\\\there\")', (\"typing\", [], {\"text\": \"tab\\there\"})), # Tabs\n (\n 'typing(text=\"quote\\\\\"test\")',\n (\"typing\", [], {\"text\": 'quote\"test'}),\n ), # Escaped quotes\n (\n 'typing(text=\"single\\'quote\")',\n (\"typing\", [], {\"text\": \"single'quote\"}),\n ), # Single quotes\n ('typing(text=\"unicode: café\")', (\"typing\", [], {\"text\": \"unicode: café\"})), # Unicode\n # Edge cases with coordinates\n (\"move_to(x=0, y=0)\", (\"move_to\", [], {\"x\": 0, \"y\": 0})), # Zero coordinates\n (\n \"move_to(x=-10, y=-20)\",\n (\"move_to\", [], {\"x\": -10, \"y\": -20}),\n ), # Negative coordinates\n (\n \"move_to(x=9999, y=9999)\",\n (\"move_to\", [], {\"x\": 9999, \"y\": 9999}),\n ), # Large coordinates\n # Edge cases with lists\n (\"hotkey(keys=[])\", (\"hotkey\", [], {\"keys\": []})), # Empty list\n (\"hotkey(keys=['ctrl'])\", (\"hotkey\", [], {\"keys\": [\"ctrl\"]})), # Single key\n (\n \"hotkey(keys=['ctrl', 'shift', 'alt', 'a'])\",\n (\"hotkey\", [], {\"keys\": [\"ctrl\", \"shift\", \"alt\", \"a\"]}),\n ), # Multiple keys\n # Edge cases with boolean values\n (\"scroll(direction='up', clicks=3)\", (\"scroll\", [], {\"direction\": \"up\", \"clicks\": 3})),\n (\n \"click(x=100, y=200, button='left')\",\n (\"click\", [], {\"x\": 100, \"y\": 200, \"button\": \"left\"}),\n ),\n # Edge cases with mixed parameter types\n (\n \"complex_action(text='test', x=50, enabled=True, items=['a', 'b'])\",\n (\n \"complex_action\",\n [],\n {\"text\": \"test\", \"x\": 50, \"enabled\": True, \"items\": [\"a\", \"b\"]},\n ),\n ),\n # Edge cases with whitespace\n (\" wait() \", (\"wait\", [], {})), # Leading/trailing spaces\n (\n \"move_to( x=100 
, y=200 )\",\n (\"move_to\", [], {\"x\": 100, \"y\": 200}),\n ), # Spaces around params\n # Edge cases with special characters in strings\n (\n 'typing(text=\"@#$%^&*()+={}[]|\\\\:;\\'<>?,./\")',\n (\"typing\", [], {\"text\": \"@#$%^&*()+={}[]|\\\\:;'<>?,./\"}),\n ),\n ]\n\n for action_str, expected in test_cases:\n result = OsworldGym.parse_agentlab_action_str_to_func_args(action_str)\n assert result == expected, f\"Failed parsing: {action_str}\"\n\n @patch(\"agentlab.benchmarks.osworld.DesktopEnv\")\n def test_gym_creation(self, mock_desktop_env):\n \"\"\"Test OSWorld gym creation.\"\"\"\n task = mock_task_config()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n gym = OsworldGym(\n task=task,\n provider_name=\"docker\",\n region=None,\n path_to_vm=None,\n snapshot_name=\"init_state\",\n action_space=\"computer_13\",\n cache_dir=\"cache\",\n screen_size=(1920, 1080),\n headless=True,\n require_a11y_tree=True,\n require_terminal=False,\n os_type=\"Ubuntu\",\n enable_proxy=False,\n max_steps=50,\n exp_dir=exp_dir,\n )\n\n assert gym.task == task\n assert gym._step_count == 0\n assert gym.max_steps == 50\n assert gym.exp_dir == exp_dir\n\n def test_convert_agentlab_action_to_computer_13(self):\n \"\"\"Test action conversion from AgentLab to Computer 13 format.\"\"\"\n task = mock_task_config()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n\n with patch(\"agentlab.benchmarks.osworld.DesktopEnv\"):\n gym = OsworldGym(\n task=task,\n provider_name=\"docker\",\n region=None,\n path_to_vm=None,\n snapshot_name=\"init_state\",\n action_space=\"computer_13\",\n cache_dir=\"cache\",\n screen_size=(1920, 1080),\n headless=True,\n require_a11y_tree=True,\n require_terminal=False,\n os_type=\"Ubuntu\",\n enable_proxy=False,\n max_steps=50,\n exp_dir=exp_dir,\n )\n\n # Test simple action\n result = gym.convert_agentlab_action_to_computer_13(\"wait()\")\n assert result == \"WAIT\"\n\n # Test action with parameters\n result = gym.convert_agentlab_action_to_computer_13(\"move_to(x=100, y=200)\")\n expected = {\"action_type\": \"MOVE_TO\", \"parameters\": {\"x\": 100, \"y\": 200}}\n assert result == expected\n\n # Test typing action\n result = gym.convert_agentlab_action_to_computer_13('typing(text=\"hello\")')\n expected = {\"action_type\": \"TYPING\", \"parameters\": {\"text\": \"hello\"}}\n assert result == expected","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.mock_task_config","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.mock_task_config#L29-L50","kind":"function","name":"mock_task_config","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":29,"end_line":50,"context_start_line":9,"context_end_line":70,"code":"if spec is None:\n DESKTOP_ENV_AVAILABLE = False\n OSWorldActionSet = None\n OsworldEnvArgs = None\n OsworldGym = None\nelse:\n # If desktop_env is available, import the necessary classes\n from agentlab.benchmarks.osworld import (\n OSWorldActionSet,\n OsworldEnvArgs,\n OsworldGym,\n )\n\n DESKTOP_ENV_AVAILABLE = True\n\n\n# Skip the entire module if desktop_env is not available\npytestmark = pytest.mark.skipif(not DESKTOP_ENV_AVAILABLE, reason=\"desktop_env not installed\")\n\n\ndef mock_task_config() -> dict:\n \"\"\"Mock task configuration for testing.\"\"\"\n return {\n \"id\": \"bb5e4c0d-f964-439c-97b6-bdb9747de3f4\",\n \"snapshot\": \"chrome\",\n \"instruction\": 
\"Can you make Bing the main search thingy when I look stuff up on the internet?\",\n \"source\": \"https://support.google.com/chrome/answer/95426\",\n \"config\": [\n {\n \"type\": \"launch\",\n \"parameters\": {\"command\": [\"google-chrome\", \"--remote-debugging-port=1337\"]},\n }\n ],\n \"trajectory\": \"trajectories/\",\n \"related_apps\": [\"chrome\"],\n \"evaluator\": {\n \"func\": \"match_in_list\",\n \"result\": {\"type\": \"default_search_engine\"},\n \"expected\": {\"type\": \"rule\", \"rules\": {\"expected\": [\"Microsoft Bing\", \"Bing\"]}},\n },\n \"proxy\": False,\n }\n\n\nclass TestOSWorldActionSet:\n \"\"\"Test cases for OSWorld action set functionality.\"\"\"\n\n def test_action_set_creation(self):\n \"\"\"Test basic action set creation.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n assert action_set.action_space == \"computer_13\"\n\n def test_to_tool_description_openai(self):\n \"\"\"Test tool description conversion for OpenAI format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"openai\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the expected structure\n tool = tools[0]","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.TestOSWorldActionSet","uri":"program://AgentLab/class/tests.benchmarks.test_osworld.TestOSWorldActionSet#L53-L98","kind":"class","name":"TestOSWorldActionSet","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":53,"end_line":98,"context_start_line":33,"context_end_line":118,"code":" \"snapshot\": \"chrome\",\n \"instruction\": \"Can you make Bing the main search thingy when I look stuff up on the internet?\",\n \"source\": \"https://support.google.com/chrome/answer/95426\",\n \"config\": [\n {\n \"type\": \"launch\",\n \"parameters\": {\"command\": [\"google-chrome\", \"--remote-debugging-port=1337\"]},\n }\n ],\n \"trajectory\": \"trajectories/\",\n \"related_apps\": [\"chrome\"],\n \"evaluator\": {\n \"func\": \"match_in_list\",\n \"result\": {\"type\": \"default_search_engine\"},\n \"expected\": {\"type\": \"rule\", \"rules\": {\"expected\": [\"Microsoft Bing\", \"Bing\"]}},\n },\n \"proxy\": False,\n }\n\n\nclass TestOSWorldActionSet:\n \"\"\"Test cases for OSWorld action set functionality.\"\"\"\n\n def test_action_set_creation(self):\n \"\"\"Test basic action set creation.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n assert action_set.action_space == \"computer_13\"\n\n def test_to_tool_description_openai(self):\n \"\"\"Test tool description conversion for OpenAI format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"openai\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the expected structure\n tool = tools[0]\n assert \"type\" in tool\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"parameters\" in tool\n assert tool[\"type\"] == \"function\"\n\n def test_to_tool_description_anthropic(self):\n \"\"\"Test tool description conversion for Anthropic format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"anthropic\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the Anthropic format\n tool = tools[0]\n assert 
\"name\" in tool\n assert \"description\" in tool\n assert \"input_schema\" in tool\n # Anthropic format doesn't have \"type\" field\n\n def test_unsupported_action_space(self):\n \"\"\"Test that unsupported action spaces raise ValueError.\"\"\"\n action_set = OSWorldActionSet(action_space=\"pyautogui\")\n with pytest.raises(\n ValueError, match=\"Only 'computer_13' action space is currently supported\"\n ):\n action_set.to_tool_description()\n\n\nclass TestOsworldEnvArgs:\n \"\"\"Test cases for OSWorld environment arguments.\"\"\"\n\n def test_env_args_creation(self):\n \"\"\"Test basic environment args creation.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\", max_steps=10)\n\n assert env_args.task == task\n assert env_args.task_name == \"test_task\"\n assert env_args.max_steps == 10\n assert env_args.action_space == \"computer_13\" # default\n assert env_args.provider_name == \"docker\" # default\n\n def test_env_args_custom_config(self):\n \"\"\"Test environment args with custom configuration.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.TestOsworldEnvArgs","uri":"program://AgentLab/class/tests.benchmarks.test_osworld.TestOsworldEnvArgs#L101-L148","kind":"class","name":"TestOsworldEnvArgs","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":101,"end_line":148,"context_start_line":81,"context_end_line":168,"code":"\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the Anthropic format\n tool = tools[0]\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"input_schema\" in tool\n # Anthropic format doesn't have \"type\" field\n\n def test_unsupported_action_space(self):\n \"\"\"Test that unsupported action spaces raise ValueError.\"\"\"\n action_set = OSWorldActionSet(action_space=\"pyautogui\")\n with pytest.raises(\n ValueError, match=\"Only 'computer_13' action space is currently supported\"\n ):\n action_set.to_tool_description()\n\n\nclass TestOsworldEnvArgs:\n \"\"\"Test cases for OSWorld environment arguments.\"\"\"\n\n def test_env_args_creation(self):\n \"\"\"Test basic environment args creation.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\", max_steps=10)\n\n assert env_args.task == task\n assert env_args.task_name == \"test_task\"\n assert env_args.max_steps == 10\n assert env_args.action_space == \"computer_13\" # default\n assert env_args.provider_name == \"docker\" # default\n\n def test_env_args_custom_config(self):\n \"\"\"Test environment args with custom configuration.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(\n task=task,\n task_name=\"custom_task\",\n action_space=\"computer_13\",\n provider_name=\"vmware\",\n headless=True,\n screen_size=(1280, 720),\n max_steps=25,\n )\n\n assert env_args.action_space == \"computer_13\"\n assert env_args.provider_name == \"vmware\"\n assert env_args.headless is True\n assert env_args.screen_size == (1280, 720)\n assert env_args.max_steps == 25\n\n @patch(\"agentlab.benchmarks.osworld.OsworldGym\")\n def test_make_env(self, mock_gym_class):\n \"\"\"Test environment creation from args.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\")\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n 
env_args.make_env(exp_dir)\n\n # Verify that OsworldGym was called with correct arguments\n mock_gym_class.assert_called_once()\n call_args = mock_gym_class.call_args[1]\n assert call_args[\"task\"] == task\n assert call_args[\"exp_dir\"] == exp_dir\n\n\nclass TestOsworldGym:\n \"\"\"Test cases for OSWorld gym functionality.\"\"\"\n\n def test_gym_action_parsing(self):\n \"\"\"Test gym action parsing functionality.\"\"\"\n\n from agentlab.benchmarks.osworld import OsworldGym\n\n # Test various action strings including edge cases\n test_cases = [\n # Basic actions\n (\"wait()\", (\"wait\", [], {})),\n (\"done()\", (\"done\", [], {})),\n (\"move_to(x=100, y=200)\", (\"move_to\", [], {\"x\": 100, \"y\": 200})),\n ('typing(text=\"hello world\")', (\"typing\", [], {\"text\": \"hello world\"})),\n (\"hotkey(keys=['ctrl', 'c'])\", (\"hotkey\", [], {\"keys\": [\"ctrl\", \"c\"]})),\n # Edge cases with strings\n ('typing(text=\"\")', (\"typing\", [], {\"text\": \"\"})), # Empty string","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.TestOsworldGym","uri":"program://AgentLab/class/tests.benchmarks.test_osworld.TestOsworldGym#L151-L297","kind":"class","name":"TestOsworldGym","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":151,"end_line":297,"context_start_line":131,"context_end_line":297,"code":" assert env_args.screen_size == (1280, 720)\n assert env_args.max_steps == 25\n\n @patch(\"agentlab.benchmarks.osworld.OsworldGym\")\n def test_make_env(self, mock_gym_class):\n \"\"\"Test environment creation from args.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\")\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n env_args.make_env(exp_dir)\n\n # Verify that OsworldGym was called with correct arguments\n mock_gym_class.assert_called_once()\n call_args = mock_gym_class.call_args[1]\n assert call_args[\"task\"] == task\n assert call_args[\"exp_dir\"] == exp_dir\n\n\nclass TestOsworldGym:\n \"\"\"Test cases for OSWorld gym functionality.\"\"\"\n\n def test_gym_action_parsing(self):\n \"\"\"Test gym action parsing functionality.\"\"\"\n\n from agentlab.benchmarks.osworld import OsworldGym\n\n # Test various action strings including edge cases\n test_cases = [\n # Basic actions\n (\"wait()\", (\"wait\", [], {})),\n (\"done()\", (\"done\", [], {})),\n (\"move_to(x=100, y=200)\", (\"move_to\", [], {\"x\": 100, \"y\": 200})),\n ('typing(text=\"hello world\")', (\"typing\", [], {\"text\": \"hello world\"})),\n (\"hotkey(keys=['ctrl', 'c'])\", (\"hotkey\", [], {\"keys\": [\"ctrl\", \"c\"]})),\n # Edge cases with strings\n ('typing(text=\"\")', (\"typing\", [], {\"text\": \"\"})), # Empty string\n ('typing(text=\"line1\\\\nline2\")', (\"typing\", [], {\"text\": \"line1\\nline2\"})), # Newlines\n ('typing(text=\"tab\\\\there\")', (\"typing\", [], {\"text\": \"tab\\there\"})), # Tabs\n (\n 'typing(text=\"quote\\\\\"test\")',\n (\"typing\", [], {\"text\": 'quote\"test'}),\n ), # Escaped quotes\n (\n 'typing(text=\"single\\'quote\")',\n (\"typing\", [], {\"text\": \"single'quote\"}),\n ), # Single quotes\n ('typing(text=\"unicode: café\")', (\"typing\", [], {\"text\": \"unicode: café\"})), # Unicode\n # Edge cases with coordinates\n (\"move_to(x=0, y=0)\", (\"move_to\", [], {\"x\": 0, \"y\": 0})), # Zero coordinates\n (\n \"move_to(x=-10, y=-20)\",\n (\"move_to\", [], {\"x\": -10, \"y\": -20}),\n ), 
# Negative coordinates\n            (\n                \"move_to(x=9999, y=9999)\",\n                (\"move_to\", [], {\"x\": 9999, \"y\": 9999}),\n            ),  # Large coordinates\n            # Edge cases with lists\n            (\"hotkey(keys=[])\", (\"hotkey\", [], {\"keys\": []})),  # Empty list\n            (\"hotkey(keys=['ctrl'])\", (\"hotkey\", [], {\"keys\": [\"ctrl\"]})),  # Single key\n            (\n                \"hotkey(keys=['ctrl', 'shift', 'alt', 'a'])\",\n                (\"hotkey\", [], {\"keys\": [\"ctrl\", \"shift\", \"alt\", \"a\"]}),\n            ),  # Multiple keys\n            # Edge cases with string-valued parameters\n            (\"scroll(direction='up', clicks=3)\", (\"scroll\", [], {\"direction\": \"up\", \"clicks\": 3})),\n            (\n                \"click(x=100, y=200, button='left')\",\n                (\"click\", [], {\"x\": 100, \"y\": 200, \"button\": \"left\"}),\n            ),\n            # Edge cases with mixed parameter types\n            (\n                \"complex_action(text='test', x=50, enabled=True, items=['a', 'b'])\",\n                (\n                    \"complex_action\",\n                    [],\n                    {\"text\": \"test\", \"x\": 50, \"enabled\": True, \"items\": [\"a\", \"b\"]},\n                ),\n            ),\n            # Edge cases with whitespace\n            (\"  wait()  \", (\"wait\", [], {})),  # Leading/trailing spaces\n            (\n                \"move_to( x=100 , y=200 )\",\n                (\"move_to\", [], {\"x\": 100, \"y\": 200}),\n            ),  # Spaces around params\n            # Edge cases with special characters in strings\n            (\n                'typing(text=\"@#$%^&*()+={}[]|\\\\:;\\'<>?,./\")',\n                (\"typing\", [], {\"text\": \"@#$%^&*()+={}[]|\\\\:;'<>?,./\"}),\n            ),\n        ]\n\n        for action_str, expected in test_cases:\n            result = OsworldGym.parse_agentlab_action_str_to_func_args(action_str)\n            assert result == expected, f\"Failed parsing: {action_str}\"\n\n    @patch(\"agentlab.benchmarks.osworld.DesktopEnv\")\n    def test_gym_creation(self, mock_desktop_env):\n        \"\"\"Test OSWorld gym creation.\"\"\"\n        task = mock_task_config()\n\n        with tempfile.TemporaryDirectory() as tmp_dir:\n            exp_dir = Path(tmp_dir)\n            gym = OsworldGym(\n                task=task,\n                provider_name=\"docker\",\n                region=None,\n                path_to_vm=None,\n                snapshot_name=\"init_state\",\n                action_space=\"computer_13\",\n                cache_dir=\"cache\",\n                screen_size=(1920, 1080),\n                headless=True,\n                require_a11y_tree=True,\n                require_terminal=False,\n                os_type=\"Ubuntu\",\n                enable_proxy=False,\n                max_steps=50,\n                exp_dir=exp_dir,\n            )\n\n        assert gym.task == task\n        assert gym._step_count == 0\n        assert gym.max_steps == 50\n        assert gym.exp_dir == exp_dir\n\n    def test_convert_agentlab_action_to_computer_13(self):\n        \"\"\"Test action conversion from AgentLab to Computer 13 format.\"\"\"\n        task = mock_task_config()\n\n        with tempfile.TemporaryDirectory() as tmp_dir:\n            exp_dir = Path(tmp_dir)\n\n            with patch(\"agentlab.benchmarks.osworld.DesktopEnv\"):\n                gym = OsworldGym(\n                    task=task,\n                    provider_name=\"docker\",\n                    region=None,\n                    path_to_vm=None,\n                    snapshot_name=\"init_state\",\n                    action_space=\"computer_13\",\n                    cache_dir=\"cache\",\n                    screen_size=(1920, 1080),\n                    headless=True,\n                    require_a11y_tree=True,\n                    require_terminal=False,\n                    os_type=\"Ubuntu\",\n                    enable_proxy=False,\n                    max_steps=50,\n                    exp_dir=exp_dir,\n                )\n\n            # Test simple action\n            result = gym.convert_agentlab_action_to_computer_13(\"wait()\")\n            assert result == \"WAIT\"\n\n            # Test action with parameters\n            result = gym.convert_agentlab_action_to_computer_13(\"move_to(x=100, y=200)\")\n            expected = {\"action_type\": \"MOVE_TO\", \"parameters\": {\"x\": 100, \"y\": 200}}\n            assert result == expected\n\n            # Test typing action\n            result = gym.convert_agentlab_action_to_computer_13('typing(text=\"hello\")')\n            expected = {\"action_type\": \"TYPING\", \"parameters\": {\"text\": \"hello\"}}\n            assert result == 
expected","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.test_action_set_creation","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.test_action_set_creation#L56-L59","kind":"function","name":"test_action_set_creation","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":56,"end_line":59,"context_start_line":36,"context_end_line":79,"code":" \"config\": [\n {\n \"type\": \"launch\",\n \"parameters\": {\"command\": [\"google-chrome\", \"--remote-debugging-port=1337\"]},\n }\n ],\n \"trajectory\": \"trajectories/\",\n \"related_apps\": [\"chrome\"],\n \"evaluator\": {\n \"func\": \"match_in_list\",\n \"result\": {\"type\": \"default_search_engine\"},\n \"expected\": {\"type\": \"rule\", \"rules\": {\"expected\": [\"Microsoft Bing\", \"Bing\"]}},\n },\n \"proxy\": False,\n }\n\n\nclass TestOSWorldActionSet:\n \"\"\"Test cases for OSWorld action set functionality.\"\"\"\n\n def test_action_set_creation(self):\n \"\"\"Test basic action set creation.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n assert action_set.action_space == \"computer_13\"\n\n def test_to_tool_description_openai(self):\n \"\"\"Test tool description conversion for OpenAI format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"openai\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the expected structure\n tool = tools[0]\n assert \"type\" in tool\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"parameters\" in tool\n assert tool[\"type\"] == \"function\"\n\n def test_to_tool_description_anthropic(self):\n \"\"\"Test tool description conversion for Anthropic format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.test_to_tool_description_openai","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.test_to_tool_description_openai#L61-L75","kind":"function","name":"test_to_tool_description_openai","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":61,"end_line":75,"context_start_line":41,"context_end_line":95,"code":" ],\n \"trajectory\": \"trajectories/\",\n \"related_apps\": [\"chrome\"],\n \"evaluator\": {\n \"func\": \"match_in_list\",\n \"result\": {\"type\": \"default_search_engine\"},\n \"expected\": {\"type\": \"rule\", \"rules\": {\"expected\": [\"Microsoft Bing\", \"Bing\"]}},\n },\n \"proxy\": False,\n }\n\n\nclass TestOSWorldActionSet:\n \"\"\"Test cases for OSWorld action set functionality.\"\"\"\n\n def test_action_set_creation(self):\n \"\"\"Test basic action set creation.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n assert action_set.action_space == \"computer_13\"\n\n def test_to_tool_description_openai(self):\n \"\"\"Test tool description conversion for OpenAI format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"openai\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the expected structure\n tool = tools[0]\n assert \"type\" in tool\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"parameters\" in tool\n assert tool[\"type\"] == 
\"function\"\n\n def test_to_tool_description_anthropic(self):\n \"\"\"Test tool description conversion for Anthropic format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"anthropic\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the Anthropic format\n tool = tools[0]\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"input_schema\" in tool\n # Anthropic format doesn't have \"type\" field\n\n def test_unsupported_action_space(self):\n \"\"\"Test that unsupported action spaces raise ValueError.\"\"\"\n action_set = OSWorldActionSet(action_space=\"pyautogui\")\n with pytest.raises(","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.test_to_tool_description_anthropic","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.test_to_tool_description_anthropic#L77-L89","kind":"function","name":"test_to_tool_description_anthropic","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":77,"end_line":89,"context_start_line":57,"context_end_line":109,"code":" \"\"\"Test basic action set creation.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n assert action_set.action_space == \"computer_13\"\n\n def test_to_tool_description_openai(self):\n \"\"\"Test tool description conversion for OpenAI format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"openai\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the expected structure\n tool = tools[0]\n assert \"type\" in tool\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"parameters\" in tool\n assert tool[\"type\"] == \"function\"\n\n def test_to_tool_description_anthropic(self):\n \"\"\"Test tool description conversion for Anthropic format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"anthropic\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the Anthropic format\n tool = tools[0]\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"input_schema\" in tool\n # Anthropic format doesn't have \"type\" field\n\n def test_unsupported_action_space(self):\n \"\"\"Test that unsupported action spaces raise ValueError.\"\"\"\n action_set = OSWorldActionSet(action_space=\"pyautogui\")\n with pytest.raises(\n ValueError, match=\"Only 'computer_13' action space is currently supported\"\n ):\n action_set.to_tool_description()\n\n\nclass TestOsworldEnvArgs:\n \"\"\"Test cases for OSWorld environment arguments.\"\"\"\n\n def test_env_args_creation(self):\n \"\"\"Test basic environment args creation.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\", max_steps=10)\n\n assert env_args.task == task","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.test_unsupported_action_space","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.test_unsupported_action_space#L92-L98","kind":"function","name":"test_unsupported_action_space","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":92,"end_line":98,"context_start_line":72,"context_end_line":118,"code":" assert \"name\" in tool\n assert \"description\" in tool\n assert \"parameters\" in tool\n assert tool[\"type\"] == \"function\"\n\n def test_to_tool_description_anthropic(self):\n \"\"\"Test tool description conversion for Anthropic format.\"\"\"\n action_set = OSWorldActionSet(action_space=\"computer_13\")\n tools = action_set.to_tool_description(api=\"anthropic\")\n\n assert isinstance(tools, list)\n assert len(tools) > 0\n\n # Check that tools have the Anthropic format\n tool = tools[0]\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"input_schema\" in tool\n # Anthropic format doesn't have \"type\" field\n\n def test_unsupported_action_space(self):\n \"\"\"Test that unsupported action spaces raise ValueError.\"\"\"\n action_set = OSWorldActionSet(action_space=\"pyautogui\")\n with pytest.raises(\n ValueError, match=\"Only 'computer_13' action space is currently supported\"\n ):\n action_set.to_tool_description()\n\n\nclass TestOsworldEnvArgs:\n \"\"\"Test cases for OSWorld environment arguments.\"\"\"\n\n def test_env_args_creation(self):\n \"\"\"Test basic environment args creation.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\", max_steps=10)\n\n assert env_args.task == task\n assert env_args.task_name == \"test_task\"\n assert env_args.max_steps == 10\n assert env_args.action_space == \"computer_13\" # default\n assert env_args.provider_name == \"docker\" # default\n\n def test_env_args_custom_config(self):\n \"\"\"Test environment args with custom configuration.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.test_env_args_creation","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.test_env_args_creation#L104-L113","kind":"function","name":"test_env_args_creation","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":104,"end_line":113,"context_start_line":84,"context_end_line":133,"code":"\n # Check that tools have the Anthropic format\n tool = tools[0]\n assert \"name\" in tool\n assert \"description\" in tool\n assert \"input_schema\" in tool\n # Anthropic format doesn't have \"type\" field\n\n def test_unsupported_action_space(self):\n \"\"\"Test that unsupported action spaces raise ValueError.\"\"\"\n action_set = OSWorldActionSet(action_space=\"pyautogui\")\n with pytest.raises(\n ValueError, match=\"Only 'computer_13' action space is currently supported\"\n ):\n action_set.to_tool_description()\n\n\nclass TestOsworldEnvArgs:\n \"\"\"Test cases for OSWorld environment arguments.\"\"\"\n\n def test_env_args_creation(self):\n \"\"\"Test basic environment args creation.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\", max_steps=10)\n\n assert env_args.task == task\n assert env_args.task_name == \"test_task\"\n assert env_args.max_steps == 10\n assert env_args.action_space == \"computer_13\" # default\n assert env_args.provider_name == 
\"docker\" # default\n\n def test_env_args_custom_config(self):\n \"\"\"Test environment args with custom configuration.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(\n task=task,\n task_name=\"custom_task\",\n action_space=\"computer_13\",\n provider_name=\"vmware\",\n headless=True,\n screen_size=(1280, 720),\n max_steps=25,\n )\n\n assert env_args.action_space == \"computer_13\"\n assert env_args.provider_name == \"vmware\"\n assert env_args.headless is True\n assert env_args.screen_size == (1280, 720)\n assert env_args.max_steps == 25\n","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.test_env_args_custom_config","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.test_env_args_custom_config#L115-L132","kind":"function","name":"test_env_args_custom_config","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":115,"end_line":132,"context_start_line":95,"context_end_line":152,"code":" with pytest.raises(\n ValueError, match=\"Only 'computer_13' action space is currently supported\"\n ):\n action_set.to_tool_description()\n\n\nclass TestOsworldEnvArgs:\n \"\"\"Test cases for OSWorld environment arguments.\"\"\"\n\n def test_env_args_creation(self):\n \"\"\"Test basic environment args creation.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\", max_steps=10)\n\n assert env_args.task == task\n assert env_args.task_name == \"test_task\"\n assert env_args.max_steps == 10\n assert env_args.action_space == \"computer_13\" # default\n assert env_args.provider_name == \"docker\" # default\n\n def test_env_args_custom_config(self):\n \"\"\"Test environment args with custom configuration.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(\n task=task,\n task_name=\"custom_task\",\n action_space=\"computer_13\",\n provider_name=\"vmware\",\n headless=True,\n screen_size=(1280, 720),\n max_steps=25,\n )\n\n assert env_args.action_space == \"computer_13\"\n assert env_args.provider_name == \"vmware\"\n assert env_args.headless is True\n assert env_args.screen_size == (1280, 720)\n assert env_args.max_steps == 25\n\n @patch(\"agentlab.benchmarks.osworld.OsworldGym\")\n def test_make_env(self, mock_gym_class):\n \"\"\"Test environment creation from args.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\")\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n env_args.make_env(exp_dir)\n\n # Verify that OsworldGym was called with correct arguments\n mock_gym_class.assert_called_once()\n call_args = mock_gym_class.call_args[1]\n assert call_args[\"task\"] == task\n assert call_args[\"exp_dir\"] == exp_dir\n\n\nclass TestOsworldGym:\n \"\"\"Test cases for OSWorld gym functionality.\"\"\"","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.test_make_env","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.test_make_env#L135-L148","kind":"function","name":"test_make_env","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":135,"end_line":148,"context_start_line":115,"context_end_line":168,"code":" def test_env_args_custom_config(self):\n \"\"\"Test environment args with custom configuration.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(\n 
task=task,\n task_name=\"custom_task\",\n action_space=\"computer_13\",\n provider_name=\"vmware\",\n headless=True,\n screen_size=(1280, 720),\n max_steps=25,\n )\n\n assert env_args.action_space == \"computer_13\"\n assert env_args.provider_name == \"vmware\"\n assert env_args.headless is True\n assert env_args.screen_size == (1280, 720)\n assert env_args.max_steps == 25\n\n @patch(\"agentlab.benchmarks.osworld.OsworldGym\")\n def test_make_env(self, mock_gym_class):\n \"\"\"Test environment creation from args.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\")\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n env_args.make_env(exp_dir)\n\n # Verify that OsworldGym was called with correct arguments\n mock_gym_class.assert_called_once()\n call_args = mock_gym_class.call_args[1]\n assert call_args[\"task\"] == task\n assert call_args[\"exp_dir\"] == exp_dir\n\n\nclass TestOsworldGym:\n \"\"\"Test cases for OSWorld gym functionality.\"\"\"\n\n def test_gym_action_parsing(self):\n \"\"\"Test gym action parsing functionality.\"\"\"\n\n from agentlab.benchmarks.osworld import OsworldGym\n\n # Test various action strings including edge cases\n test_cases = [\n # Basic actions\n (\"wait()\", (\"wait\", [], {})),\n (\"done()\", (\"done\", [], {})),\n (\"move_to(x=100, y=200)\", (\"move_to\", [], {\"x\": 100, \"y\": 200})),\n ('typing(text=\"hello world\")', (\"typing\", [], {\"text\": \"hello world\"})),\n (\"hotkey(keys=['ctrl', 'c'])\", (\"hotkey\", [], {\"keys\": [\"ctrl\", \"c\"]})),\n # Edge cases with strings\n ('typing(text=\"\")', (\"typing\", [], {\"text\": \"\"})), # Empty string","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.test_gym_action_parsing","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.test_gym_action_parsing#L154-L227","kind":"function","name":"test_gym_action_parsing","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":154,"end_line":227,"context_start_line":134,"context_end_line":247,"code":" @patch(\"agentlab.benchmarks.osworld.OsworldGym\")\n def test_make_env(self, mock_gym_class):\n \"\"\"Test environment creation from args.\"\"\"\n task = mock_task_config()\n env_args = OsworldEnvArgs(task=task, task_name=\"test_task\")\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n env_args.make_env(exp_dir)\n\n # Verify that OsworldGym was called with correct arguments\n mock_gym_class.assert_called_once()\n call_args = mock_gym_class.call_args[1]\n assert call_args[\"task\"] == task\n assert call_args[\"exp_dir\"] == exp_dir\n\n\nclass TestOsworldGym:\n \"\"\"Test cases for OSWorld gym functionality.\"\"\"\n\n def test_gym_action_parsing(self):\n \"\"\"Test gym action parsing functionality.\"\"\"\n\n from agentlab.benchmarks.osworld import OsworldGym\n\n # Test various action strings including edge cases\n test_cases = [\n # Basic actions\n (\"wait()\", (\"wait\", [], {})),\n (\"done()\", (\"done\", [], {})),\n (\"move_to(x=100, y=200)\", (\"move_to\", [], {\"x\": 100, \"y\": 200})),\n ('typing(text=\"hello world\")', (\"typing\", [], {\"text\": \"hello world\"})),\n (\"hotkey(keys=['ctrl', 'c'])\", (\"hotkey\", [], {\"keys\": [\"ctrl\", \"c\"]})),\n # Edge cases with strings\n ('typing(text=\"\")', (\"typing\", [], {\"text\": \"\"})), # Empty string\n ('typing(text=\"line1\\\\nline2\")', (\"typing\", [], {\"text\": 
\"line1\\nline2\"})), # Newlines\n ('typing(text=\"tab\\\\there\")', (\"typing\", [], {\"text\": \"tab\\there\"})), # Tabs\n (\n 'typing(text=\"quote\\\\\"test\")',\n (\"typing\", [], {\"text\": 'quote\"test'}),\n ), # Escaped quotes\n (\n 'typing(text=\"single\\'quote\")',\n (\"typing\", [], {\"text\": \"single'quote\"}),\n ), # Single quotes\n ('typing(text=\"unicode: café\")', (\"typing\", [], {\"text\": \"unicode: café\"})), # Unicode\n # Edge cases with coordinates\n (\"move_to(x=0, y=0)\", (\"move_to\", [], {\"x\": 0, \"y\": 0})), # Zero coordinates\n (\n \"move_to(x=-10, y=-20)\",\n (\"move_to\", [], {\"x\": -10, \"y\": -20}),\n ), # Negative coordinates\n (\n \"move_to(x=9999, y=9999)\",\n (\"move_to\", [], {\"x\": 9999, \"y\": 9999}),\n ), # Large coordinates\n # Edge cases with lists\n (\"hotkey(keys=[])\", (\"hotkey\", [], {\"keys\": []})), # Empty list\n (\"hotkey(keys=['ctrl'])\", (\"hotkey\", [], {\"keys\": [\"ctrl\"]})), # Single key\n (\n \"hotkey(keys=['ctrl', 'shift', 'alt', 'a'])\",\n (\"hotkey\", [], {\"keys\": [\"ctrl\", \"shift\", \"alt\", \"a\"]}),\n ), # Multiple keys\n # Edge cases with boolean values\n (\"scroll(direction='up', clicks=3)\", (\"scroll\", [], {\"direction\": \"up\", \"clicks\": 3})),\n (\n \"click(x=100, y=200, button='left')\",\n (\"click\", [], {\"x\": 100, \"y\": 200, \"button\": \"left\"}),\n ),\n # Edge cases with mixed parameter types\n (\n \"complex_action(text='test', x=50, enabled=True, items=['a', 'b'])\",\n (\n \"complex_action\",\n [],\n {\"text\": \"test\", \"x\": 50, \"enabled\": True, \"items\": [\"a\", \"b\"]},\n ),\n ),\n # Edge cases with whitespace\n (\" wait() \", (\"wait\", [], {})), # Leading/trailing spaces\n (\n \"move_to( x=100 , y=200 )\",\n (\"move_to\", [], {\"x\": 100, \"y\": 200}),\n ), # Spaces around params\n # Edge cases with special characters in strings\n (\n 'typing(text=\"@#$%^&*()+={}[]|\\\\:;\\'<>?,./\")',\n (\"typing\", [], {\"text\": \"@#$%^&*()+={}[]|\\\\:;'<>?,./\"}),\n ),\n ]\n\n for action_str, expected in test_cases:\n result = OsworldGym.parse_agentlab_action_str_to_func_args(action_str)\n assert result == expected, f\"Failed parsing: {action_str}\"\n\n @patch(\"agentlab.benchmarks.osworld.DesktopEnv\")\n def test_gym_creation(self, mock_desktop_env):\n \"\"\"Test OSWorld gym creation.\"\"\"\n task = mock_task_config()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n gym = OsworldGym(\n task=task,\n provider_name=\"docker\",\n region=None,\n path_to_vm=None,\n snapshot_name=\"init_state\",\n action_space=\"computer_13\",\n cache_dir=\"cache\",\n screen_size=(1920, 1080),\n headless=True,\n require_a11y_tree=True,\n require_terminal=False,","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.test_gym_creation","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.test_gym_creation#L230-L257","kind":"function","name":"test_gym_creation","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":230,"end_line":257,"context_start_line":210,"context_end_line":277,"code":" ),\n ),\n # Edge cases with whitespace\n (\" wait() \", (\"wait\", [], {})), # Leading/trailing spaces\n (\n \"move_to( x=100 , y=200 )\",\n (\"move_to\", [], {\"x\": 100, \"y\": 200}),\n ), # Spaces around params\n # Edge cases with special characters in strings\n (\n 'typing(text=\"@#$%^&*()+={}[]|\\\\:;\\'<>?,./\")',\n (\"typing\", [], {\"text\": 
\"@#$%^&*()+={}[]|\\\\:;'<>?,./\"}),\n ),\n ]\n\n for action_str, expected in test_cases:\n result = OsworldGym.parse_agentlab_action_str_to_func_args(action_str)\n assert result == expected, f\"Failed parsing: {action_str}\"\n\n @patch(\"agentlab.benchmarks.osworld.DesktopEnv\")\n def test_gym_creation(self, mock_desktop_env):\n \"\"\"Test OSWorld gym creation.\"\"\"\n task = mock_task_config()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n gym = OsworldGym(\n task=task,\n provider_name=\"docker\",\n region=None,\n path_to_vm=None,\n snapshot_name=\"init_state\",\n action_space=\"computer_13\",\n cache_dir=\"cache\",\n screen_size=(1920, 1080),\n headless=True,\n require_a11y_tree=True,\n require_terminal=False,\n os_type=\"Ubuntu\",\n enable_proxy=False,\n max_steps=50,\n exp_dir=exp_dir,\n )\n\n assert gym.task == task\n assert gym._step_count == 0\n assert gym.max_steps == 50\n assert gym.exp_dir == exp_dir\n\n def test_convert_agentlab_action_to_computer_13(self):\n \"\"\"Test action conversion from AgentLab to Computer 13 format.\"\"\"\n task = mock_task_config()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n\n with patch(\"agentlab.benchmarks.osworld.DesktopEnv\"):\n gym = OsworldGym(\n task=task,\n provider_name=\"docker\",\n region=None,\n path_to_vm=None,\n snapshot_name=\"init_state\",\n action_space=\"computer_13\",\n cache_dir=\"cache\",\n screen_size=(1920, 1080),\n headless=True,\n require_a11y_tree=True,","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.benchmarks.test_osworld.test_convert_agentlab_action_to_computer_13","uri":"program://AgentLab/function/tests.benchmarks.test_osworld.test_convert_agentlab_action_to_computer_13#L259-L297","kind":"function","name":"test_convert_agentlab_action_to_computer_13","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":259,"end_line":297,"context_start_line":239,"context_end_line":297,"code":" region=None,\n path_to_vm=None,\n snapshot_name=\"init_state\",\n action_space=\"computer_13\",\n cache_dir=\"cache\",\n screen_size=(1920, 1080),\n headless=True,\n require_a11y_tree=True,\n require_terminal=False,\n os_type=\"Ubuntu\",\n enable_proxy=False,\n max_steps=50,\n exp_dir=exp_dir,\n )\n\n assert gym.task == task\n assert gym._step_count == 0\n assert gym.max_steps == 50\n assert gym.exp_dir == exp_dir\n\n def test_convert_agentlab_action_to_computer_13(self):\n \"\"\"Test action conversion from AgentLab to Computer 13 format.\"\"\"\n task = mock_task_config()\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n exp_dir = Path(tmp_dir)\n\n with patch(\"agentlab.benchmarks.osworld.DesktopEnv\"):\n gym = OsworldGym(\n task=task,\n provider_name=\"docker\",\n region=None,\n path_to_vm=None,\n snapshot_name=\"init_state\",\n action_space=\"computer_13\",\n cache_dir=\"cache\",\n screen_size=(1920, 1080),\n headless=True,\n require_a11y_tree=True,\n require_terminal=False,\n os_type=\"Ubuntu\",\n enable_proxy=False,\n max_steps=50,\n exp_dir=exp_dir,\n )\n\n # Test simple action\n result = gym.convert_agentlab_action_to_computer_13(\"wait()\")\n assert result == \"WAIT\"\n\n # Test action with parameters\n result = gym.convert_agentlab_action_to_computer_13(\"move_to(x=100, y=200)\")\n expected = {\"action_type\": \"MOVE_TO\", \"parameters\": {\"x\": 100, \"y\": 200}}\n assert result == expected\n\n # Test typing action\n result = 
gym.convert_agentlab_action_to_computer_13('typing(text=\"hello\")')\n expected = {\"action_type\": \"TYPING\", \"parameters\": {\"text\": \"hello\"}}\n assert result == expected","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent","uri":"program://AgentLab/module/tests.agents.test_agent#L1-L249","kind":"module","name":"tests.agents.test_agent","path":"tests/agents/test_agent.py","language":"python","start_line":1,"end_line":249,"context_start_line":1,"context_end_line":249,"code":"import re\nimport tempfile\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom openai import OpenAIError\n\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_3_5\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments import launch_exp\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.llm.chat_api import BaseModelArgs, CheatMiniWoBLLMArgs\nfrom agentlab.llm.llm_utils import Discussion\n\n\ndef test_generic_agent():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs(),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"n_steps\": 1,\n \"cum_reward\": 1.0,\n \"terminated\": True,\n \"truncated\": False,\n \"err_msg\": None,\n \"stack_trace\": None,\n \"agent.flags.obs.use_ax_tree\": True,\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\n@dataclass\nclass CheatMiniWoBLLM_ParseRetry:\n \"\"\"For unit-testing purposes only. It only work with miniwob.click-test task.\"\"\"\n\n n_retry: int\n retry_count: int = 0\n\n def __call__(self, messages) -> str:\n if self.retry_count < self.n_retry:\n self.retry_count += 1\n return dict(role=\"assistant\", content=\"I'm retrying\")\n\n if isinstance(messages, Discussion):\n prompt = messages.to_string()\n else:\n prompt = messages[1].get(\"content\", \"\")\n match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'\n else:\n raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n\n{action}\n\n\"\"\"\n return dict(role=\"assistant\", content=answer)\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass CheatMiniWoBLLMArgs_ParseRetry(BaseModelArgs):\n n_retry: int = 2\n model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatMiniWoBLLM_ParseRetry(n_retry=self.n_retry)\n\n\n@dataclass\nclass CheatLLM_LLMError:\n \"\"\"For unit-testing purposes only. 
Fails to call LLM\"\"\"\n\n n_retry: int = 0\n success: bool = False\n\n def __call__(self, messages) -> str:\n if self.success:\n if isinstance(messages, Discussion):\n prompt = messages.to_string()\n else:\n prompt = messages[1].get(\"content\", \"\")\n match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'\n else:\n raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n \n {action}\n \n \"\"\"\n return dict(role=\"assistant\", content=answer)\n raise OpenAIError(\"LLM failed to respond\")\n\n def get_stats(self):\n return {\"n_llm_retry\": self.n_retry, \"n_llm_busted_retry\": int(not self.success)}\n\n\n@dataclass\nclass CheatLLMArgs_LLMError(BaseModelArgs):\n n_retry: int = 2\n success: bool = False\n model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatLLM_LLMError(\n n_retry=self.n_retry,\n success=self.success,\n )\n\n\ndef test_generic_agent_parse_retry():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs_ParseRetry(n_retry=2),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n # TODO why these tests don't work with ray backend?\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n print(result_record)\n target = {\n \"stats.cum_n_retry\": 2,\n \"stats.cum_busted_retry\": 0,\n \"n_steps\": 1,\n \"cum_reward\": 1.0,\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\ndef test_bust_parse_retry():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs_ParseRetry(n_retry=10),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"stats.cum_n_retry\": 5,\n \"stats.cum_busted_retry\": 1,\n \"n_steps\": 0,\n \"cum_reward\": 0,\n \"err_msg\": None, # parsing error is considered an agent failure, not a code error\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\ndef test_llm_error_success():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatLLMArgs_LLMError(n_retry=3, success=True),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"stats.cum_n_llm_retry\": 3,\n \"n_steps\": 1,\n \"cum_reward\": 1.0,\n \"err_msg\": None,\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\ndef test_llm_error_no_success():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n 
chat_model_args=CheatLLMArgs_LLMError(n_retry=5, success=False),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"n_steps\": 0,\n \"cum_reward\": 0,\n \"err_msg\": \"Exception uncaught by agent or environment in task miniwob.click-test.\\nOpenAIError:\\nLLM failed to respond\",\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\nif __name__ == \"__main__\":\n # test_generic_agent()\n test_generic_agent_parse_retry()","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.test_generic_agent","uri":"program://AgentLab/function/tests.agents.test_agent.test_generic_agent#L17-L45","kind":"function","name":"test_generic_agent","path":"tests/agents/test_agent.py","language":"python","start_line":17,"end_line":45,"context_start_line":1,"context_end_line":65,"code":"import re\nimport tempfile\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom openai import OpenAIError\n\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_3_5\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments import launch_exp\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.llm.chat_api import BaseModelArgs, CheatMiniWoBLLMArgs\nfrom agentlab.llm.llm_utils import Discussion\n\n\ndef test_generic_agent():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs(),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"n_steps\": 1,\n \"cum_reward\": 1.0,\n \"terminated\": True,\n \"truncated\": False,\n \"err_msg\": None,\n \"stack_trace\": None,\n \"agent.flags.obs.use_ax_tree\": True,\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\n@dataclass\nclass CheatMiniWoBLLM_ParseRetry:\n \"\"\"For unit-testing purposes only. 
It only works with the miniwob.click-test task.\"\"\"\n\n    n_retry: int\n    retry_count: int = 0\n\n    def __call__(self, messages) -> str:\n        if self.retry_count < self.n_retry:\n            self.retry_count += 1\n            return dict(role=\"assistant\", content=\"I'm retrying\")\n\n        if isinstance(messages, Discussion):\n            prompt = messages.to_string()\n        else:\n            prompt = messages[1].get(\"content\", \"\")\n        match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.CheatMiniWoBLLM_ParseRetry","uri":"program://AgentLab/class/tests.agents.test_agent.CheatMiniWoBLLM_ParseRetry#L49-L80","kind":"class","name":"CheatMiniWoBLLM_ParseRetry","path":"tests/agents/test_agent.py","language":"python","start_line":49,"end_line":80,"context_start_line":29,"context_end_line":100,"code":"        )\n\n        result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n        target = {\n            \"n_steps\": 1,\n            \"cum_reward\": 1.0,\n            \"terminated\": True,\n            \"truncated\": False,\n            \"err_msg\": None,\n            \"stack_trace\": None,\n            \"agent.flags.obs.use_ax_tree\": True,\n        }\n\n        for key, target_val in target.items():\n            assert key in result_record\n            assert result_record[key].iloc[0] == target_val\n\n\n@dataclass\nclass CheatMiniWoBLLM_ParseRetry:\n    \"\"\"For unit-testing purposes only. It only works with the miniwob.click-test task.\"\"\"\n\n    n_retry: int\n    retry_count: int = 0\n\n    def __call__(self, messages) -> str:\n        if self.retry_count < self.n_retry:\n            self.retry_count += 1\n            return dict(role=\"assistant\", content=\"I'm retrying\")\n\n        if isinstance(messages, Discussion):\n            prompt = messages.to_string()\n        else:\n            prompt = messages[1].get(\"content\", \"\")\n        match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n        if match:\n            bid = match.group(1)\n            action = f'click(\"{bid}\")'\n        else:\n            raise Exception(\"Can't find the button's bid\")\n\n        answer = f\"\"\"I'm clicking the button as requested.\n\n{action}\n\n\"\"\"\n        return dict(role=\"assistant\", content=answer)\n\n    def get_stats(self):\n        return {}\n\n\n@dataclass\nclass CheatMiniWoBLLMArgs_ParseRetry(BaseModelArgs):\n    n_retry: int = 2\n    model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n    def make_model(self):\n        return CheatMiniWoBLLM_ParseRetry(n_retry=self.n_retry)\n\n\n@dataclass\nclass CheatLLM_LLMError:\n    \"\"\"For unit-testing purposes only. 
Fails to call LLM\"\"\"\n\n n_retry: int = 0\n success: bool = False\n\n def __call__(self, messages) -> str:\n if self.success:","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.CheatMiniWoBLLMArgs_ParseRetry","uri":"program://AgentLab/class/tests.agents.test_agent.CheatMiniWoBLLMArgs_ParseRetry#L84-L89","kind":"class","name":"CheatMiniWoBLLMArgs_ParseRetry","path":"tests/agents/test_agent.py","language":"python","start_line":84,"end_line":89,"context_start_line":64,"context_end_line":109,"code":" match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'\n else:\n raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n\n{action}\n\n\"\"\"\n return dict(role=\"assistant\", content=answer)\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass CheatMiniWoBLLMArgs_ParseRetry(BaseModelArgs):\n n_retry: int = 2\n model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatMiniWoBLLM_ParseRetry(n_retry=self.n_retry)\n\n\n@dataclass\nclass CheatLLM_LLMError:\n \"\"\"For unit-testing purposes only. Fails to call LLM\"\"\"\n\n n_retry: int = 0\n success: bool = False\n\n def __call__(self, messages) -> str:\n if self.success:\n if isinstance(messages, Discussion):\n prompt = messages.to_string()\n else:\n prompt = messages[1].get(\"content\", \"\")\n match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.CheatLLM_LLMError","uri":"program://AgentLab/class/tests.agents.test_agent.CheatLLM_LLMError#L93-L122","kind":"class","name":"CheatLLM_LLMError","path":"tests/agents/test_agent.py","language":"python","start_line":93,"end_line":122,"context_start_line":73,"context_end_line":142,"code":"\n{action}\n\n\"\"\"\n return dict(role=\"assistant\", content=answer)\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass CheatMiniWoBLLMArgs_ParseRetry(BaseModelArgs):\n n_retry: int = 2\n model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatMiniWoBLLM_ParseRetry(n_retry=self.n_retry)\n\n\n@dataclass\nclass CheatLLM_LLMError:\n \"\"\"For unit-testing purposes only. 
Fails to call LLM\"\"\"\n\n n_retry: int = 0\n success: bool = False\n\n def __call__(self, messages) -> str:\n if self.success:\n if isinstance(messages, Discussion):\n prompt = messages.to_string()\n else:\n prompt = messages[1].get(\"content\", \"\")\n match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'\n else:\n raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n \n {action}\n \n \"\"\"\n return dict(role=\"assistant\", content=answer)\n raise OpenAIError(\"LLM failed to respond\")\n\n def get_stats(self):\n return {\"n_llm_retry\": self.n_retry, \"n_llm_busted_retry\": int(not self.success)}\n\n\n@dataclass\nclass CheatLLMArgs_LLMError(BaseModelArgs):\n n_retry: int = 2\n success: bool = False\n model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatLLM_LLMError(\n n_retry=self.n_retry,\n success=self.success,\n )\n\n\ndef test_generic_agent_parse_retry():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs_ParseRetry(n_retry=2),\n flags=FLAGS_GPT_3_5,","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.CheatLLMArgs_LLMError","uri":"program://AgentLab/class/tests.agents.test_agent.CheatLLMArgs_LLMError#L126-L135","kind":"class","name":"CheatLLMArgs_LLMError","path":"tests/agents/test_agent.py","language":"python","start_line":126,"end_line":135,"context_start_line":106,"context_end_line":155,"code":"\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'\n else:\n raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n \n {action}\n \n \"\"\"\n return dict(role=\"assistant\", content=answer)\n raise OpenAIError(\"LLM failed to respond\")\n\n def get_stats(self):\n return {\"n_llm_retry\": self.n_retry, \"n_llm_busted_retry\": int(not self.success)}\n\n\n@dataclass\nclass CheatLLMArgs_LLMError(BaseModelArgs):\n n_retry: int = 2\n success: bool = False\n model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatLLM_LLMError(\n n_retry=self.n_retry,\n success=self.success,\n )\n\n\ndef test_generic_agent_parse_retry():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs_ParseRetry(n_retry=2),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n # TODO why these tests don't work with ray backend?\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n print(result_record)\n target = {\n \"stats.cum_n_retry\": 2,","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.test_generic_agent_parse_retry","uri":"program://AgentLab/function/tests.agents.test_agent.test_generic_agent_parse_retry#L138-L163","kind":"function","name":"test_generic_agent_parse_retry","path":"tests/agents/test_agent.py","language":"python","start_line":138,"end_line":163,"context_start_line":118,"context_end_line":183,"code":" return dict(role=\"assistant\", content=answer)\n 
raise OpenAIError(\"LLM failed to respond\")\n\n def get_stats(self):\n return {\"n_llm_retry\": self.n_retry, \"n_llm_busted_retry\": int(not self.success)}\n\n\n@dataclass\nclass CheatLLMArgs_LLMError(BaseModelArgs):\n n_retry: int = 2\n success: bool = False\n model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatLLM_LLMError(\n n_retry=self.n_retry,\n success=self.success,\n )\n\n\ndef test_generic_agent_parse_retry():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs_ParseRetry(n_retry=2),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n # TODO why these tests don't work with ray backend?\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n print(result_record)\n target = {\n \"stats.cum_n_retry\": 2,\n \"stats.cum_busted_retry\": 0,\n \"n_steps\": 1,\n \"cum_reward\": 1.0,\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\ndef test_bust_parse_retry():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs_ParseRetry(n_retry=10),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"stats.cum_n_retry\": 5,\n \"stats.cum_busted_retry\": 1,","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.test_bust_parse_retry","uri":"program://AgentLab/function/tests.agents.test_agent.test_bust_parse_retry#L166-L191","kind":"function","name":"test_bust_parse_retry","path":"tests/agents/test_agent.py","language":"python","start_line":166,"end_line":191,"context_start_line":146,"context_end_line":211,"code":"\n with tempfile.TemporaryDirectory() as tmp_dir:\n # TODO why these tests don't work with ray backend?\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n print(result_record)\n target = {\n \"stats.cum_n_retry\": 2,\n \"stats.cum_busted_retry\": 0,\n \"n_steps\": 1,\n \"cum_reward\": 1.0,\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\ndef test_bust_parse_retry():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs_ParseRetry(n_retry=10),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"stats.cum_n_retry\": 5,\n \"stats.cum_busted_retry\": 1,\n \"n_steps\": 0,\n \"cum_reward\": 0,\n \"err_msg\": None, # parsing error is considered an agent failure, not a code 
error\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\ndef test_llm_error_success():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatLLMArgs_LLMError(n_retry=3, success=True),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"stats.cum_n_llm_retry\": 3,\n \"n_steps\": 1,","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.test_llm_error_success","uri":"program://AgentLab/function/tests.agents.test_agent.test_llm_error_success#L194-L218","kind":"function","name":"test_llm_error_success","path":"tests/agents/test_agent.py","language":"python","start_line":194,"end_line":218,"context_start_line":174,"context_end_line":238,"code":"\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"stats.cum_n_retry\": 5,\n \"stats.cum_busted_retry\": 1,\n \"n_steps\": 0,\n \"cum_reward\": 0,\n \"err_msg\": None, # parsing error is considered an agent failure, not a code error\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\ndef test_llm_error_success():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatLLMArgs_LLMError(n_retry=3, success=True),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"stats.cum_n_llm_retry\": 3,\n \"n_steps\": 1,\n \"cum_reward\": 1.0,\n \"err_msg\": None,\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\ndef test_llm_error_no_success():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatLLMArgs_LLMError(n_retry=5, success=False),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"n_steps\": 0,\n \"cum_reward\": 0,","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.test_llm_error_no_success","uri":"program://AgentLab/function/tests.agents.test_agent.test_llm_error_no_success#L221-L244","kind":"function","name":"test_llm_error_no_success","path":"tests/agents/test_agent.py","language":"python","start_line":221,"end_line":244,"context_start_line":201,"context_end_line":249,"code":" )\n\n with 
tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"stats.cum_n_llm_retry\": 3,\n \"n_steps\": 1,\n \"cum_reward\": 1.0,\n \"err_msg\": None,\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\ndef test_llm_error_no_success():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatLLMArgs_LLMError(n_retry=5, success=False),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n\n target = {\n \"n_steps\": 0,\n \"cum_reward\": 0,\n \"err_msg\": \"Exception uncaught by agent or environment in task miniwob.click-test.\\nOpenAIError:\\nLLM failed to respond\",\n }\n\n for key, target_val in target.items():\n assert key in result_record\n assert result_record[key].iloc[0] == target_val\n\n\nif __name__ == \"__main__\":\n # test_generic_agent()\n test_generic_agent_parse_retry()","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.__call__","uri":"program://AgentLab/function/tests.agents.test_agent.__call__#L99-L119","kind":"function","name":"__call__","path":"tests/agents/test_agent.py","language":"python","start_line":99,"end_line":119,"context_start_line":79,"context_end_line":139,"code":" def get_stats(self):\n return {}\n\n\n@dataclass\nclass CheatMiniWoBLLMArgs_ParseRetry(BaseModelArgs):\n n_retry: int = 2\n model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatMiniWoBLLM_ParseRetry(n_retry=self.n_retry)\n\n\n@dataclass\nclass CheatLLM_LLMError:\n \"\"\"For unit-testing purposes only. 
Fails to call LLM\"\"\"\n\n n_retry: int = 0\n success: bool = False\n\n def __call__(self, messages) -> str:\n if self.success:\n if isinstance(messages, Discussion):\n prompt = messages.to_string()\n else:\n prompt = messages[1].get(\"content\", \"\")\n match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'\n else:\n raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n \n {action}\n \n \"\"\"\n return dict(role=\"assistant\", content=answer)\n raise OpenAIError(\"LLM failed to respond\")\n\n def get_stats(self):\n return {\"n_llm_retry\": self.n_retry, \"n_llm_busted_retry\": int(not self.success)}\n\n\n@dataclass\nclass CheatLLMArgs_LLMError(BaseModelArgs):\n n_retry: int = 2\n success: bool = False\n model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatLLM_LLMError(\n n_retry=self.n_retry,\n success=self.success,\n )\n\n\ndef test_generic_agent_parse_retry():\n exp_args = ExpArgs(","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.get_stats","uri":"program://AgentLab/function/tests.agents.test_agent.get_stats#L121-L122","kind":"function","name":"get_stats","path":"tests/agents/test_agent.py","language":"python","start_line":121,"end_line":122,"context_start_line":101,"context_end_line":142,"code":" if isinstance(messages, Discussion):\n prompt = messages.to_string()\n else:\n prompt = messages[1].get(\"content\", \"\")\n match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'\n else:\n raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n \n {action}\n \n \"\"\"\n return dict(role=\"assistant\", content=answer)\n raise OpenAIError(\"LLM failed to respond\")\n\n def get_stats(self):\n return {\"n_llm_retry\": self.n_retry, \"n_llm_busted_retry\": int(not self.success)}\n\n\n@dataclass\nclass CheatLLMArgs_LLMError(BaseModelArgs):\n n_retry: int = 2\n success: bool = False\n model_name: str = \"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatLLM_LLMError(\n n_retry=self.n_retry,\n success=self.success,\n )\n\n\ndef test_generic_agent_parse_retry():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs_ParseRetry(n_retry=2),\n flags=FLAGS_GPT_3_5,","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_agent.make_model","uri":"program://AgentLab/function/tests.agents.test_agent.make_model#L131-L135","kind":"function","name":"make_model","path":"tests/agents/test_agent.py","language":"python","start_line":131,"end_line":135,"context_start_line":111,"context_end_line":155,"code":" raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n \n {action}\n \n \"\"\"\n return dict(role=\"assistant\", content=answer)\n raise OpenAIError(\"LLM failed to respond\")\n\n def get_stats(self):\n return {\"n_llm_retry\": self.n_retry, \"n_llm_busted_retry\": int(not self.success)}\n\n\n@dataclass\nclass CheatLLMArgs_LLMError(BaseModelArgs):\n n_retry: int = 2\n success: bool = False\n model_name: str = 
\"test/cheat_miniwob_click_test_parse_retry\"\n\n def make_model(self):\n return CheatLLM_LLMError(\n n_retry=self.n_retry,\n success=self.success,\n )\n\n\ndef test_generic_agent_parse_retry():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs_ParseRetry(n_retry=2),\n flags=FLAGS_GPT_3_5,\n ),\n env_args=EnvArgs(task_name=\"miniwob.click-test\", task_seed=42),\n )\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n # TODO why these tests don't work with ray backend?\n launch_exp.run_experiments(\n 1, [exp_args], Path(tmp_dir) / \"generic_agent_test\", parallel_backend=\"joblib\"\n )\n result_record = inspect_results.load_result_df(tmp_dir, progress_fn=None)\n print(result_record)\n target = {\n \"stats.cum_n_retry\": 2,","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_visualwebarena_agent","uri":"program://AgentLab/module/tests.agents.test_visualwebarena_agent#L1-L48","kind":"module","name":"tests.agents.test_visualwebarena_agent","path":"tests/agents/test_visualwebarena_agent.py","language":"python","start_line":1,"end_line":48,"context_start_line":1,"context_end_line":48,"code":"import logging\nimport tempfile\n\nimport pytest\n\nfrom agentlab.agents.visualwebarena.agent import VisualWebArenaAgentArgs\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\n\n@pytest.mark.pricy\ndef test_agent():\n with tempfile.TemporaryDirectory() as exp_dir:\n env_args = EnvArgs(\n task_name=\"miniwob.click-button\",\n task_seed=0,\n max_steps=10,\n headless=True,\n )\n\n chat_model_args = CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"]\n\n exp_args = [\n ExpArgs(\n agent_args=VisualWebArenaAgentArgs(\n temperature=0.1,\n chat_model_args=chat_model_args,\n ),\n env_args=env_args,\n logging_level=logging.INFO,\n ),\n ExpArgs(\n agent_args=VisualWebArenaAgentArgs(\n temperature=0.0,\n chat_model_args=chat_model_args,\n ),\n env_args=env_args,\n logging_level=logging.INFO,\n ),\n ]\n\n for exp_arg in exp_args:\n exp_arg.agent_args.prepare()\n exp_arg.prepare(exp_dir)\n\n for exp_arg in exp_args:\n exp_arg.run()\n exp_arg.agent_args.close()","source_hash":"286649828d67dc957a6c23717b83ced89895c03c76a57e47edefc1fb9419650f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_visualwebarena_agent.test_agent","uri":"program://AgentLab/function/tests.agents.test_visualwebarena_agent.test_agent#L12-L48","kind":"function","name":"test_agent","path":"tests/agents/test_visualwebarena_agent.py","language":"python","start_line":12,"end_line":48,"context_start_line":1,"context_end_line":48,"code":"import logging\nimport tempfile\n\nimport pytest\n\nfrom agentlab.agents.visualwebarena.agent import VisualWebArenaAgentArgs\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\n\n@pytest.mark.pricy\ndef test_agent():\n with tempfile.TemporaryDirectory() as exp_dir:\n env_args = EnvArgs(\n task_name=\"miniwob.click-button\",\n task_seed=0,\n max_steps=10,\n headless=True,\n )\n\n chat_model_args = CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"]\n\n exp_args = [\n ExpArgs(\n agent_args=VisualWebArenaAgentArgs(\n temperature=0.1,\n chat_model_args=chat_model_args,\n ),\n env_args=env_args,\n logging_level=logging.INFO,\n ),\n ExpArgs(\n agent_args=VisualWebArenaAgentArgs(\n temperature=0.0,\n 
chat_model_args=chat_model_args,\n ),\n env_args=env_args,\n logging_level=logging.INFO,\n ),\n ]\n\n for exp_arg in exp_args:\n exp_arg.agent_args.prepare()\n exp_arg.prepare(exp_dir)\n\n for exp_arg in exp_args:\n exp_arg.run()\n exp_arg.agent_args.close()","source_hash":"286649828d67dc957a6c23717b83ced89895c03c76a57e47edefc1fb9419650f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_gaia_agent","uri":"program://AgentLab/module/tests.agents.test_gaia_agent#L1-L102","kind":"module","name":"tests.agents.test_gaia_agent","path":"tests/agents/test_gaia_agent.py","language":"python","start_line":1,"end_line":102,"context_start_line":1,"context_end_line":102,"code":"import os\nimport uuid\nfrom pathlib import Path\n\ntry:\n from tapeagents.steps import ImageObservation\n\n from agentlab.agents.tapeagent.agent import TapeAgent, TapeAgentArgs, load_config\n from agentlab.benchmarks.gaia import GaiaBenchmark, GaiaQuestion\nexcept ModuleNotFoundError:\n import pytest\n\n pytest.skip(\"Skipping test due to missing dependencies\", allow_module_level=True)\n\n\ndef mock_dataset() -> dict:\n \"\"\"Mock dataset for testing purposes.\"\"\"\n data = [{\"task_id\": str(uuid.uuid4()), \"file_name\": \"\", \"file_path\": \"\"} for i in range(165)]\n data[5] = {\n \"task_id\": \"32102e3e-d12a-4209-9163-7b3a104efe5d\",\n \"Question\": \"\"\"The attached spreadsheet shows the inventory for a movie and video game rental store in Seattle, Washington. What is the title of the oldest Blu-Ray recorded in this spreadsheet? Return it as appearing in the spreadsheet.\"\"\",\n \"Level\": \"2\",\n \"Final answer\": \"Time-Parking 2: Parallel Universe\",\n \"file_name\": \"32102e3e-d12a-4209-9163-7b3a104efe5d.xlsx\",\n \"file_path\": \"tests/data/32102e3e-d12a-4209-9163-7b3a104efe5d.xlsx\",\n \"Annotator Metadata\": {\n \"Steps\": \"\"\"1. Open the attached file.\\n2. Compare the years given in the Blu-Ray section to find the oldest year, 2009.\\n3. Find the title of the Blu-Ray disc that corresponds to the year 2009: Time-Parking 2: Parallel Universe.\"\"\",\n \"Number of steps\": \"3\",\n \"How long did this take?\": \"1 minute\",\n \"Tools\": \"1. Microsoft Excel\",\n \"Number of tools\": \"1\",\n },\n }\n data[20] = {\n \"task_id\": \"df6561b2-7ee5-4540-baab-5095f742716a\",\n \"Question\": \"When you take the average of the standard population deviation of the red numbers and the standard sample deviation of the green numbers in this image using the statistics module in Python 3.11, what is the result rounded to the nearest three decimal points?\",\n \"Level\": \"2\",\n \"Final answer\": \"17.056\",\n \"file_name\": \"df6561b2-7ee5-4540-baab-5095f742716a.png\",\n \"file_path\": \"tests/data/df6561b2-7ee5-4540-baab-5095f742716a.png\",\n \"Annotator Metadata\": {\n \"Steps\": \"1. Opened the PNG file.\\n2. Made separate lists of the red numbers and green numbers.\\n3. Opened a Python compiler.\\n4. Ran the following code:\\n```\\nimport statistics as st\\nred = st.pstdev([24, 74, 28, 54, 73, 33, 64, 73, 60, 53, 59, 40, 65, 76, 48, 34, 62, 70, 31, 24, 51, 55, 78, 76, 41, 77, 51])\\ngreen = st.stdev([39, 29, 28, 72, 68, 47, 64, 74, 72, 40, 75, 26, 27, 37, 31, 55, 44, 64, 65, 38, 46, 66, 35, 76, 61, 53, 49])\\navg = st.mean([red, green])\\nprint(avg)\\n```\\n5. Rounded the output.\",\n \"Number of steps\": \"5\",\n \"How long did this take?\": \"20 minutes\",\n \"Tools\": \"1. Python compiler\\n2. 
Image recognition tools\",\n \"Number of tools\": \"2\",\n },\n }\n return {\"validation\": data}\n\n\ndef test_agent_creation():\n config = load_config(\"gaia_val\")\n args = TapeAgentArgs(config=config)\n agent = args.make_agent()\n assert isinstance(agent, TapeAgent)\n assert agent.agent.name == \"gaia_agent\"\n\n\ndef test_gaia_bench():\n config = load_config(\"gaia_val\")\n bench = GaiaBenchmark.from_config(config, dataset=mock_dataset())\n assert bench.name == \"gaia\"\n assert bench.split == \"validation\"\n assert len(bench.env_args_list) == 165\n\n task = bench.env_args_list[5].task\n question = \"\"\"The attached spreadsheet shows the inventory for a movie and video game rental store in Seattle, Washington. What is the title of the oldest Blu-Ray recorded in this spreadsheet? Return it as appearing in the spreadsheet.\"\"\"\n steps = \"\"\"1. Open the attached file.\\n2. Compare the years given in the Blu-Ray section to find the oldest year, 2009.\\n3. Find the title of the Blu-Ray disc that corresponds to the year 2009: Time-Parking 2: Parallel Universe.\"\"\"\n assert task[\"task_id\"] == \"32102e3e-d12a-4209-9163-7b3a104efe5d\"\n assert task[\"Question\"] == question\n assert task[\"Level\"] == \"2\"\n assert task[\"Final answer\"] == \"Time-Parking 2: Parallel Universe\"\n assert task[\"file_name\"] == \"32102e3e-d12a-4209-9163-7b3a104efe5d.xlsx\"\n assert task[\"Annotator Metadata\"][\"Steps\"] == steps\n assert task[\"Annotator Metadata\"][\"Number of steps\"] == \"3\"\n assert task[\"Annotator Metadata\"][\"How long did this take?\"] == \"1 minute\"\n assert task[\"Annotator Metadata\"][\"Tools\"] == \"1. Microsoft Excel\"\n assert task[\"Annotator Metadata\"][\"Number of tools\"] == \"1\"\n\n\ndef test_gaia_gym_reset():\n exp_dir = \"/tmp/gaia_unit_test\"\n os.makedirs(exp_dir, exist_ok=True)\n\n config = load_config(\"gaia_val\")\n bench = GaiaBenchmark.from_config(config, dataset=mock_dataset())\n args = bench.env_args_list[5]\n env = args.make_env(Path(exp_dir))\n steps, _ = env.reset()\n assert len(steps) == 1\n assert isinstance(steps[0], GaiaQuestion)\n assert steps[0].content.startswith(args.task[\"Question\"])\n\n args = bench.env_args_list[20]\n env = args.make_env(Path(exp_dir))\n steps, _ = env.reset()\n assert len(steps) == 2\n assert isinstance(steps[0], GaiaQuestion)\n assert steps[0].content == args.task[\"Question\"]\n assert isinstance(steps[1], ImageObservation)\n assert os.path.basename(steps[1].image_path) == args.task[\"file_name\"]","source_hash":"3973ab8712b8441a0bdc67ee6032767bee5bfc2c2d5fc2645ffda3029149e000","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_gaia_agent.mock_dataset","uri":"program://AgentLab/function/tests.agents.test_gaia_agent.mock_dataset#L16-L49","kind":"function","name":"mock_dataset","path":"tests/agents/test_gaia_agent.py","language":"python","start_line":16,"end_line":49,"context_start_line":1,"context_end_line":69,"code":"import os\nimport uuid\nfrom pathlib import Path\n\ntry:\n from tapeagents.steps import ImageObservation\n\n from agentlab.agents.tapeagent.agent import TapeAgent, TapeAgentArgs, load_config\n from agentlab.benchmarks.gaia import GaiaBenchmark, GaiaQuestion\nexcept ModuleNotFoundError:\n import pytest\n\n pytest.skip(\"Skipping test due to missing dependencies\", allow_module_level=True)\n\n\ndef mock_dataset() -> dict:\n \"\"\"Mock dataset for testing purposes.\"\"\"\n data = [{\"task_id\": str(uuid.uuid4()), \"file_name\": \"\", \"file_path\": \"\"} for i in range(165)]\n 
data[5] = {\n \"task_id\": \"32102e3e-d12a-4209-9163-7b3a104efe5d\",\n \"Question\": \"\"\"The attached spreadsheet shows the inventory for a movie and video game rental store in Seattle, Washington. What is the title of the oldest Blu-Ray recorded in this spreadsheet? Return it as appearing in the spreadsheet.\"\"\",\n \"Level\": \"2\",\n \"Final answer\": \"Time-Parking 2: Parallel Universe\",\n \"file_name\": \"32102e3e-d12a-4209-9163-7b3a104efe5d.xlsx\",\n \"file_path\": \"tests/data/32102e3e-d12a-4209-9163-7b3a104efe5d.xlsx\",\n \"Annotator Metadata\": {\n \"Steps\": \"\"\"1. Open the attached file.\\n2. Compare the years given in the Blu-Ray section to find the oldest year, 2009.\\n3. Find the title of the Blu-Ray disc that corresponds to the year 2009: Time-Parking 2: Parallel Universe.\"\"\",\n \"Number of steps\": \"3\",\n \"How long did this take?\": \"1 minute\",\n \"Tools\": \"1. Microsoft Excel\",\n \"Number of tools\": \"1\",\n },\n }\n data[20] = {\n \"task_id\": \"df6561b2-7ee5-4540-baab-5095f742716a\",\n \"Question\": \"When you take the average of the standard population deviation of the red numbers and the standard sample deviation of the green numbers in this image using the statistics module in Python 3.11, what is the result rounded to the nearest three decimal points?\",\n \"Level\": \"2\",\n \"Final answer\": \"17.056\",\n \"file_name\": \"df6561b2-7ee5-4540-baab-5095f742716a.png\",\n \"file_path\": \"tests/data/df6561b2-7ee5-4540-baab-5095f742716a.png\",\n \"Annotator Metadata\": {\n \"Steps\": \"1. Opened the PNG file.\\n2. Made separate lists of the red numbers and green numbers.\\n3. Opened a Python compiler.\\n4. Ran the following code:\\n```\\nimport statistics as st\\nred = st.pstdev([24, 74, 28, 54, 73, 33, 64, 73, 60, 53, 59, 40, 65, 76, 48, 34, 62, 70, 31, 24, 51, 55, 78, 76, 41, 77, 51])\\ngreen = st.stdev([39, 29, 28, 72, 68, 47, 64, 74, 72, 40, 75, 26, 27, 37, 31, 55, 44, 64, 65, 38, 46, 66, 35, 76, 61, 53, 49])\\navg = st.mean([red, green])\\nprint(avg)\\n```\\n5. Rounded the output.\",\n \"Number of steps\": \"5\",\n \"How long did this take?\": \"20 minutes\",\n \"Tools\": \"1. Python compiler\\n2. Image recognition tools\",\n \"Number of tools\": \"2\",\n },\n }\n return {\"validation\": data}\n\n\ndef test_agent_creation():\n config = load_config(\"gaia_val\")\n args = TapeAgentArgs(config=config)\n agent = args.make_agent()\n assert isinstance(agent, TapeAgent)\n assert agent.agent.name == \"gaia_agent\"\n\n\ndef test_gaia_bench():\n config = load_config(\"gaia_val\")\n bench = GaiaBenchmark.from_config(config, dataset=mock_dataset())\n assert bench.name == \"gaia\"\n assert bench.split == \"validation\"\n assert len(bench.env_args_list) == 165\n\n task = bench.env_args_list[5].task\n question = \"\"\"The attached spreadsheet shows the inventory for a movie and video game rental store in Seattle, Washington. What is the title of the oldest Blu-Ray recorded in this spreadsheet? Return it as appearing in the spreadsheet.\"\"\"\n steps = \"\"\"1. Open the attached file.\\n2. Compare the years given in the Blu-Ray section to find the oldest year, 2009.\\n3. 
Find the title of the Blu-Ray disc that corresponds to the year 2009: Time-Parking 2: Parallel Universe.\"\"\"","source_hash":"3973ab8712b8441a0bdc67ee6032767bee5bfc2c2d5fc2645ffda3029149e000","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_gaia_agent.test_agent_creation","uri":"program://AgentLab/function/tests.agents.test_gaia_agent.test_agent_creation#L52-L57","kind":"function","name":"test_agent_creation","path":"tests/agents/test_gaia_agent.py","language":"python","start_line":52,"end_line":57,"context_start_line":32,"context_end_line":77,"code":" },\n }\n data[20] = {\n \"task_id\": \"df6561b2-7ee5-4540-baab-5095f742716a\",\n \"Question\": \"When you take the average of the standard population deviation of the red numbers and the standard sample deviation of the green numbers in this image using the statistics module in Python 3.11, what is the result rounded to the nearest three decimal points?\",\n \"Level\": \"2\",\n \"Final answer\": \"17.056\",\n \"file_name\": \"df6561b2-7ee5-4540-baab-5095f742716a.png\",\n \"file_path\": \"tests/data/df6561b2-7ee5-4540-baab-5095f742716a.png\",\n \"Annotator Metadata\": {\n \"Steps\": \"1. Opened the PNG file.\\n2. Made separate lists of the red numbers and green numbers.\\n3. Opened a Python compiler.\\n4. Ran the following code:\\n```\\nimport statistics as st\\nred = st.pstdev([24, 74, 28, 54, 73, 33, 64, 73, 60, 53, 59, 40, 65, 76, 48, 34, 62, 70, 31, 24, 51, 55, 78, 76, 41, 77, 51])\\ngreen = st.stdev([39, 29, 28, 72, 68, 47, 64, 74, 72, 40, 75, 26, 27, 37, 31, 55, 44, 64, 65, 38, 46, 66, 35, 76, 61, 53, 49])\\navg = st.mean([red, green])\\nprint(avg)\\n```\\n5. Rounded the output.\",\n \"Number of steps\": \"5\",\n \"How long did this take?\": \"20 minutes\",\n \"Tools\": \"1. Python compiler\\n2. Image recognition tools\",\n \"Number of tools\": \"2\",\n },\n }\n return {\"validation\": data}\n\n\ndef test_agent_creation():\n config = load_config(\"gaia_val\")\n args = TapeAgentArgs(config=config)\n agent = args.make_agent()\n assert isinstance(agent, TapeAgent)\n assert agent.agent.name == \"gaia_agent\"\n\n\ndef test_gaia_bench():\n config = load_config(\"gaia_val\")\n bench = GaiaBenchmark.from_config(config, dataset=mock_dataset())\n assert bench.name == \"gaia\"\n assert bench.split == \"validation\"\n assert len(bench.env_args_list) == 165\n\n task = bench.env_args_list[5].task\n question = \"\"\"The attached spreadsheet shows the inventory for a movie and video game rental store in Seattle, Washington. What is the title of the oldest Blu-Ray recorded in this spreadsheet? Return it as appearing in the spreadsheet.\"\"\"\n steps = \"\"\"1. Open the attached file.\\n2. Compare the years given in the Blu-Ray section to find the oldest year, 2009.\\n3. 
Find the title of the Blu-Ray disc that corresponds to the year 2009: Time-Parking 2: Parallel Universe.\"\"\"\n assert task[\"task_id\"] == \"32102e3e-d12a-4209-9163-7b3a104efe5d\"\n assert task[\"Question\"] == question\n assert task[\"Level\"] == \"2\"\n assert task[\"Final answer\"] == \"Time-Parking 2: Parallel Universe\"\n assert task[\"file_name\"] == \"32102e3e-d12a-4209-9163-7b3a104efe5d.xlsx\"\n assert task[\"Annotator Metadata\"][\"Steps\"] == steps\n assert task[\"Annotator Metadata\"][\"Number of steps\"] == \"3\"\n assert task[\"Annotator Metadata\"][\"How long did this take?\"] == \"1 minute\"","source_hash":"3973ab8712b8441a0bdc67ee6032767bee5bfc2c2d5fc2645ffda3029149e000","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_gaia_agent.test_gaia_bench","uri":"program://AgentLab/function/tests.agents.test_gaia_agent.test_gaia_bench#L60-L79","kind":"function","name":"test_gaia_bench","path":"tests/agents/test_gaia_agent.py","language":"python","start_line":60,"end_line":79,"context_start_line":40,"context_end_line":99,"code":" \"file_path\": \"tests/data/df6561b2-7ee5-4540-baab-5095f742716a.png\",\n \"Annotator Metadata\": {\n \"Steps\": \"1. Opened the PNG file.\\n2. Made separate lists of the red numbers and green numbers.\\n3. Opened a Python compiler.\\n4. Ran the following code:\\n```\\nimport statistics as st\\nred = st.pstdev([24, 74, 28, 54, 73, 33, 64, 73, 60, 53, 59, 40, 65, 76, 48, 34, 62, 70, 31, 24, 51, 55, 78, 76, 41, 77, 51])\\ngreen = st.stdev([39, 29, 28, 72, 68, 47, 64, 74, 72, 40, 75, 26, 27, 37, 31, 55, 44, 64, 65, 38, 46, 66, 35, 76, 61, 53, 49])\\navg = st.mean([red, green])\\nprint(avg)\\n```\\n5. Rounded the output.\",\n \"Number of steps\": \"5\",\n \"How long did this take?\": \"20 minutes\",\n \"Tools\": \"1. Python compiler\\n2. Image recognition tools\",\n \"Number of tools\": \"2\",\n },\n }\n return {\"validation\": data}\n\n\ndef test_agent_creation():\n config = load_config(\"gaia_val\")\n args = TapeAgentArgs(config=config)\n agent = args.make_agent()\n assert isinstance(agent, TapeAgent)\n assert agent.agent.name == \"gaia_agent\"\n\n\ndef test_gaia_bench():\n config = load_config(\"gaia_val\")\n bench = GaiaBenchmark.from_config(config, dataset=mock_dataset())\n assert bench.name == \"gaia\"\n assert bench.split == \"validation\"\n assert len(bench.env_args_list) == 165\n\n task = bench.env_args_list[5].task\n question = \"\"\"The attached spreadsheet shows the inventory for a movie and video game rental store in Seattle, Washington. What is the title of the oldest Blu-Ray recorded in this spreadsheet? Return it as appearing in the spreadsheet.\"\"\"\n steps = \"\"\"1. Open the attached file.\\n2. Compare the years given in the Blu-Ray section to find the oldest year, 2009.\\n3. Find the title of the Blu-Ray disc that corresponds to the year 2009: Time-Parking 2: Parallel Universe.\"\"\"\n assert task[\"task_id\"] == \"32102e3e-d12a-4209-9163-7b3a104efe5d\"\n assert task[\"Question\"] == question\n assert task[\"Level\"] == \"2\"\n assert task[\"Final answer\"] == \"Time-Parking 2: Parallel Universe\"\n assert task[\"file_name\"] == \"32102e3e-d12a-4209-9163-7b3a104efe5d.xlsx\"\n assert task[\"Annotator Metadata\"][\"Steps\"] == steps\n assert task[\"Annotator Metadata\"][\"Number of steps\"] == \"3\"\n assert task[\"Annotator Metadata\"][\"How long did this take?\"] == \"1 minute\"\n assert task[\"Annotator Metadata\"][\"Tools\"] == \"1. 
Microsoft Excel\"\n assert task[\"Annotator Metadata\"][\"Number of tools\"] == \"1\"\n\n\ndef test_gaia_gym_reset():\n exp_dir = \"/tmp/gaia_unit_test\"\n os.makedirs(exp_dir, exist_ok=True)\n\n config = load_config(\"gaia_val\")\n bench = GaiaBenchmark.from_config(config, dataset=mock_dataset())\n args = bench.env_args_list[5]\n env = args.make_env(Path(exp_dir))\n steps, _ = env.reset()\n assert len(steps) == 1\n assert isinstance(steps[0], GaiaQuestion)\n assert steps[0].content.startswith(args.task[\"Question\"])\n\n args = bench.env_args_list[20]\n env = args.make_env(Path(exp_dir))\n steps, _ = env.reset()\n assert len(steps) == 2\n assert isinstance(steps[0], GaiaQuestion)","source_hash":"3973ab8712b8441a0bdc67ee6032767bee5bfc2c2d5fc2645ffda3029149e000","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_gaia_agent.test_gaia_gym_reset","uri":"program://AgentLab/function/tests.agents.test_gaia_agent.test_gaia_gym_reset#L82-L102","kind":"function","name":"test_gaia_gym_reset","path":"tests/agents/test_gaia_agent.py","language":"python","start_line":82,"end_line":102,"context_start_line":62,"context_end_line":102,"code":" bench = GaiaBenchmark.from_config(config, dataset=mock_dataset())\n assert bench.name == \"gaia\"\n assert bench.split == \"validation\"\n assert len(bench.env_args_list) == 165\n\n task = bench.env_args_list[5].task\n question = \"\"\"The attached spreadsheet shows the inventory for a movie and video game rental store in Seattle, Washington. What is the title of the oldest Blu-Ray recorded in this spreadsheet? Return it as appearing in the spreadsheet.\"\"\"\n steps = \"\"\"1. Open the attached file.\\n2. Compare the years given in the Blu-Ray section to find the oldest year, 2009.\\n3. Find the title of the Blu-Ray disc that corresponds to the year 2009: Time-Parking 2: Parallel Universe.\"\"\"\n assert task[\"task_id\"] == \"32102e3e-d12a-4209-9163-7b3a104efe5d\"\n assert task[\"Question\"] == question\n assert task[\"Level\"] == \"2\"\n assert task[\"Final answer\"] == \"Time-Parking 2: Parallel Universe\"\n assert task[\"file_name\"] == \"32102e3e-d12a-4209-9163-7b3a104efe5d.xlsx\"\n assert task[\"Annotator Metadata\"][\"Steps\"] == steps\n assert task[\"Annotator Metadata\"][\"Number of steps\"] == \"3\"\n assert task[\"Annotator Metadata\"][\"How long did this take?\"] == \"1 minute\"\n assert task[\"Annotator Metadata\"][\"Tools\"] == \"1. 
Microsoft Excel\"\n assert task[\"Annotator Metadata\"][\"Number of tools\"] == \"1\"\n\n\ndef test_gaia_gym_reset():\n exp_dir = \"/tmp/gaia_unit_test\"\n os.makedirs(exp_dir, exist_ok=True)\n\n config = load_config(\"gaia_val\")\n bench = GaiaBenchmark.from_config(config, dataset=mock_dataset())\n args = bench.env_args_list[5]\n env = args.make_env(Path(exp_dir))\n steps, _ = env.reset()\n assert len(steps) == 1\n assert isinstance(steps[0], GaiaQuestion)\n assert steps[0].content.startswith(args.task[\"Question\"])\n\n args = bench.env_args_list[20]\n env = args.make_env(Path(exp_dir))\n steps, _ = env.reset()\n assert len(steps) == 2\n assert isinstance(steps[0], GaiaQuestion)\n assert steps[0].content == args.task[\"Question\"]\n assert isinstance(steps[1], ImageObservation)\n assert os.path.basename(steps[1].image_path) == args.task[\"file_name\"]","source_hash":"3973ab8712b8441a0bdc67ee6032767bee5bfc2c2d5fc2645ffda3029149e000","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_generic_prompt","uri":"program://AgentLab/module/tests.agents.test_generic_prompt#L1-L259","kind":"module","name":"tests.agents.test_generic_prompt","path":"tests/agents/test_generic_prompt.py","language":"python","start_line":1,"end_line":259,"context_start_line":1,"context_end_line":259,"code":"from copy import deepcopy\n\nimport bgym\nimport pytest\nfrom bgym import HighLevelActionSet, HighLevelActionSetArgs\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_3_5\nfrom agentlab.agents.generic_agent.generic_agent_prompt import GenericPromptFlags, MainPrompt\nfrom agentlab.llm.llm_utils import count_tokens\n\nhtml_template = \"\"\"\n\n\n
<div>\nHello World.\nStep {}.\n
\n\nsome extra text to make the html longer\n\n\"\"\"\n\nbase_obs = {\n \"goal\": \"do this and that\",\n \"goal_object\": [{\"type\": \"text\", \"text\": \"do this and that\"}],\n \"chat_messages\": [{\"role\": \"user\", \"message\": \"do this and that\"}],\n \"axtree_txt\": \"[1] Click me\",\n \"focused_element_bid\": \"45-256\",\n \"open_pages_urls\": [\"https://example.com\"],\n \"open_pages_titles\": [\"Example\"],\n \"active_page_index\": 0,\n}\n\nOBS_HISTORY = [\n base_obs\n | {\n \"pruned_html\": html_template.format(1),\n \"last_action_error\": \"\",\n },\n base_obs\n | {\n \"pruned_html\": html_template.format(2),\n \"last_action_error\": \"Hey, this is an error in the past\",\n },\n base_obs\n | {\n \"pruned_html\": html_template.format(3),\n \"last_action_error\": \"Hey, there is an error now\",\n },\n]\nACTIONS = [\"click('41')\", \"click('42')\"]\nMEMORIES = [\"memory A\", \"memory B\"]\nTHOUGHTS = [\"thought A\", \"thought B\"]\n\nALL_TRUE_FLAGS = GenericPromptFlags(\n obs=dp.ObsFlags(\n use_html=True,\n use_ax_tree=True,\n use_tabs=True,\n use_focused_element=True,\n use_error_logs=True,\n use_history=True,\n use_past_error_logs=True,\n use_action_history=True,\n use_think_history=True,\n use_diff=True,\n html_type=\"pruned_html\",\n use_screenshot=False, # TODO test this\n use_som=False, # TODO test this\n extract_visible_tag=True,\n extract_clickable_tag=True,\n extract_coords=False,\n filter_visible_elements_only=True,\n ),\n action=dp.ActionFlags(\n action_set=HighLevelActionSetArgs(\n subsets=[\"bid\"],\n multiaction=True,\n ),\n long_description=True,\n individual_examples=True,\n ),\n use_plan=True,\n use_criticise=True,\n use_thinking=True,\n use_memory=True,\n use_concrete_example=True,\n use_abstract_example=True,\n use_hints=True,\n enable_chat=False, # TODO test this\n max_prompt_tokens=None,\n be_cautious=True,\n extra_instructions=None,\n)\n\n\nFLAG_EXPECTED_PROMPT = [\n (\n \"obs.use_html\",\n (\"HTML:\", \"\", \"Hello World.\", \"Step 3.\"), # last obs will be in obs\n ),\n (\n \"obs.use_ax_tree\",\n (\"AXTree:\", \"Click me\"),\n ),\n (\n \"obs.use_tabs\",\n (\"Currently open tabs:\", \"(active tab)\"),\n ),\n (\n \"obs.use_focused_element\",\n (\"Focused element:\", \"bid='45-256'\"),\n ),\n (\n \"obs.use_error_logs\",\n (\"Hey, there is an error now\",),\n ),\n (\n \"use_plan\",\n (\"You just executed step\", \"1- think\\n2- do it\"),\n ),\n (\n \"use_criticise\",\n (\n \"Criticise action_draft\",\n \"\",\n \"\",\n \"\",\n ),\n ),\n (\n \"use_thinking\",\n (\"\", \"\"),\n ),\n (\n \"obs.use_past_error_logs\",\n (\"Hey, this is an error in the past\",),\n ),\n (\n \"obs.use_action_history\",\n (\"\", \"click('41')\", \"click('42')\"),\n ),\n (\n \"use_memory\",\n (\"\", \"\", \"memory A\", \"memory B\"),\n ),\n # (\n # \"obs.use_diff\",\n # (\"diff:\", \"- Step 2\", \"Identical\"),\n # ),\n (\n \"use_concrete_example\",\n (\"# Concrete Example\", \"\\nclick('a324')\"),\n ),\n (\n \"use_abstract_example\",\n (\"# Abstract Example\",),\n ),\n # (\n # \"action.action_set.multiaction\",\n # (\"One or several actions, separated by new lines\",),\n # ),\n]\n\n\ndef test_shrinking_observation():\n flags = deepcopy(FLAGS_GPT_3_5)\n flags.obs.use_html = True\n\n prompt_maker = MainPrompt(\n action_set=HighLevelActionSet(),\n obs_history=OBS_HISTORY,\n actions=ACTIONS,\n memories=MEMORIES,\n thoughts=THOUGHTS,\n previous_plan=\"1- think\\n2- do it\",\n step=2,\n flags=flags,\n )\n\n prompt = str(prompt_maker.prompt)\n new_prompt = str(\n dp.fit_tokens(prompt_maker, 
max_prompt_tokens=count_tokens(prompt) - 1, max_iterations=7)\n )\n assert count_tokens(new_prompt) < count_tokens(prompt)\n assert \"[1] Click me\" in prompt\n assert \"[1] Click me\" in new_prompt\n assert \"\" in prompt\n assert \"\" not in new_prompt\n\n\n@pytest.mark.parametrize(\"flag_name, expected_prompts\", FLAG_EXPECTED_PROMPT)\ndef test_main_prompt_elements_gone_one_at_a_time(flag_name: str, expected_prompts):\n\n if flag_name in [\"use_thinking\", \"obs.use_action_history\"]:\n return # TODO design new tests for those two flags\n\n # Disable the flag\n flags = deepcopy(ALL_TRUE_FLAGS)\n if \".\" in flag_name:\n prefix, flag_name = flag_name.split(\".\")\n sub_flags = getattr(flags, prefix)\n setattr(sub_flags, flag_name, False)\n else:\n setattr(flags, flag_name, False)\n\n if flag_name == \"use_memory\":\n memories = None\n else:\n memories = MEMORIES\n\n # Initialize MainPrompt\n prompt = str(\n MainPrompt(\n action_set=flags.action.action_set.make_action_set(),\n obs_history=OBS_HISTORY,\n actions=ACTIONS,\n memories=memories,\n thoughts=THOUGHTS,\n previous_plan=\"1- think\\n2- do it\",\n step=2,\n flags=flags,\n ).prompt\n )\n\n # Verify all elements are not present\n for expected in expected_prompts:\n assert expected not in prompt\n\n\ndef test_main_prompt_elements_present():\n # Make sure the flag is enabled\n\n # Initialize MainPrompt\n prompt = str(\n MainPrompt(\n action_set=HighLevelActionSet(),\n obs_history=OBS_HISTORY,\n actions=ACTIONS,\n memories=MEMORIES,\n thoughts=THOUGHTS,\n previous_plan=\"1- think\\n2- do it\",\n step=2,\n flags=ALL_TRUE_FLAGS,\n ).prompt\n )\n # Verify all elements are not present\n for _, expected_prompts in FLAG_EXPECTED_PROMPT:\n for expected in expected_prompts:\n assert expected in prompt\n\n\nif __name__ == \"__main__\":\n # for debugging\n test_shrinking_observation()\n test_main_prompt_elements_present()\n # for flag, expected_prompts in FLAG_EXPECTED_PROMPT:\n # test_main_prompt_elements_gone_one_at_a_time(flag, expected_prompts)","source_hash":"70ca9f3685fe3a46c52a6b90f3d153e9e86317d2c424342a1b20fc46f1a94df8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_generic_prompt.test_shrinking_observation","uri":"program://AgentLab/function/tests.agents.test_generic_prompt.test_shrinking_observation#L167-L190","kind":"function","name":"test_shrinking_observation","path":"tests/agents/test_generic_prompt.py","language":"python","start_line":167,"end_line":190,"context_start_line":147,"context_end_line":210,"code":" ),\n # (\n # \"obs.use_diff\",\n # (\"diff:\", \"- Step 2\", \"Identical\"),\n # ),\n (\n \"use_concrete_example\",\n (\"# Concrete Example\", \"\\nclick('a324')\"),\n ),\n (\n \"use_abstract_example\",\n (\"# Abstract Example\",),\n ),\n # (\n # \"action.action_set.multiaction\",\n # (\"One or several actions, separated by new lines\",),\n # ),\n]\n\n\ndef test_shrinking_observation():\n flags = deepcopy(FLAGS_GPT_3_5)\n flags.obs.use_html = True\n\n prompt_maker = MainPrompt(\n action_set=HighLevelActionSet(),\n obs_history=OBS_HISTORY,\n actions=ACTIONS,\n memories=MEMORIES,\n thoughts=THOUGHTS,\n previous_plan=\"1- think\\n2- do it\",\n step=2,\n flags=flags,\n )\n\n prompt = str(prompt_maker.prompt)\n new_prompt = str(\n dp.fit_tokens(prompt_maker, max_prompt_tokens=count_tokens(prompt) - 1, max_iterations=7)\n )\n assert count_tokens(new_prompt) < count_tokens(prompt)\n assert \"[1] Click me\" in prompt\n assert \"[1] Click me\" in new_prompt\n assert \"\" in prompt\n assert \"\" not in 
new_prompt\n\n\n@pytest.mark.parametrize(\"flag_name, expected_prompts\", FLAG_EXPECTED_PROMPT)\ndef test_main_prompt_elements_gone_one_at_a_time(flag_name: str, expected_prompts):\n\n if flag_name in [\"use_thinking\", \"obs.use_action_history\"]:\n return # TODO design new tests for those two flags\n\n # Disable the flag\n flags = deepcopy(ALL_TRUE_FLAGS)\n if \".\" in flag_name:\n prefix, flag_name = flag_name.split(\".\")\n sub_flags = getattr(flags, prefix)\n setattr(sub_flags, flag_name, False)\n else:\n setattr(flags, flag_name, False)\n\n if flag_name == \"use_memory\":\n memories = None\n else:","source_hash":"70ca9f3685fe3a46c52a6b90f3d153e9e86317d2c424342a1b20fc46f1a94df8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tests.agents.test_generic_prompt.test_main_prompt_elements_gone_one_at_a_time","uri":"program://AgentLab/function/tests.agents.test_generic_prompt.test_main_prompt_elements_gone_one_at_a_time#L194-L229","kind":"function","name":"test_main_prompt_elements_gone_one_at_a_time","path":"tests/agents/test_generic_prompt.py","language":"python","start_line":194,"end_line":229,"context_start_line":174,"context_end_line":249,"code":" actions=ACTIONS,\n memories=MEMORIES,\n thoughts=THOUGHTS,\n previous_plan=\"1- think\\n2- do it\",\n step=2,\n flags=flags,\n )\n\n prompt = str(prompt_maker.prompt)\n new_prompt = str(\n dp.fit_tokens(prompt_maker, max_prompt_tokens=count_tokens(prompt) - 1, max_iterations=7)\n )\n assert count_tokens(new_prompt) < count_tokens(prompt)\n assert \"[1] Click me\" in prompt\n assert \"[1] Click me\" in new_prompt\n assert \"\" in prompt\n assert \"\" not in new_prompt\n\n\n@pytest.mark.parametrize(\"flag_name, expected_prompts\", FLAG_EXPECTED_PROMPT)\ndef test_main_prompt_elements_gone_one_at_a_time(flag_name: str, expected_prompts):\n\n if flag_name in [\"use_thinking\", \"obs.use_action_history\"]:\n return # TODO design new tests for those two flags\n\n # Disable the flag\n flags = deepcopy(ALL_TRUE_FLAGS)\n if \".\" in flag_name:\n prefix, flag_name = flag_name.split(\".\")\n sub_flags = getattr(flags, prefix)\n setattr(sub_flags, flag_name, False)\n else:\n setattr(flags, flag_name, False)\n\n if flag_name == \"use_memory\":\n memories = None\n else:\n memories = MEMORIES\n\n # Initialize MainPrompt\n prompt = str(\n MainPrompt(\n action_set=flags.action.action_set.make_action_set(),\n obs_history=OBS_HISTORY,\n actions=ACTIONS,\n memories=memories,\n thoughts=THOUGHTS,\n previous_plan=\"1- think\\n2- do it\",\n step=2,\n flags=flags,\n ).prompt\n )\n\n # Verify all elements are not present\n for expected in expected_prompts:\n assert expected not in prompt\n\n\ndef test_main_prompt_elements_present():\n # Make sure the flag is enabled\n\n # Initialize MainPrompt\n prompt = str(\n MainPrompt(\n action_set=HighLevelActionSet(),\n obs_history=OBS_HISTORY,\n actions=ACTIONS,\n memories=MEMORIES,\n thoughts=THOUGHTS,\n previous_plan=\"1- think\\n2- do it\",\n step=2,\n flags=ALL_TRUE_FLAGS,\n ).prompt\n )\n # Verify all elements are not present\n for _, expected_prompts in FLAG_EXPECTED_PROMPT:","source_hash":"70ca9f3685fe3a46c52a6b90f3d153e9e86317d2c424342a1b20fc46f1a94df8","truncated":false} 
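The test_main_prompt_elements_gone_one_at_a_time record above disables one flag at a time through a dotted name such as "obs.use_html" before asserting that the matching prompt fragments disappear. Below is a minimal, self-contained sketch of that nested-flag toggle; ObsFlags, PromptFlags, and disable_flag are hypothetical stand-ins for illustration, not the real GenericPromptFlags API.

```python
# Sketch of the "disable one flag by dotted name" pattern, assuming
# hypothetical dataclasses in place of GenericPromptFlags.
from copy import deepcopy
from dataclasses import dataclass, field


@dataclass
class ObsFlags:
    use_html: bool = True
    use_error_logs: bool = True


@dataclass
class PromptFlags:
    obs: ObsFlags = field(default_factory=ObsFlags)
    use_memory: bool = True


def disable_flag(flags: PromptFlags, dotted_name: str) -> PromptFlags:
    """Return a deep copy of `flags` with one (possibly nested) flag set to False."""
    flags = deepcopy(flags)
    target = flags
    *prefix, leaf = dotted_name.split(".")
    for attr in prefix:  # walk sub-flag objects, e.g. flags.obs
        target = getattr(target, attr)
    setattr(target, leaf, False)
    return flags


assert disable_flag(PromptFlags(), "obs.use_html").obs.use_html is False
assert disable_flag(PromptFlags(), "use_memory").use_memory is False
```

Deep-copying before mutation keeps a shared ALL_TRUE_FLAGS-style template intact across parametrized test cases.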
{"repo_id":"AgentLab","entity_id":"py:tests.agents.test_generic_prompt.test_main_prompt_elements_present","uri":"program://AgentLab/function/tests.agents.test_generic_prompt.test_main_prompt_elements_present#L232-L251","kind":"function","name":"test_main_prompt_elements_present","path":"tests/agents/test_generic_prompt.py","language":"python","start_line":232,"end_line":251,"context_start_line":212,"context_end_line":259,"code":"\n # Initialize MainPrompt\n prompt = str(\n MainPrompt(\n action_set=flags.action.action_set.make_action_set(),\n obs_history=OBS_HISTORY,\n actions=ACTIONS,\n memories=memories,\n thoughts=THOUGHTS,\n previous_plan=\"1- think\\n2- do it\",\n step=2,\n flags=flags,\n ).prompt\n )\n\n # Verify all elements are not present\n for expected in expected_prompts:\n assert expected not in prompt\n\n\ndef test_main_prompt_elements_present():\n # Make sure the flag is enabled\n\n # Initialize MainPrompt\n prompt = str(\n MainPrompt(\n action_set=HighLevelActionSet(),\n obs_history=OBS_HISTORY,\n actions=ACTIONS,\n memories=MEMORIES,\n thoughts=THOUGHTS,\n previous_plan=\"1- think\\n2- do it\",\n step=2,\n flags=ALL_TRUE_FLAGS,\n ).prompt\n )\n # Verify all elements are not present\n for _, expected_prompts in FLAG_EXPECTED_PROMPT:\n for expected in expected_prompts:\n assert expected in prompt\n\n\nif __name__ == \"__main__\":\n # for debugging\n test_shrinking_observation()\n test_main_prompt_elements_present()\n # for flag, expected_prompts in FLAG_EXPECTED_PROMPT:\n # test_main_prompt_elements_gone_one_at_a_time(flag, expected_prompts)","source_hash":"70ca9f3685fe3a46c52a6b90f3d153e9e86317d2c424342a1b20fc46f1a94df8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:docs.source.conf","uri":"program://AgentLab/module/docs.source.conf#L1-L75","kind":"module","name":"docs.source.conf","path":"docs/source/conf.py","language":"python","start_line":1,"end_line":75,"context_start_line":1,"context_end_line":75,"code":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nimport os\nimport subprocess\n\n\n# Automatically retrieve the project version from Git\ndef get_version():\n try:\n return subprocess.check_output([\"git\", \"describe\", \"--tags\"], encoding=\"utf-8\").strip()\n except Exception:\n return \"0.0.0\"\n\n\nproject = \"AgentLab\"\nauthor = \"ServiceNow\"\nrelease = get_version() # Full version string including tags\nversion = release # Short version (e.g., 1.0)\n\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\nextensions = [\n \"sphinx.ext.duration\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n # \"myst_parser\", # Add this to enable Markdown parsing\n \"sphinx.ext.napoleon\",\n]\n\nintersphinx_mapping = {\n \"rtd\": (\"https://docs.readthedocs.io/en/stable/\", None),\n \"python\": (\"https://docs.python.org/3/\", None),\n \"sphinx\": (\"https://www.sphinx-doc.org/en/master/\", None),\n}\nintersphinx_disabled_domains = [\"std\"]\n\nautodoc_default_options = {\n \"members\": True,\n \"undoc-members\": True,\n \"show-inheritance\": 
True,\n}\n\nsource_suffix = {\n \".rst\": \"restructuredtext\",\n}\n\n\ntemplates_path = [\"_templates\"]\nexclude_patterns = []\n\n\n# -- Options for HTML output -------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_options = {\n \"navigation_depth\": -1,\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"version_selector\": True,\n}\nhtml_static_path = [\"_static\"]\n\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../../src\"))","source_hash":"7f722dfbb158949f79af66975bcbd75d4d7bd4a3423059fe20b75e6747ba7454","truncated":false} {"repo_id":"AgentLab","entity_id":"py:docs.source.conf.get_version","uri":"program://AgentLab/function/docs.source.conf.get_version#L14-L18","kind":"function","name":"get_version","path":"docs/source/conf.py","language":"python","start_line":14,"end_line":18,"context_start_line":1,"context_end_line":38,"code":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nimport os\nimport subprocess\n\n\n# Automatically retrieve the project version from Git\ndef get_version():\n try:\n return subprocess.check_output([\"git\", \"describe\", \"--tags\"], encoding=\"utf-8\").strip()\n except Exception:\n return \"0.0.0\"\n\n\nproject = \"AgentLab\"\nauthor = \"ServiceNow\"\nrelease = get_version() # Full version string including tags\nversion = release # Short version (e.g., 1.0)\n\n\n# -- General configuration ---------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration\nextensions = [\n \"sphinx.ext.duration\",\n \"sphinx.ext.doctest\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.autosummary\",\n \"sphinx.ext.intersphinx\",\n # \"myst_parser\", # Add this to enable Markdown parsing\n \"sphinx.ext.napoleon\",\n]\n","source_hash":"7f722dfbb158949f79af66975bcbd75d4d7bd4a3423059fe20b75e6747ba7454","truncated":false} {"repo_id":"AgentLab","entity_id":"py:tutorials.2_eval_on_miniwob.experiment","uri":"program://AgentLab/module/tutorials.2_eval_on_miniwob.experiment#L1-L47","kind":"module","name":"tutorials.2_eval_on_miniwob.experiment","path":"tutorials/2_eval_on_miniwob/experiment.py","language":"python","start_line":1,"end_line":47,"context_start_line":1,"context_end_line":47,"code":"from pathlib import Path\n\nfrom bgym import DEFAULT_BENCHMARKS\nfrom dotenv import load_dotenv\n\nfrom agentlab.agents.generic_agent.tmlr_config import (\n BASE_FLAGS,\n CHAT_MODEL_ARGS_DICT,\n GenericAgentArgs,\n)\nfrom agentlab.benchmarks.setup_benchmark import ensure_benchmark\nfrom agentlab.experiments.study import Study\n\n# This ensures MiniWob assets are downloaded and sets the MINIWOB_URL .env in the project dir.\nproject_dir = Path(__file__).parents[2]\nensure_benchmark(\"miniwob\", project_root=project_dir)\nload_dotenv(project_dir.joinpath(\".env\"), override=False) # load .env variables\n\n\nagent_config = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\n \"openai/gpt-5-nano-2025-08-07\"\n ], # or CHAT_MODEL_ARGS_DICT[\"openrouter/openai/gpt-5-nano\"]\n flags=BASE_FLAGS,\n)\n\n# choose a list of agents to evaluate\nagent_configs = 
[agent_config]\n# chose your benchmark\nbenchmark = DEFAULT_BENCHMARKS[\"miniwob_tiny_test\"]()\n\n# benchmark = DEFAULT_BENCHMARKS[\"miniwob\"]() # 125 tasks\n# benchmark = benchmark.subset_from_glob(column=\"task_name\", glob=\"*enter*\")\n\n## Number of parallel jobs\nn_jobs = 4 # Make sure to use 1 job when debugging in VSCode\n\nif __name__ == \"__main__\": # necessary for dask backend\n\n # A study evaluates multiple agents on a benchmark\n study = Study(agent_configs, benchmark)\n\n study.run(\n n_jobs=n_jobs,\n parallel_backend=\"ray\", # \"ray\", \"joblib\" or \"sequential\"\n n_relaunch=3, # will automatically relaunch tasks with system error or incomplete tasks.\n )","source_hash":"2b1633b04e44be0153236f5ecfe637bac3342fdf685db42ae3b87f19f70b8cd3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:experiments.run_osworld","uri":"program://AgentLab/module/experiments.run_osworld#L1-L66","kind":"module","name":"experiments.run_osworld","path":"experiments/run_osworld.py","language":"python","start_line":1,"end_line":66,"context_start_line":1,"context_end_line":66,"code":"import json\nimport logging\nimport os\n\nfrom agentlab.agents.tool_use_agent.tool_use_agent import OSWORLD_CLAUDE\nfrom agentlab.benchmarks.osworld import OsworldBenchmark\nfrom agentlab.experiments.study import Study, make_study\n\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\ndef get_most_recent_incomplete_study() -> Study:\n \"\"\"\n Relaunch an existing study, this will continue incomplete experiments and relaunch errored experiments.\n \"\"\"\n study = Study.load_most_recent()\n study.find_incomplete(include_errors=True)\n return study\n\n\ndef get_task_ids() -> set[str]:\n with open(\"experiments/osworld_debug_task_ids.json\", \"r\") as f:\n task_ids = json.load(f)\n return set([task[\"id\"] for task in task_ids])\n\n\ndef main():\n n_jobs = 4\n use_vmware = True\n relaunch = False\n agent_args = [\n OSWORLD_CLAUDE,\n # OSWORLD_OAI # performs poorly.\n ] # type: ignore\n parallel_backend = \"ray\"\n os.environ[\"AGENTLAB_DEBUG\"] = os.environ.get(\"AGENTLAB_DEBUG\", \"1\")\n\n study = make_study(\n benchmark=OsworldBenchmark(\n test_set_name=\"test_small.json\"\n ), # or test_all.json (Exper) # type: ignore\n agent_args=agent_args, # type: ignore\n comment=\"osworld debug 2\",\n logging_level=logging.INFO,\n logging_level_stdout=logging.INFO,\n )\n\n if use_vmware:\n for exp_args in study.exp_args_list:\n exp_args.env_args.provider_name = \"vmware\" # type: ignore\n exp_args.env_args.path_to_vm = \"OSWorld/vmware_vm_data/Ubuntu0/Ubuntu0.vmx\" # type: ignore\n parallel_backend = \"sequential\"\n\n if os.environ.get(\"AGENTLAB_DEBUG\"):\n task_ids = get_task_ids()\n study.exp_args_list = [exp_args for exp_args in study.exp_args_list if exp_args.env_args.task[\"id\"] in task_ids] # type: ignore\n print(f\"Debug on {len(study.exp_args_list)} experiments\")\n n_jobs = 1 # Make sure to use 1 job when debugging in VS\n\n study = get_most_recent_incomplete_study() if relaunch else study\n study.run(n_jobs=n_jobs, n_relaunch=1, parallel_backend=parallel_backend)\n\n\nif __name__ == \"__main__\":\n main()","source_hash":"0fc43cb4e5da8b657895d33c00a64d162b11f1c6d537b3ef7dda5d8816b0d11e","truncated":false} 
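The tutorial record above narrows a benchmark with subset_from_glob(column="task_name", glob="*enter*") before running the study. The sketch below shows the kind of Unix-style glob filtering this implies; TaskArgs and the free-standing subset_from_glob are illustrative assumptions, not the actual bgym implementation.

```python
# Hedged re-creation of a task-name glob filter; TaskArgs stands in for
# the real env-args records.
from dataclasses import dataclass
from fnmatch import fnmatch


@dataclass
class TaskArgs:
    task_name: str


def subset_from_glob(env_args_list: list[TaskArgs], column: str, glob: str) -> list[TaskArgs]:
    """Keep entries whose `column` attribute matches the Unix-style glob."""
    return [args for args in env_args_list if fnmatch(getattr(args, column), glob)]


tasks = [
    TaskArgs("miniwob.click-test"),
    TaskArgs("miniwob.enter-text"),
    TaskArgs("workarena.create-incident"),
]
print([t.task_name for t in subset_from_glob(tasks, "task_name", "*enter*")])
# -> ['miniwob.enter-text']
```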
{"repo_id":"AgentLab","entity_id":"py:experiments.run_osworld.get_most_recent_incomplete_study","uri":"program://AgentLab/function/experiments.run_osworld.get_most_recent_incomplete_study#L13-L19","kind":"function","name":"get_most_recent_incomplete_study","path":"experiments/run_osworld.py","language":"python","start_line":13,"end_line":19,"context_start_line":1,"context_end_line":39,"code":"import json\nimport logging\nimport os\n\nfrom agentlab.agents.tool_use_agent.tool_use_agent import OSWORLD_CLAUDE\nfrom agentlab.benchmarks.osworld import OsworldBenchmark\nfrom agentlab.experiments.study import Study, make_study\n\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\ndef get_most_recent_incomplete_study() -> Study:\n \"\"\"\n Relaunch an existing study, this will continue incomplete experiments and relaunch errored experiments.\n \"\"\"\n study = Study.load_most_recent()\n study.find_incomplete(include_errors=True)\n return study\n\n\ndef get_task_ids() -> set[str]:\n with open(\"experiments/osworld_debug_task_ids.json\", \"r\") as f:\n task_ids = json.load(f)\n return set([task[\"id\"] for task in task_ids])\n\n\ndef main():\n n_jobs = 4\n use_vmware = True\n relaunch = False\n agent_args = [\n OSWORLD_CLAUDE,\n # OSWORLD_OAI # performs poorly.\n ] # type: ignore\n parallel_backend = \"ray\"\n os.environ[\"AGENTLAB_DEBUG\"] = os.environ.get(\"AGENTLAB_DEBUG\", \"1\")\n\n study = make_study(","source_hash":"0fc43cb4e5da8b657895d33c00a64d162b11f1c6d537b3ef7dda5d8816b0d11e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:experiments.run_osworld.get_task_ids","uri":"program://AgentLab/function/experiments.run_osworld.get_task_ids#L22-L25","kind":"function","name":"get_task_ids","path":"experiments/run_osworld.py","language":"python","start_line":22,"end_line":25,"context_start_line":2,"context_end_line":45,"code":"import logging\nimport os\n\nfrom agentlab.agents.tool_use_agent.tool_use_agent import OSWORLD_CLAUDE\nfrom agentlab.benchmarks.osworld import OsworldBenchmark\nfrom agentlab.experiments.study import Study, make_study\n\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\ndef get_most_recent_incomplete_study() -> Study:\n \"\"\"\n Relaunch an existing study, this will continue incomplete experiments and relaunch errored experiments.\n \"\"\"\n study = Study.load_most_recent()\n study.find_incomplete(include_errors=True)\n return study\n\n\ndef get_task_ids() -> set[str]:\n with open(\"experiments/osworld_debug_task_ids.json\", \"r\") as f:\n task_ids = json.load(f)\n return set([task[\"id\"] for task in task_ids])\n\n\ndef main():\n n_jobs = 4\n use_vmware = True\n relaunch = False\n agent_args = [\n OSWORLD_CLAUDE,\n # OSWORLD_OAI # performs poorly.\n ] # type: ignore\n parallel_backend = \"ray\"\n os.environ[\"AGENTLAB_DEBUG\"] = os.environ.get(\"AGENTLAB_DEBUG\", \"1\")\n\n study = make_study(\n benchmark=OsworldBenchmark(\n test_set_name=\"test_small.json\"\n ), # or test_all.json (Exper) # type: ignore\n agent_args=agent_args, # type: ignore\n comment=\"osworld debug 2\",\n logging_level=logging.INFO,","source_hash":"0fc43cb4e5da8b657895d33c00a64d162b11f1c6d537b3ef7dda5d8816b0d11e","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:experiments.run_osworld.main","uri":"program://AgentLab/function/experiments.run_osworld.main#L28-L62","kind":"function","name":"main","path":"experiments/run_osworld.py","language":"python","start_line":28,"end_line":62,"context_start_line":8,"context_end_line":66,"code":"\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\ndef get_most_recent_incomplete_study() -> Study:\n \"\"\"\n Relaunch an existing study, this will continue incomplete experiments and relaunch errored experiments.\n \"\"\"\n study = Study.load_most_recent()\n study.find_incomplete(include_errors=True)\n return study\n\n\ndef get_task_ids() -> set[str]:\n with open(\"experiments/osworld_debug_task_ids.json\", \"r\") as f:\n task_ids = json.load(f)\n return set([task[\"id\"] for task in task_ids])\n\n\ndef main():\n n_jobs = 4\n use_vmware = True\n relaunch = False\n agent_args = [\n OSWORLD_CLAUDE,\n # OSWORLD_OAI # performs poorly.\n ] # type: ignore\n parallel_backend = \"ray\"\n os.environ[\"AGENTLAB_DEBUG\"] = os.environ.get(\"AGENTLAB_DEBUG\", \"1\")\n\n study = make_study(\n benchmark=OsworldBenchmark(\n test_set_name=\"test_small.json\"\n ), # or test_all.json (Exper) # type: ignore\n agent_args=agent_args, # type: ignore\n comment=\"osworld debug 2\",\n logging_level=logging.INFO,\n logging_level_stdout=logging.INFO,\n )\n\n if use_vmware:\n for exp_args in study.exp_args_list:\n exp_args.env_args.provider_name = \"vmware\" # type: ignore\n exp_args.env_args.path_to_vm = \"OSWorld/vmware_vm_data/Ubuntu0/Ubuntu0.vmx\" # type: ignore\n parallel_backend = \"sequential\"\n\n if os.environ.get(\"AGENTLAB_DEBUG\"):\n task_ids = get_task_ids()\n study.exp_args_list = [exp_args for exp_args in study.exp_args_list if exp_args.env_args.task[\"id\"] in task_ids] # type: ignore\n print(f\"Debug on {len(study.exp_args_list)} experiments\")\n n_jobs = 1 # Make sure to use 1 job when debugging in VS\n\n study = get_most_recent_incomplete_study() if relaunch else study\n study.run(n_jobs=n_jobs, n_relaunch=1, parallel_backend=parallel_backend)\n\n\nif __name__ == \"__main__\":\n main()","source_hash":"0fc43cb4e5da8b657895d33c00a64d162b11f1c6d537b3ef7dda5d8816b0d11e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.ui_assistant","uri":"program://AgentLab/module/src.agentlab.ui_assistant#L1-L63","kind":"module","name":"src.agentlab.ui_assistant","path":"src/agentlab/ui_assistant.py","language":"python","start_line":1,"end_line":63,"context_start_line":1,"context_end_line":63,"code":"import argparse\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.launch_exp import import_object\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\n\n\ndef make_exp_args(agent_args: AgentArgs, start_url: str) -> ExpArgs:\n try:\n agent_args.flags.action.demo_mode = \"default\"\n except AttributeError:\n pass\n\n if isinstance(agent_args, GenericAgentArgs):\n agent_args.flags.enable_chat = True\n\n exp_args = ExpArgs(\n agent_args=agent_args,\n env_args=EnvArgs(\n max_steps=1000,\n task_seed=None,\n task_name=\"openended\",\n task_kwargs={\n \"start_url\": start_url,\n },\n headless=False,\n record_video=True,\n wait_for_user_message=True,\n viewport={\"width\": 1500, \"height\": 
1280},\n slow_mo=1000,\n ),\n )\n\n return exp_args\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--agent_config\",\n type=str,\n default=\"agentlab.agents.generic_agent.AGENT_GPT5_MINI\",\n help=\"\"\"Python path to the agent config. Defaults to : \"agentlab.agents.generic_agent.AGENT_GPT5_MINI\".\"\"\",\n )\n parser.add_argument(\n \"--start_url\",\n type=str,\n default=\"https://www.google.com\",\n help=\"The start page of the agent. Defaults to https://www.google.com\",\n )\n\n args, unknown = parser.parse_known_args()\n agent_args = import_object(args.agent_config)\n exp_args = make_exp_args(agent_args, args.start_url)\n exp_args.prepare(RESULTS_DIR / \"ui_assistant_logs\")\n exp_args.run()\n\n\nif __name__ == \"__main__\":\n main()","source_hash":"d297795cc2edb4903388ef4913c82c39a17de6bc6f1eeb89cd5c8fa93e10c07b","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.ui_assistant.make_exp_args","uri":"program://AgentLab/function/src.agentlab.ui_assistant.make_exp_args#L10-L36","kind":"function","name":"make_exp_args","path":"src/agentlab/ui_assistant.py","language":"python","start_line":10,"end_line":36,"context_start_line":1,"context_end_line":56,"code":"import argparse\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.launch_exp import import_object\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\n\n\ndef make_exp_args(agent_args: AgentArgs, start_url: str) -> ExpArgs:\n try:\n agent_args.flags.action.demo_mode = \"default\"\n except AttributeError:\n pass\n\n if isinstance(agent_args, GenericAgentArgs):\n agent_args.flags.enable_chat = True\n\n exp_args = ExpArgs(\n agent_args=agent_args,\n env_args=EnvArgs(\n max_steps=1000,\n task_seed=None,\n task_name=\"openended\",\n task_kwargs={\n \"start_url\": start_url,\n },\n headless=False,\n record_video=True,\n wait_for_user_message=True,\n viewport={\"width\": 1500, \"height\": 1280},\n slow_mo=1000,\n ),\n )\n\n return exp_args\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--agent_config\",\n type=str,\n default=\"agentlab.agents.generic_agent.AGENT_GPT5_MINI\",\n help=\"\"\"Python path to the agent config. Defaults to : \"agentlab.agents.generic_agent.AGENT_GPT5_MINI\".\"\"\",\n )\n parser.add_argument(\n \"--start_url\",\n type=str,\n default=\"https://www.google.com\",\n help=\"The start page of the agent. 
Defaults to https://www.google.com\",\n )\n\n args, unknown = parser.parse_known_args()\n agent_args = import_object(args.agent_config)","source_hash":"d297795cc2edb4903388ef4913c82c39a17de6bc6f1eeb89cd5c8fa93e10c07b","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.ui_assistant.main","uri":"program://AgentLab/function/src.agentlab.ui_assistant.main#L39-L59","kind":"function","name":"main","path":"src/agentlab/ui_assistant.py","language":"python","start_line":39,"end_line":59,"context_start_line":19,"context_end_line":63,"code":" exp_args = ExpArgs(\n agent_args=agent_args,\n env_args=EnvArgs(\n max_steps=1000,\n task_seed=None,\n task_name=\"openended\",\n task_kwargs={\n \"start_url\": start_url,\n },\n headless=False,\n record_video=True,\n wait_for_user_message=True,\n viewport={\"width\": 1500, \"height\": 1280},\n slow_mo=1000,\n ),\n )\n\n return exp_args\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--agent_config\",\n type=str,\n default=\"agentlab.agents.generic_agent.AGENT_GPT5_MINI\",\n help=\"\"\"Python path to the agent config. Defaults to : \"agentlab.agents.generic_agent.AGENT_GPT5_MINI\".\"\"\",\n )\n parser.add_argument(\n \"--start_url\",\n type=str,\n default=\"https://www.google.com\",\n help=\"The start page of the agent. Defaults to https://www.google.com\",\n )\n\n args, unknown = parser.parse_known_args()\n agent_args = import_object(args.agent_config)\n exp_args = make_exp_args(agent_args, args.start_url)\n exp_args.prepare(RESULTS_DIR / \"ui_assistant_logs\")\n exp_args.run()\n\n\nif __name__ == \"__main__\":\n main()","source_hash":"d297795cc2edb4903388ef4913c82c39a17de6bc6f1eeb89cd5c8fa93e10c07b","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.archive_studies","uri":"program://AgentLab/module/src.agentlab.analyze.archive_studies#L1-L122","kind":"module","name":"src.agentlab.analyze.archive_studies","path":"src/agentlab/analyze/archive_studies.py","language":"python","start_line":1,"end_line":122,"context_start_line":1,"context_end_line":122,"code":"import os\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.study import Study\n\n\n@dataclass\nclass StudyInfo:\n study_dir: Path\n study: Study\n summary_df: pd.DataFrame\n should_delete: bool = False\n reason: str = \"\"\n\n\ndef search_for_reasons_to_archive(result_dir: Path, min_study_size: int = 0) -> list[StudyInfo]:\n\n study_info_list = []\n study_dirs = list(result_dir.iterdir())\n progress = tqdm(study_dirs, desc=\"Processing studies\")\n for study_dir in progress:\n\n progress.set_postfix({\"study_dir\": study_dir})\n if not study_dir.is_dir():\n progress.set_postfix({\"status\": \"skipped\"})\n continue\n\n try:\n study = Study.load(study_dir)\n except Exception:\n study = None\n # get summary*.csv files and find the most recent\n summary_files = list(study_dir.glob(\"summary*.csv\"))\n\n if len(summary_files) != 0:\n most_recent_summary = max(summary_files, key=os.path.getctime)\n summary_df = pd.read_csv(most_recent_summary)\n\n else:\n try:\n result_df = inspect_results.load_result_df(study_dir, progress_fn=None)\n summary_df = inspect_results.summarize_study(result_df)\n except Exception as e:\n print(f\" Error processing {study_dir}: {e}\")\n continue\n\n study_info = StudyInfo(\n study_dir=study_dir,\n study=study,\n 
summary_df=summary_df,\n )\n\n if len(study_info.summary_df) == 0:\n study_info.should_delete = True\n study_info.reason = \"Empty summary DataFrame\"\n\n n_completed, n_total, n_err = 0, 0, 0\n\n for _, row in study_info.summary_df.iterrows():\n n_comp, n_tot = row[\"n_completed\"].split(\"/\")\n n_completed += int(n_comp)\n n_total += int(n_tot)\n n_err += int(row.get(\"n_err\", 0)) # default to 0 if the column is missing\n\n n_finished = n_completed - n_err\n\n # print(summary_df)\n # print(f\" {n_completed} / {n_total}, {n_err} errors\")\n\n if \"miniwob-tiny-test\" in study_dir.name:\n study_info.should_delete = True\n study_info.reason += \"Miniwob tiny test\\n\"\n if n_total == 0:\n study_info.should_delete = True\n study_info.reason += \"No tasks\\n\"\n if n_completed == 0:\n study_info.should_delete = True\n study_info.reason += \"No tasks completed\\n\"\n # guard the ratio against division by zero when the study has no tasks\n if n_total > 0 and float(n_finished) / float(n_total) < 0.5:\n study_info.should_delete = True\n study_info.reason += f\"Less than 50% tasks finished, n_err: {n_err}, n_total: {n_total}, n_finished: {n_finished}, n_completed: {n_completed}\\n\"\n\n if n_total <= min_study_size:\n study_info.should_delete = True\n study_info.reason += (\n f\"Too few tasks. n_total ({n_total}) <= min_study_size ({min_study_size})\\n\"\n )\n\n study_info_list.append(study_info)\n return study_info_list\n\n\nif __name__ == \"__main__\":\n study_list_info = search_for_reasons_to_archive(RESULTS_DIR, min_study_size=5)\n archive_dir = RESULTS_DIR.parent / \"archived_agentlab_results\" # type: Path\n archive_dir.mkdir(parents=True, exist_ok=True)\n\n # Comment out the line below to actually move studies to the archive\n archive_dir = None\n\n for study_info in study_list_info:\n if not study_info.should_delete:\n continue\n\n print(f\"Study: {study_info.study_dir.name}\")\n print(f\" Reason: {study_info.reason}\")\n print(study_info.summary_df)\n print()\n\n if archive_dir is not None:\n # move to new dir\n new_path = archive_dir / study_info.study_dir.name\n study_info.study_dir.rename(new_path)\n # save reason in a file\n reason_file = new_path / \"reason_to_archive.txt\"\n reason_file.write_text(study_info.reason)","source_hash":"6cea6f7c8acdd87b74c18ca1ab84052938be3024762c472ead51035c163aa15a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.archive_studies.StudyInfo","uri":"program://AgentLab/class/src.agentlab.analyze.archive_studies.StudyInfo#L14-L19","kind":"class","name":"StudyInfo","path":"src/agentlab/analyze/archive_studies.py","language":"python","start_line":14,"end_line":19,"context_start_line":1,"context_end_line":39,"code":"import os\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.study import Study\n\n\n@dataclass\nclass StudyInfo:\n study_dir: Path\n study: Study\n summary_df: pd.DataFrame\n should_delete: bool = False\n reason: str = \"\"\n\n\ndef search_for_reasons_to_archive(result_dir: Path, min_study_size: int = 0) -> list[StudyInfo]:\n\n study_info_list = []\n study_dirs = list(result_dir.iterdir())\n progress = tqdm(study_dirs, desc=\"Processing studies\")\n for study_dir in progress:\n\n progress.set_postfix({\"study_dir\": study_dir})\n if not study_dir.is_dir():\n progress.set_postfix({\"status\": \"skipped\"})\n continue\n\n try:\n study = Study.load(study_dir)\n except Exception:\n study = None\n # get summary*.csv files and find the most recent\n summary_files = list(study_dir.glob(\"summary*.csv\"))","source_hash":"6cea6f7c8acdd87b74c18ca1ab84052938be3024762c472ead51035c163aa15a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.archive_studies.search_for_reasons_to_archive","uri":"program://AgentLab/function/src.agentlab.analyze.archive_studies.search_for_reasons_to_archive#L22-L96","kind":"function","name":"search_for_reasons_to_archive","path":"src/agentlab/analyze/archive_studies.py","language":"python","start_line":22,"end_line":96,"context_start_line":2,"context_end_line":116,"code":"from dataclasses import dataclass\nfrom pathlib import Path\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.study import Study\n\n\n@dataclass\nclass StudyInfo:\n study_dir: Path\n study: Study\n summary_df: pd.DataFrame\n should_delete: bool = False\n reason: str = \"\"\n\n\ndef search_for_reasons_to_archive(result_dir: Path, min_study_size: int = 0) -> list[StudyInfo]:\n\n study_info_list = []\n study_dirs = list(result_dir.iterdir())\n progress = tqdm(study_dirs, desc=\"Processing studies\")\n for study_dir in progress:\n\n progress.set_postfix({\"study_dir\": study_dir})\n if not study_dir.is_dir():\n progress.set_postfix({\"status\": \"skipped\"})\n continue\n\n try:\n study = Study.load(study_dir)\n except Exception:\n study = None\n # get summary*.csv files and find the most recent\n summary_files = list(study_dir.glob(\"summary*.csv\"))\n\n if len(summary_files) != 0:\n most_recent_summary = max(summary_files, key=os.path.getctime)\n summary_df = pd.read_csv(most_recent_summary)\n\n else:\n try:\n result_df = inspect_results.load_result_df(study_dir, progress_fn=None)\n summary_df = inspect_results.summarize_study(result_df)\n except Exception as e:\n print(f\" Error processing {study_dir}: {e}\")\n continue\n\n study_info = StudyInfo(\n study_dir=study_dir,\n study=study,\n summary_df=summary_df,\n )\n\n if len(study_info.summary_df) == 0:\n study_info.should_delete = True\n study_info.reason = \"Empty summary DataFrame\"\n\n n_completed, n_total, n_err = 0, 0, 0\n\n for _, row in study_info.summary_df.iterrows():\n n_comp, n_tot = row[\"n_completed\"].split(\"/\")\n n_completed += int(n_comp)\n n_total += int(n_tot)\n n_err += int(row.get(\"n_err\", 0)) # default to 0 if the column is missing\n\n n_finished = n_completed - n_err\n\n # print(summary_df)\n # print(f\" {n_completed} / {n_total}, {n_err} errors\")\n\n if \"miniwob-tiny-test\" in study_dir.name:\n study_info.should_delete = True\n study_info.reason += \"Miniwob tiny test\\n\"\n if n_total == 0:\n study_info.should_delete = True\n study_info.reason += \"No tasks\\n\"\n if n_completed == 0:\n study_info.should_delete = True\n study_info.reason += \"No tasks completed\\n\"\n # guard the ratio against division by zero when the study has no tasks\n if n_total > 0 and float(n_finished) / float(n_total) < 0.5:\n study_info.should_delete = True\n study_info.reason += f\"Less than 50% tasks finished, n_err: {n_err}, n_total: {n_total}, n_finished: {n_finished}, n_completed: {n_completed}\\n\"\n\n if n_total <= min_study_size:\n study_info.should_delete = True\n study_info.reason += (\n f\"Too few tasks. n_total ({n_total}) <= min_study_size ({min_study_size})\\n\"\n )\n\n study_info_list.append(study_info)\n return study_info_list\n\n\nif __name__ == \"__main__\":\n study_list_info = search_for_reasons_to_archive(RESULTS_DIR, min_study_size=5)\n archive_dir = RESULTS_DIR.parent / \"archived_agentlab_results\" # type: Path\n archive_dir.mkdir(parents=True, exist_ok=True)\n\n # Comment out the line below to actually move studies to the archive\n archive_dir = None\n\n for study_info in study_list_info:\n if not study_info.should_delete:\n continue\n\n print(f\"Study: {study_info.study_dir.name}\")\n print(f\" Reason: {study_info.reason}\")\n print(study_info.summary_df)\n print()\n\n if archive_dir is not None:","source_hash":"6cea6f7c8acdd87b74c18ca1ab84052938be3024762c472ead51035c163aa15a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils","uri":"program://AgentLab/module/src.agentlab.analyze.overlay_utils#L1-L435","kind":"module","name":"src.agentlab.analyze.overlay_utils","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":1,"end_line":435,"context_start_line":1,"context_end_line":435,"code":"import ast\nimport inspect\nimport math\nfrom dataclasses import dataclass\nfrom typing import Any, Union\n\nimport matplotlib.pyplot as plt\nimport PIL\nfrom browsergym.core.action.highlevel import ACTION_SUBSETS\nfrom PIL import Image, ImageDraw\n\nBGYM_FUNCTION_MAP = {}\nfor subset in (\"bid\", \"coord\"):\n for func in ACTION_SUBSETS[subset]:\n if func.__name__ not in BGYM_FUNCTION_MAP: # keep the first function registered for a name\n BGYM_FUNCTION_MAP[func.__name__] = func\n\n\n@dataclass\nclass ArgInfo:\n function_name: str\n name: str\n value: Any\n type: str\n start_index: int\n stop_index: int\n\n\ndef parse_function_calls(code_string: str) -> list[ArgInfo]:\n \"\"\"\n Parse a string containing multiple function calls and return a list of ArgInfo objects\n for all arguments in all function calls.\n\n Args:\n code_string: String containing function calls\n\n Returns:\n List of ArgInfo objects containing detailed information about each argument\n\n Example:\n >>> code = '''\n ... mouse_click(34, 59)\n ... fill(\"a234\", \"test\")\n ... 
'''\n >>> result = parse_function_calls(code)\n >>> # Returns list of ArgInfo objects for each argument\n \"\"\"\n result = []\n\n try:\n # Parse the code string into an AST\n tree = ast.parse(code_string)\n\n # Extract all function calls\n for node in ast.walk(tree):\n if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):\n func_name = node.func.id\n\n # Check if this function exists in our module\n if func_name in BGYM_FUNCTION_MAP:\n func = BGYM_FUNCTION_MAP[func_name]\n\n # Get function signature to map positional args to parameter names\n try:\n sig = inspect.signature(func)\n param_names = list(sig.parameters.keys())\n\n # Process positional arguments\n for i, arg in enumerate(node.args):\n if i < len(param_names):\n param_name = param_names[i]\n value = _extract_value(arg)\n start_idx, stop_idx = _get_node_indices(code_string, arg)\n\n arg_info = ArgInfo(\n function_name=func_name,\n name=param_name,\n value=value,\n type=type(value).__name__,\n start_index=start_idx,\n stop_index=stop_idx,\n )\n result.append(arg_info)\n\n # Process keyword arguments\n for keyword in node.keywords:\n value = _extract_value(keyword.value)\n start_idx, stop_idx = _get_node_indices(\n code_string, keyword.value, keyword\n )\n\n arg_info = ArgInfo(\n function_name=func_name,\n name=keyword.arg,\n value=value,\n type=type(value).__name__,\n start_index=start_idx,\n stop_index=stop_idx,\n )\n result.append(arg_info)\n\n except Exception as e:\n # If we can't inspect the function, skip it\n print(f\"Warning: Could not process function {func_name}: {e}\")\n continue\n\n except SyntaxError as e:\n print(f\"Syntax error in code string: {e}\")\n return []\n\n return result\n\n\ndef _extract_value(node: ast.AST) -> Any:\n \"\"\"\n Extract the actual value from an AST node.\n\n Args:\n node: AST node representing a value\n\n Returns:\n The extracted Python value\n \"\"\"\n if isinstance(node, ast.Constant):\n # Python 3.8+ uses ast.Constant for all literals\n return node.value\n elif isinstance(node, ast.Str):\n # Fallback for older Python versions\n return node.s\n elif isinstance(node, ast.Num):\n # Fallback for older Python versions\n return node.n\n elif isinstance(node, ast.List):\n # Handle list literals\n return [_extract_value(item) for item in node.elts]\n elif isinstance(node, ast.Name):\n # Handle variable names (return as string identifier)\n return node.id\n else:\n # For other node types, return a string representation\n return ast.unparse(node) if hasattr(ast, \"unparse\") else str(node)\n\n\ndef _get_node_indices(\n source: str, node: ast.AST, keyword_node: ast.keyword = None\n) -> tuple[int, int]:\n \"\"\"\n Convert AST node line/column positions to absolute character indices.\n\n Args:\n source: Original source code string\n node: AST node (the value)\n keyword_node: If provided, use this keyword node's position as start\n\n Returns:\n Tuple of (start_index, stop_index) in the source string\n \"\"\"\n lines = source.splitlines(keepends=True)\n\n # For keyword arguments, start from the keyword name\n if keyword_node is not None:\n start_line = keyword_node.lineno\n start_col = keyword_node.col_offset\n else:\n start_line = node.lineno\n start_col = node.col_offset\n\n # Calculate start index\n start_index = 0\n for i in range(start_line - 1): # lineno is 1-based\n start_index += len(lines[i])\n start_index += start_col\n\n # End index always comes from the value node\n if hasattr(node, \"end_lineno\") and hasattr(node, \"end_col_offset\"):\n end_index = 0\n for i in 
range(node.end_lineno - 1):\n end_index += len(lines[i])\n end_index += node.end_col_offset\n else:\n # Fallback estimation\n if hasattr(ast, \"get_source_segment\"):\n segment = ast.get_source_segment(source, node)\n end_index = start_index + len(segment) if segment else start_index + 1\n else:\n end_index = start_index + 1\n\n return start_index, end_index\n\n\ndef find_bids_and_xy_pairs(args: list[ArgInfo]) -> list[ArgInfo]:\n \"\"\"\n Find bid arguments and x,y coordinate pairs from a list of ArgInfo objects.\n\n Args:\n args: List of ArgInfo objects from parse_function_calls\n\n Returns:\n List of ArgInfo objects containing:\n - Original bid arguments (unchanged)\n - Merged x,y pairs with joint names, tuple values, and combined indices\n\n Rules for x,y pairs:\n - Must be consecutive arguments\n - Must end with 'x' and 'y' respectively\n - Must have the same prefix (everything before 'x'/'y')\n - Merged name: prefix + \"_xy\"\n - Merged value: (x_value, y_value) as tuple of floats\n - Merged indices: start of x to stop of y\n \"\"\"\n result = []\n i = 0\n\n while i < len(args):\n current_arg = args[i]\n\n # Check if current arg name ends with 'bid'\n if current_arg.name.endswith(\"bid\"):\n result.append(current_arg)\n i += 1\n continue\n\n # Check for x,y pair\n if i + 1 < len(args) and current_arg.name.endswith(\"x\") and args[i + 1].name.endswith(\"y\"):\n\n next_arg = args[i + 1]\n\n # Extract prefixes (everything before 'x' and 'y')\n current_prefix = current_arg.name[:-1] # Remove 'x'\n next_prefix = next_arg.name[:-1] # Remove 'y'\n\n # Check if they have the same prefix and are from the same function\n if (\n current_prefix == next_prefix\n and current_arg.function_name == next_arg.function_name\n ):\n\n # Create merged ArgInfo for x,y pair\n merged_name = f\"{current_prefix}xy\"\n\n # Convert values to floats and create tuple\n try:\n x_val = float(current_arg.value)\n y_val = float(next_arg.value)\n merged_value = (x_val, y_val)\n except (ValueError, TypeError):\n # If conversion fails, keep original values\n merged_value = (current_arg.value, next_arg.value)\n\n merged_arg = ArgInfo(\n function_name=current_arg.function_name,\n name=merged_name,\n value=merged_value,\n type=\"tuple\",\n start_index=current_arg.start_index,\n stop_index=next_arg.stop_index,\n )\n\n result.append(merged_arg)\n i += 2 # Skip both x and y args\n continue\n\n # If no special handling, skip this argument\n i += 1\n\n return result\n\n\ndef overlay_cross(\n img: Image.Image,\n coord: tuple[float, float],\n color: Union[str, tuple[int, int, int]] = \"red\",\n length: int = 7,\n width: int = 1,\n) -> Image.Image:\n draw = ImageDraw.Draw(img)\n\n x, y = coord\n half_len = length // 2\n\n # Draw horizontal line\n draw.line([x - half_len, y, x + half_len, y], fill=color, width=width)\n # Draw vertical line\n draw.line([x, y - half_len, x, y + half_len], fill=color, width=width)\n\n return img\n\n\ndef overlay_rectangle(\n img: Image.Image,\n bbox: tuple[float, float, float, float],\n color: Union[str, tuple[int, int, int]] = \"red\",\n width: int = 1,\n dashed: bool = True,\n) -> Image.Image:\n draw = ImageDraw.Draw(img)\n\n x, y, w, h = bbox\n\n if dashed:\n # Draw dashed rectangle\n linedashed(draw, x, y, x + w, y, color, width)\n linedashed(draw, x + w, y, x + w, y + h, color, width)\n linedashed(draw, x + w, y + h, x, y + h, color, width)\n linedashed(draw, x, y + h, x, y, color, width)\n else:\n draw.rectangle([x, y, x + w, y + h], outline=color, width=width)\n\n return img\n\n\n# Adapted from 
https://stackoverflow.com/questions/51908563/dotted-or-dashed-line-with-python-pillow/58885306#58885306\ndef linedashed(\n draw: PIL.ImageDraw.Draw, x0, y0, x1, y1, fill, width, dash_length=4, nodash_length=8\n):\n line_dx = x1 - x0 # delta x (can be negative)\n line_dy = y1 - y0 # delta y (can be negative)\n line_length = math.hypot(line_dx, line_dy) # line length (positive)\n if line_length == 0:\n return # Avoid division by zero in case the line length is 0\n pixel_dx = line_dx / line_length # x add for 1px line length\n pixel_dy = line_dy / line_length # y add for 1px line length\n dash_start = 0\n while dash_start < line_length:\n dash_end = dash_start + dash_length\n if dash_end > line_length:\n dash_end = line_length\n draw.line(\n (\n round(x0 + pixel_dx * dash_start),\n round(y0 + pixel_dy * dash_start),\n round(x0 + pixel_dx * dash_end),\n round(y0 + pixel_dy * dash_end),\n ),\n fill=fill,\n width=width,\n )\n dash_start += dash_length + nodash_length\n\n\ndef annotate_action(\n img: Image.Image, action_string: str, properties: dict[str, tuple], colormap: str = \"tab10\"\n) -> str:\n \"\"\"\n Annotate an image with overlays for action arguments and return colored HTML.\n\n Args:\n img: PIL Image to modify in place\n action_string: String containing function calls\n properties: Dict mapping bid strings to bounding boxes (x1, y1, x2, y2)\n colormap: Matplotlib colormap name for auto-color selection\n\n Returns:\n HTML string with arguments colored to match overlays\n \"\"\"\n # Parse function calls to get all arguments\n all_args = parse_function_calls(action_string)\n\n # Filter to get bids and xy pairs\n filtered_args = find_bids_and_xy_pairs(all_args)\n\n # Get colormap\n cmap = plt.get_cmap(colormap)\n\n # Track colors for each filtered argument\n colors = []\n\n # Add overlays to image\n for i, arg_info in enumerate(filtered_args):\n # Get color from colormap\n color_rgb = cmap(i % cmap.N)\n color_255 = tuple(int(c * 255) for c in color_rgb[:3]) # Convert to 0-255 range\n\n colors.append(color_rgb[:3]) # Store normalized RGB for HTML\n\n if arg_info.name.endswith(\"xy\"):\n # Handle x,y coordinate pairs\n x, y = arg_info.value\n overlay_cross(img, (x, y), color_255, length=9, width=3)\n\n elif arg_info.name.endswith(\"bid\"):\n # Handle bid arguments with bounding boxes\n bid_value = arg_info.value\n if bid_value in properties:\n\n bbox = properties[bid_value][\"bbox\"]\n if bbox:\n overlay_rectangle(img, bbox, color_255, width=3)\n\n # Generate colored HTML\n html = create_colored_html(action_string, filtered_args, colors)\n\n return html\n\n\ndef create_colored_html(action_string: str, filtered_args: list, colors: list) -> str:\n \"\"\"\n Create HTML with colored arguments using start/stop indices.\n\n Args:\n action_string: Original action string\n filtered_args: List of ArgInfo objects with start_index/stop_index\n colors: List of RGB tuples, same length as filtered_args\n\n Returns:\n HTML string with colored spans\n \"\"\"\n # Sort args by start position for sequential processing\n sorted_pairs = sorted(zip(filtered_args, colors), key=lambda x: x[0].start_index)\n\n # Build HTML with colored spans\n html_parts = []\n last_end = 0\n\n for arg_info, color_rgb in sorted_pairs:\n # Add uncolored text before this argument\n html_parts.append(action_string[last_end : arg_info.start_index])\n\n # Get the argument text\n arg_text = action_string[arg_info.start_index : arg_info.stop_index]\n\n # Convert color to hex\n color_hex = \"#{:02x}{:02x}{:02x}\".format(\n int(color_rgb[0] 
* 255), int(color_rgb[1] * 255), int(color_rgb[2] * 255)\n )\n\n # Add colored span (styling attributes reconstructed from color_hex usage)\n html_parts.append(f'<span style=\"color: {color_hex}\">{arg_text}</span>')\n\n last_end = arg_info.stop_index\n\n # Add remaining text\n html_parts.append(action_string[last_end:])\n\n return \"\".join(html_parts)","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils.ArgInfo","uri":"program://AgentLab/class/src.agentlab.analyze.overlay_utils.ArgInfo#L20-L26","kind":"class","name":"ArgInfo","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":20,"end_line":26,"context_start_line":1,"context_end_line":46,"code":"import ast\nimport inspect\nimport math\nfrom dataclasses import dataclass\nfrom typing import Any, Union\n\nimport matplotlib.pyplot as plt\nimport PIL\nfrom browsergym.core.action.highlevel import ACTION_SUBSETS\nfrom PIL import Image, ImageDraw\n\nBGYM_FUNCTION_MAP = {}\nfor subset in (\"bid\", \"coord\"):\n for func in ACTION_SUBSETS[subset]:\n if func.__name__ not in BGYM_FUNCTION_MAP: # keep the first function registered for a name\n BGYM_FUNCTION_MAP[func.__name__] = func\n\n\n@dataclass\nclass ArgInfo:\n function_name: str\n name: str\n value: Any\n type: str\n start_index: int\n stop_index: int\n\n\ndef parse_function_calls(code_string: str) -> list[ArgInfo]:\n \"\"\"\n Parse a string containing multiple function calls and return a list of ArgInfo objects\n for all arguments in all function calls.\n\n Args:\n code_string: String containing function calls\n\n Returns:\n List of ArgInfo objects containing detailed information about each argument\n\n Example:\n >>> code = '''\n ... mouse_click(34, 59)\n ... fill(\"a234\", \"test\")\n ... '''\n >>> result = parse_function_calls(code)\n >>> # Returns list of ArgInfo objects for each argument","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils.parse_function_calls","uri":"program://AgentLab/function/src.agentlab.analyze.overlay_utils.parse_function_calls#L29-L111","kind":"function","name":"parse_function_calls","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":29,"end_line":111,"context_start_line":9,"context_end_line":131,"code":"from browsergym.core.action.highlevel import ACTION_SUBSETS\nfrom PIL import Image, ImageDraw\n\nBGYM_FUNCTION_MAP = {}\nfor subset in (\"bid\", \"coord\"):\n for func in ACTION_SUBSETS[subset]:\n if func.__name__ not in BGYM_FUNCTION_MAP: # keep the first function registered for a name\n BGYM_FUNCTION_MAP[func.__name__] = func\n\n\n@dataclass\nclass ArgInfo:\n function_name: str\n name: str\n value: Any\n type: str\n start_index: int\n stop_index: int\n\n\ndef parse_function_calls(code_string: str) -> list[ArgInfo]:\n \"\"\"\n Parse a string containing multiple function calls and return a list of ArgInfo objects\n for all arguments in all function calls.\n\n Args:\n code_string: String containing function calls\n\n Returns:\n List of ArgInfo objects containing detailed information about each argument\n\n Example:\n >>> code = '''\n ... mouse_click(34, 59)\n ... fill(\"a234\", \"test\")\n ... 
'''\n >>> result = parse_function_calls(code)\n >>> # Returns list of ArgInfo objects for each argument\n \"\"\"\n result = []\n\n try:\n # Parse the code string into an AST\n tree = ast.parse(code_string)\n\n # Extract all function calls\n for node in ast.walk(tree):\n if isinstance(node, ast.Call) and isinstance(node.func, ast.Name):\n func_name = node.func.id\n\n # Check if this function exists in our module\n if func_name in BGYM_FUNCTION_MAP:\n func = BGYM_FUNCTION_MAP[func_name]\n\n # Get function signature to map positional args to parameter names\n try:\n sig = inspect.signature(func)\n param_names = list(sig.parameters.keys())\n\n # Process positional arguments\n for i, arg in enumerate(node.args):\n if i < len(param_names):\n param_name = param_names[i]\n value = _extract_value(arg)\n start_idx, stop_idx = _get_node_indices(code_string, arg)\n\n arg_info = ArgInfo(\n function_name=func_name,\n name=param_name,\n value=value,\n type=type(value).__name__,\n start_index=start_idx,\n stop_index=stop_idx,\n )\n result.append(arg_info)\n\n # Process keyword arguments\n for keyword in node.keywords:\n value = _extract_value(keyword.value)\n start_idx, stop_idx = _get_node_indices(\n code_string, keyword.value, keyword\n )\n\n arg_info = ArgInfo(\n function_name=func_name,\n name=keyword.arg,\n value=value,\n type=type(value).__name__,\n start_index=start_idx,\n stop_index=stop_idx,\n )\n result.append(arg_info)\n\n except Exception as e:\n # If we can't inspect the function, skip it\n print(f\"Warning: Could not process function {func_name}: {e}\")\n continue\n\n except SyntaxError as e:\n print(f\"Syntax error in code string: {e}\")\n return []\n\n return result\n\n\ndef _extract_value(node: ast.AST) -> Any:\n \"\"\"\n Extract the actual value from an AST node.\n\n Args:\n node: AST node representing a value\n\n Returns:\n The extracted Python value\n \"\"\"\n if isinstance(node, ast.Constant):\n # Python 3.8+ uses ast.Constant for all literals\n return node.value\n elif isinstance(node, ast.Str):\n # Fallback for older Python versions\n return node.s\n elif isinstance(node, ast.Num):\n # Fallback for older Python versions","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils._extract_value","uri":"program://AgentLab/function/src.agentlab.analyze.overlay_utils._extract_value#L114-L141","kind":"function","name":"_extract_value","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":114,"end_line":141,"context_start_line":94,"context_end_line":161,"code":" name=keyword.arg,\n value=value,\n type=type(value).__name__,\n start_index=start_idx,\n stop_index=stop_idx,\n )\n result.append(arg_info)\n\n except Exception as e:\n # If we can't inspect the function, skip it\n print(f\"Warning: Could not process function {func_name}: {e}\")\n continue\n\n except SyntaxError as e:\n print(f\"Syntax error in code string: {e}\")\n return []\n\n return result\n\n\ndef _extract_value(node: ast.AST) -> Any:\n \"\"\"\n Extract the actual value from an AST node.\n\n Args:\n node: AST node representing a value\n\n Returns:\n The extracted Python value\n \"\"\"\n if isinstance(node, ast.Constant):\n # Python 3.8+ uses ast.Constant for all literals\n return node.value\n elif isinstance(node, ast.Str):\n # Fallback for older Python versions\n return node.s\n elif isinstance(node, ast.Num):\n # Fallback for older Python versions\n return node.n\n elif isinstance(node, 
ast.List):\n # Handle list literals\n return [_extract_value(item) for item in node.elts]\n elif isinstance(node, ast.Name):\n # Handle variable names (return as string identifier)\n return node.id\n else:\n # For other node types, return a string representation\n return ast.unparse(node) if hasattr(ast, \"unparse\") else str(node)\n\n\ndef _get_node_indices(\n source: str, node: ast.AST, keyword_node: ast.keyword = None\n) -> tuple[int, int]:\n \"\"\"\n Convert AST node line/column positions to absolute character indices.\n\n Args:\n source: Original source code string\n node: AST node (the value)\n keyword_node: If provided, use this keyword node's position as start\n\n Returns:\n Tuple of (start_index, stop_index) in the source string\n \"\"\"\n lines = source.splitlines(keepends=True)\n\n # For keyword arguments, start from the keyword name\n if keyword_node is not None:","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils._get_node_indices","uri":"program://AgentLab/function/src.agentlab.analyze.overlay_utils._get_node_indices#L144-L188","kind":"function","name":"_get_node_indices","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":144,"end_line":188,"context_start_line":124,"context_end_line":208,"code":" if isinstance(node, ast.Constant):\n # Python 3.8+ uses ast.Constant for all literals\n return node.value\n elif isinstance(node, ast.Str):\n # Fallback for older Python versions\n return node.s\n elif isinstance(node, ast.Num):\n # Fallback for older Python versions\n return node.n\n elif isinstance(node, ast.List):\n # Handle list literals\n return [_extract_value(item) for item in node.elts]\n elif isinstance(node, ast.Name):\n # Handle variable names (return as string identifier)\n return node.id\n else:\n # For other node types, return a string representation\n return ast.unparse(node) if hasattr(ast, \"unparse\") else str(node)\n\n\ndef _get_node_indices(\n source: str, node: ast.AST, keyword_node: ast.keyword = None\n) -> tuple[int, int]:\n \"\"\"\n Convert AST node line/column positions to absolute character indices.\n\n Args:\n source: Original source code string\n node: AST node (the value)\n keyword_node: If provided, use this keyword node's position as start\n\n Returns:\n Tuple of (start_index, stop_index) in the source string\n \"\"\"\n lines = source.splitlines(keepends=True)\n\n # For keyword arguments, start from the keyword name\n if keyword_node is not None:\n start_line = keyword_node.lineno\n start_col = keyword_node.col_offset\n else:\n start_line = node.lineno\n start_col = node.col_offset\n\n # Calculate start index\n start_index = 0\n for i in range(start_line - 1): # lineno is 1-based\n start_index += len(lines[i])\n start_index += start_col\n\n # End index always comes from the value node\n if hasattr(node, \"end_lineno\") and hasattr(node, \"end_col_offset\"):\n end_index = 0\n for i in range(node.end_lineno - 1):\n end_index += len(lines[i])\n end_index += node.end_col_offset\n else:\n # Fallback estimation\n if hasattr(ast, \"get_source_segment\"):\n segment = ast.get_source_segment(source, node)\n end_index = start_index + len(segment) if segment else start_index + 1\n else:\n end_index = start_index + 1\n\n return start_index, end_index\n\n\ndef find_bids_and_xy_pairs(args: list[ArgInfo]) -> list[ArgInfo]:\n \"\"\"\n Find bid arguments and x,y coordinate pairs from a list of ArgInfo objects.\n\n Args:\n 
args: List of ArgInfo objects from parse_function_calls\n\n Returns:\n List of ArgInfo objects containing:\n - Original bid arguments (unchanged)\n - Merged x,y pairs with joint names, tuple values, and combined indices\n\n Rules for x,y pairs:\n - Must be consecutive arguments\n - Must end with 'x' and 'y' respectively\n - Must have the same prefix (everything before 'x'/'y')\n - Merged name: prefix + \"_xy\"\n - Merged value: (x_value, y_value) as tuple of floats","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils.find_bids_and_xy_pairs","uri":"program://AgentLab/function/src.agentlab.analyze.overlay_utils.find_bids_and_xy_pairs#L191-L266","kind":"function","name":"find_bids_and_xy_pairs","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":191,"end_line":266,"context_start_line":171,"context_end_line":286,"code":" start_index += len(lines[i])\n start_index += start_col\n\n # End index always comes from the value node\n if hasattr(node, \"end_lineno\") and hasattr(node, \"end_col_offset\"):\n end_index = 0\n for i in range(node.end_lineno - 1):\n end_index += len(lines[i])\n end_index += node.end_col_offset\n else:\n # Fallback estimation\n if hasattr(ast, \"get_source_segment\"):\n segment = ast.get_source_segment(source, node)\n end_index = start_index + len(segment) if segment else start_index + 1\n else:\n end_index = start_index + 1\n\n return start_index, end_index\n\n\ndef find_bids_and_xy_pairs(args: list[ArgInfo]) -> list[ArgInfo]:\n \"\"\"\n Find bid arguments and x,y coordinate pairs from a list of ArgInfo objects.\n\n Args:\n args: List of ArgInfo objects from parse_function_calls\n\n Returns:\n List of ArgInfo objects containing:\n - Original bid arguments (unchanged)\n - Merged x,y pairs with joint names, tuple values, and combined indices\n\n Rules for x,y pairs:\n - Must be consecutive arguments\n - Must end with 'x' and 'y' respectively\n - Must have the same prefix (everything before 'x'/'y')\n - Merged name: prefix + \"_xy\"\n - Merged value: (x_value, y_value) as tuple of floats\n - Merged indices: start of x to stop of y\n \"\"\"\n result = []\n i = 0\n\n while i < len(args):\n current_arg = args[i]\n\n # Check if current arg name ends with 'bid'\n if current_arg.name.endswith(\"bid\"):\n result.append(current_arg)\n i += 1\n continue\n\n # Check for x,y pair\n if i + 1 < len(args) and current_arg.name.endswith(\"x\") and args[i + 1].name.endswith(\"y\"):\n\n next_arg = args[i + 1]\n\n # Extract prefixes (everything before 'x' and 'y')\n current_prefix = current_arg.name[:-1] # Remove 'x'\n next_prefix = next_arg.name[:-1] # Remove 'y'\n\n # Check if they have the same prefix and are from the same function\n if (\n current_prefix == next_prefix\n and current_arg.function_name == next_arg.function_name\n ):\n\n # Create merged ArgInfo for x,y pair\n merged_name = f\"{current_prefix}xy\"\n\n # Convert values to floats and create tuple\n try:\n x_val = float(current_arg.value)\n y_val = float(next_arg.value)\n merged_value = (x_val, y_val)\n except (ValueError, TypeError):\n # If conversion fails, keep original values\n merged_value = (current_arg.value, next_arg.value)\n\n merged_arg = ArgInfo(\n function_name=current_arg.function_name,\n name=merged_name,\n value=merged_value,\n type=\"tuple\",\n start_index=current_arg.start_index,\n stop_index=next_arg.stop_index,\n )\n\n result.append(merged_arg)\n i += 2 # Skip both x 
and y args\n continue\n\n # If no special handling, skip this argument\n i += 1\n\n return result\n\n\ndef overlay_cross(\n img: Image.Image,\n coord: tuple[float, float],\n color: Union[str, tuple[int, int, int]] = \"red\",\n length: int = 7,\n width: int = 1,\n) -> Image.Image:\n draw = ImageDraw.Draw(img)\n\n x, y = coord\n half_len = length // 2\n\n # Draw horizontal line\n draw.line([x - half_len, y, x + half_len, y], fill=color, width=width)\n # Draw vertical line\n draw.line([x, y - half_len, x, y + half_len], fill=color, width=width)\n\n return img","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils.overlay_cross","uri":"program://AgentLab/function/src.agentlab.analyze.overlay_utils.overlay_cross#L269-L286","kind":"function","name":"overlay_cross","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":269,"end_line":286,"context_start_line":249,"context_end_line":306,"code":"\n merged_arg = ArgInfo(\n function_name=current_arg.function_name,\n name=merged_name,\n value=merged_value,\n type=\"tuple\",\n start_index=current_arg.start_index,\n stop_index=next_arg.stop_index,\n )\n\n result.append(merged_arg)\n i += 2 # Skip both x and y args\n continue\n\n # If no special handling, skip this argument\n i += 1\n\n return result\n\n\ndef overlay_cross(\n img: Image.Image,\n coord: tuple[float, float],\n color: Union[str, tuple[int, int, int]] = \"red\",\n length: int = 7,\n width: int = 1,\n) -> Image.Image:\n draw = ImageDraw.Draw(img)\n\n x, y = coord\n half_len = length // 2\n\n # Draw horizontal line\n draw.line([x - half_len, y, x + half_len, y], fill=color, width=width)\n # Draw vertical line\n draw.line([x, y - half_len, x, y + half_len], fill=color, width=width)\n\n return img\n\n\ndef overlay_rectangle(\n img: Image.Image,\n bbox: tuple[float, float, float, float],\n color: Union[str, tuple[int, int, int]] = \"red\",\n width: int = 1,\n dashed: bool = True,\n) -> Image.Image:\n draw = ImageDraw.Draw(img)\n\n x, y, w, h = bbox\n\n if dashed:\n # Draw dashed rectangle\n linedashed(draw, x, y, x + w, y, color, width)\n linedashed(draw, x + w, y, x + w, y + h, color, width)\n linedashed(draw, x + w, y + h, x, y + h, color, width)\n linedashed(draw, x, y + h, x, y, color, width)\n else:","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils.overlay_rectangle","uri":"program://AgentLab/function/src.agentlab.analyze.overlay_utils.overlay_rectangle#L289-L309","kind":"function","name":"overlay_rectangle","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":289,"end_line":309,"context_start_line":269,"context_end_line":329,"code":"def overlay_cross(\n img: Image.Image,\n coord: tuple[float, float],\n color: Union[str, tuple[int, int, int]] = \"red\",\n length: int = 7,\n width: int = 1,\n) -> Image.Image:\n draw = ImageDraw.Draw(img)\n\n x, y = coord\n half_len = length // 2\n\n # Draw horizontal line\n draw.line([x - half_len, y, x + half_len, y], fill=color, width=width)\n # Draw vertical line\n draw.line([x, y - half_len, x, y + half_len], fill=color, width=width)\n\n return img\n\n\ndef overlay_rectangle(\n img: Image.Image,\n bbox: tuple[float, float, float, float],\n color: Union[str, tuple[int, int, int]] = \"red\",\n width: int = 1,\n dashed: bool = True,\n) -> Image.Image:\n draw = 
ImageDraw.Draw(img)\n\n x, y, w, h = bbox\n\n if dashed:\n # Draw dashed rectangle\n linedashed(draw, x, y, x + w, y, color, width)\n linedashed(draw, x + w, y, x + w, y + h, color, width)\n linedashed(draw, x + w, y + h, x, y + h, color, width)\n linedashed(draw, x, y + h, x, y, color, width)\n else:\n draw.rectangle([x, y, x + w, y + h], outline=color, width=width)\n\n return img\n\n\n# Adapted from https://stackoverflow.com/questions/51908563/dotted-or-dashed-line-with-python-pillow/58885306#58885306\ndef linedashed(\n draw: PIL.ImageDraw.Draw, x0, y0, x1, y1, fill, width, dash_length=4, nodash_length=8\n):\n line_dx = x1 - x0 # delta x (can be negative)\n line_dy = y1 - y0 # delta y (can be negative)\n line_length = math.hypot(line_dx, line_dy) # line length (positive)\n if line_length == 0:\n return # Avoid division by zero in case the line length is 0\n pixel_dx = line_dx / line_length # x add for 1px line length\n pixel_dy = line_dy / line_length # y add for 1px line length\n dash_start = 0\n while dash_start < line_length:\n dash_end = dash_start + dash_length\n if dash_end > line_length:\n dash_end = line_length\n draw.line(\n (","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils.linedashed","uri":"program://AgentLab/function/src.agentlab.analyze.overlay_utils.linedashed#L313-L338","kind":"function","name":"linedashed","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":313,"end_line":338,"context_start_line":293,"context_end_line":358,"code":" width: int = 1,\n dashed: bool = True,\n) -> Image.Image:\n draw = ImageDraw.Draw(img)\n\n x, y, w, h = bbox\n\n if dashed:\n # Draw dashed rectangle\n linedashed(draw, x, y, x + w, y, color, width)\n linedashed(draw, x + w, y, x + w, y + h, color, width)\n linedashed(draw, x + w, y + h, x, y + h, color, width)\n linedashed(draw, x, y + h, x, y, color, width)\n else:\n draw.rectangle([x, y, x + w, y + h], outline=color, width=width)\n\n return img\n\n\n# Adapted from https://stackoverflow.com/questions/51908563/dotted-or-dashed-line-with-python-pillow/58885306#58885306\ndef linedashed(\n draw: PIL.ImageDraw.Draw, x0, y0, x1, y1, fill, width, dash_length=4, nodash_length=8\n):\n line_dx = x1 - x0 # delta x (can be negative)\n line_dy = y1 - y0 # delta y (can be negative)\n line_length = math.hypot(line_dx, line_dy) # line length (positive)\n if line_length == 0:\n return # Avoid division by zero in case the line length is 0\n pixel_dx = line_dx / line_length # x add for 1px line length\n pixel_dy = line_dy / line_length # y add for 1px line length\n dash_start = 0\n while dash_start < line_length:\n dash_end = dash_start + dash_length\n if dash_end > line_length:\n dash_end = line_length\n draw.line(\n (\n round(x0 + pixel_dx * dash_start),\n round(y0 + pixel_dy * dash_start),\n round(x0 + pixel_dx * dash_end),\n round(y0 + pixel_dy * dash_end),\n ),\n fill=fill,\n width=width,\n )\n dash_start += dash_length + nodash_length\n\n\ndef annotate_action(\n img: Image.Image, action_string: str, properties: dict[str, tuple], colormap: str = \"tab10\"\n) -> str:\n \"\"\"\n Annotate an image with overlays for action arguments and return colored HTML.\n\n Args:\n img: PIL Image to modify in place\n action_string: String containing function calls\n properties: Dict mapping bid strings to bounding boxes (x1, y1, x2, y2)\n colormap: Matplotlib colormap name for auto-color selection\n\n Returns:\n HTML 
string with arguments colored to match overlays\n \"\"\"\n # Parse function calls to get all arguments\n all_args = parse_function_calls(action_string)\n","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils.annotate_action","uri":"program://AgentLab/function/src.agentlab.analyze.overlay_utils.annotate_action#L341-L393","kind":"function","name":"annotate_action","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":341,"end_line":393,"context_start_line":321,"context_end_line":413,"code":" pixel_dx = line_dx / line_length # x add for 1px line length\n pixel_dy = line_dy / line_length # y add for 1px line length\n dash_start = 0\n while dash_start < line_length:\n dash_end = dash_start + dash_length\n if dash_end > line_length:\n dash_end = line_length\n draw.line(\n (\n round(x0 + pixel_dx * dash_start),\n round(y0 + pixel_dy * dash_start),\n round(x0 + pixel_dx * dash_end),\n round(y0 + pixel_dy * dash_end),\n ),\n fill=fill,\n width=width,\n )\n dash_start += dash_length + nodash_length\n\n\ndef annotate_action(\n img: Image.Image, action_string: str, properties: dict[str, tuple], colormap: str = \"tab10\"\n) -> str:\n \"\"\"\n Annotate an image with overlays for action arguments and return colored HTML.\n\n Args:\n img: PIL Image to modify in place\n action_string: String containing function calls\n properties: Dict mapping bid strings to bounding boxes (x1, y1, x2, y2)\n colormap: Matplotlib colormap name for auto-color selection\n\n Returns:\n HTML string with arguments colored to match overlays\n \"\"\"\n # Parse function calls to get all arguments\n all_args = parse_function_calls(action_string)\n\n # Filter to get bids and xy pairs\n filtered_args = find_bids_and_xy_pairs(all_args)\n\n # Get colormap\n cmap = plt.get_cmap(colormap)\n\n # Track colors for each filtered argument\n colors = []\n\n # Add overlays to image\n for i, arg_info in enumerate(filtered_args):\n # Get color from colormap\n color_rgb = cmap(i % cmap.N)\n color_255 = tuple(int(c * 255) for c in color_rgb[:3]) # Convert to 0-255 range\n\n colors.append(color_rgb[:3]) # Store normalized RGB for HTML\n\n if arg_info.name.endswith(\"xy\"):\n # Handle x,y coordinate pairs\n x, y = arg_info.value\n overlay_cross(img, (x, y), color_255, length=9, width=3)\n\n elif arg_info.name.endswith(\"bid\"):\n # Handle bid arguments with bounding boxes\n bid_value = arg_info.value\n if bid_value in properties:\n\n bbox = properties[bid_value][\"bbox\"]\n if bbox:\n overlay_rectangle(img, bbox, color_255, width=3)\n\n # Generate colored HTML\n html = create_colored_html(action_string, filtered_args, colors)\n\n return html\n\n\ndef create_colored_html(action_string: str, filtered_args: list, colors: list) -> str:\n \"\"\"\n Create HTML with colored arguments using start/stop indices.\n\n Args:\n action_string: Original action string\n filtered_args: List of ArgInfo objects with start_index/stop_index\n colors: List of RGB tuples, same length as filtered_args\n\n Returns:\n HTML string with colored spans\n \"\"\"\n # Sort args by start position for sequential processing\n sorted_pairs = sorted(zip(filtered_args, colors), key=lambda x: x[0].start_index)\n\n # Build HTML with colored spans\n html_parts = []\n last_end = 0","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} 
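Together, parse_function_calls, find_bids_and_xy_pairs, and the overlay helpers let annotate_action draw markers on a screenshot and return matching colored markup. A small usage sketch follows; the bid value and bounding box are made-up illustrations, the blank image stands in for a real screenshot, and note that properties maps each bid to a dict whose "bbox" entry is unpacked as (x, y, width, height) by overlay_rectangle:

from PIL import Image

from agentlab.analyze.overlay_utils import annotate_action

img = Image.new("RGB", (1280, 720), "white")  # stand-in for a real screenshot
action = 'mouse_click(34, 59)\nfill("a234", "test")'

# Hypothetical element properties: bid -> {"bbox": (x, y, width, height)}
properties = {"a234": {"bbox": (100, 200, 150, 30)}}

html = annotate_action(img, action, properties)  # draws a cross and a rectangle on img in place
print(html)  # the action string with colored spans matching the overlay colors
img.save("annotated_action.png")

Because colors are drawn from the same colormap index for the overlay and the span, the i-th highlighted argument in the returned markup visually matches the i-th marker on the image.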
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.overlay_utils.create_colored_html","uri":"program://AgentLab/function/src.agentlab.analyze.overlay_utils.create_colored_html#L396-L435","kind":"function","name":"create_colored_html","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":396,"end_line":435,"context_start_line":376,"context_end_line":435,"code":" if arg_info.name.endswith(\"xy\"):\n # Handle x,y coordinate pairs\n x, y = arg_info.value\n overlay_cross(img, (x, y), color_255, length=9, width=3)\n\n elif arg_info.name.endswith(\"bid\"):\n # Handle bid arguments with bounding boxes\n bid_value = arg_info.value\n if bid_value in properties:\n\n bbox = properties[bid_value][\"bbox\"]\n if bbox:\n overlay_rectangle(img, bbox, color_255, width=3)\n\n # Generate colored HTML\n html = create_colored_html(action_string, filtered_args, colors)\n\n return html\n\n\ndef create_colored_html(action_string: str, filtered_args: list, colors: list) -> str:\n \"\"\"\n Create HTML with colored arguments using start/stop indices.\n\n Args:\n action_string: Original action string\n filtered_args: List of ArgInfo objects with start_index/stop_index\n colors: List of RGB tuples, same length as filtered_args\n\n Returns:\n HTML string with colored spans\n \"\"\"\n # Sort args by start position for sequential processing\n sorted_pairs = sorted(zip(filtered_args, colors), key=lambda x: x[0].start_index)\n\n # Build HTML with colored spans\n html_parts = []\n last_end = 0\n\n for arg_info, color_rgb in sorted_pairs:\n # Add uncolored text before this argument\n html_parts.append(action_string[last_end : arg_info.start_index])\n\n # Get the argument text\n arg_text = action_string[arg_info.start_index : arg_info.stop_index]\n\n # Convert color to hex\n color_hex = \"#{:02x}{:02x}{:02x}\".format(\n int(color_rgb[0] * 255), int(color_rgb[1] * 255), int(color_rgb[2] * 255)\n )\n\n # Add colored span\n html_parts.append(f'{arg_text}')\n\n last_end = arg_info.stop_index\n\n # Add remaining text\n html_parts.append(action_string[last_end:])\n\n return \"\".join(html_parts)","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.episode_to_html","uri":"program://AgentLab/module/src.agentlab.analyze.episode_to_html#L1-L442","kind":"module","name":"src.agentlab.analyze.episode_to_html","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":1,"end_line":442,"context_start_line":1,"context_end_line":442,"code":"import base64\nfrom io import BytesIO\nfrom pathlib import Path\n\nfrom agentlab.experiments.loop import ExpResult\nfrom agentlab.experiments.study import get_most_recent_study\nfrom agentlab.llm.llm_utils import BaseMessage as AgentLabBaseMessage\n\n\ndef exp_result_to_html(\n exp_result: ExpResult,\n steps_open: bool = True,\n som_open: bool = False,\n axtree_open: bool = False,\n html_open: bool = False,\n prompt_open: bool = False,\n embed_images: bool = True,\n) -> str:\n \"\"\"\n Convert an ExpResult to HTML with collapsible sections.\n\n Args:\n exp_result: ExpResult object containing experiment data\n steps_open: Whether step sections start expanded (default: True)\n som_open: Whether SOM screenshot sections start expanded (default: False)\n axtree_open: Whether AXTree sections start expanded (default: False)\n html_open: Whether HTML sections start expanded (default: False)\n prompt_open: Whether Prompt sections start expanded (default: 
False)\n embed_images: Whether to embed images as base64 or use file paths (default: True)\n\n Returns:\n str: HTML string with collapsible episode visualization\n \"\"\"\n # Get basic episode info\n env_args = exp_result.exp_args.env_args\n steps_info = exp_result.steps_info\n\n # Build HTML structure\n html_parts = []\n\n # Add CSS for styling (unchanged)\n html_parts.append(\n \"\"\"\n \n \"\"\"\n )\n\n # Start main container\n html_parts.append('
')\n\n # Episode title and metadata\n html_parts.append(f'
{env_args.task_name}
')\n html_parts.append(f'
Seed: {env_args.task_seed}
')\n\n # Goal section - format like xray\n if len(steps_info) > 0:\n try:\n goal = steps_info[0].obs.get(\"goal_object\", \"No goal specified\")\n goal_html = _format_goal(goal)\n html_parts.append(\n f'
Goal:
{goal_html}
'\n )\n except (IndexError, AttributeError):\n pass\n\n # Process each step\n for i, step_info in enumerate(steps_info):\n if step_info.action is None and i == 0:\n continue # Skip initial reset step if no action\n\n # Step container with collapsible wrapper\n step_open_attr = \"open\" if steps_open else \"\"\n html_parts.append(f\"
\")\n html_parts.append(f\"Step {i}\")\n html_parts.append('
')\n\n # Screenshot (flat in step)\n screenshot_html = _get_screenshot_html(exp_result, i, embed_images)\n html_parts.append(screenshot_html)\n\n # Action (flat in step)\n if step_info.action is not None:\n html_parts.append(\n f'
Action:
{_escape_html(step_info.action)}
'\n )\n\n # Think (flat in step)\n think_content = step_info.agent_info.get(\"think\", \"\")\n if think_content:\n html_parts.append(\n f'
Think:
{_escape_html(think_content)}
'\n )\n\n # SOM Screenshot (nested collapsible)\n som_screenshot_html = _get_som_screenshot_html(exp_result, i, som_open, embed_images)\n if som_screenshot_html:\n html_parts.append(som_screenshot_html)\n\n # AXTree (nested collapsible)\n axtree_content = step_info.obs.get(\"axtree_txt\", \"\")\n if axtree_content:\n axtree_open_attr = \"open\" if axtree_open else \"\"\n html_parts.append(\n f\"\"\"\n
\n AXTree\n
\n
{_escape_html(axtree_content)}
\n
\n
\n \"\"\"\n )\n\n # HTML (nested collapsible)\n html_content = step_info.obs.get(\"dom_txt\", \"\")\n if html_content:\n html_open_attr = \"open\" if html_open else \"\"\n html_parts.append(\n f\"\"\"\n
\n HTML\n
\n
{_escape_html(html_content)}
\n
\n
\n \"\"\"\n )\n\n # Prompt/Chat messages (nested collapsible) - format like xray\n chat_messages = step_info.agent_info.get(\"chat_messages\", [])\n if chat_messages:\n prompt_open_attr = \"open\" if prompt_open else \"\"\n chat_html = _format_chat_messages_like_xray(chat_messages)\n html_parts.append(\n f\"\"\"\n
\n Prompt\n
\n {chat_html}\n
\n
\n \"\"\"\n )\n\n # Close step container\n html_parts.append(\"
\") # step-content\n html_parts.append(\"
\") # step\n\n # Close main container\n html_parts.append(\"
\")\n\n return \"\".join(html_parts)\n\n\ndef _get_screenshot_html(exp_result, step: int, embed_images: bool) -> str:\n \"\"\"Get HTML for main screenshot at given step.\"\"\"\n try:\n if embed_images:\n screenshot = exp_result.get_screenshot(step, som=False)\n return _image_to_html(screenshot, f\"Screenshot {step}\")\n else:\n screenshot_path = exp_result.get_screenshot_path(step, som=False)\n return _path_to_html(screenshot_path, f\"Screenshot {step}\")\n except (FileNotFoundError, IndexError):\n return \"

Screenshot not available

\"\n\n\ndef _get_som_screenshot_html(exp_result, step: int, som_open: bool, embed_images: bool) -> str:\n \"\"\"Get HTML for SOM screenshot if available.\"\"\"\n try:\n if embed_images:\n screenshot_som = exp_result.get_screenshot(step, som=True)\n if screenshot_som:\n som_open_attr = \"open\" if som_open else \"\"\n som_html = _image_to_html(screenshot_som, f\"SOM Screenshot {step}\")\n return f\"\"\"\n
\n\ndef _get_som_screenshot_html(exp_result, step: int, som_open: bool, embed_images: bool) -> str:\n    \"\"\"Get HTML for SOM screenshot if available.\"\"\"\n    try:\n        if embed_images:\n            screenshot_som = exp_result.get_screenshot(step, som=True)\n            if screenshot_som:\n                som_open_attr = \"open\" if som_open else \"\"\n                som_html = _image_to_html(screenshot_som, f\"SOM Screenshot {step}\")\n                return f\"\"\"\n                <details {som_open_attr}>\n                <summary>Screenshot_som[{step}]</summary>\n                {som_html}\n                </details>\n                \"\"\"\n        else:\n            screenshot_path = exp_result.get_screenshot_path(step, som=True)\n            if screenshot_path and screenshot_path.exists():\n                som_open_attr = \"open\" if som_open else \"\"\n                som_html = _path_to_html(screenshot_path, f\"SOM Screenshot {step}\")\n                return f\"\"\"\n                <details {som_open_attr}>\n                <summary>Screenshot_som[{step}]</summary>\n                {som_html}\n                </details>\n                \"\"\"\n    except (FileNotFoundError, IndexError):\n        pass\n    return \"\"\n\n\ndef _image_to_html(image, alt_text: str) -> str:\n    \"\"\"Convert PIL Image to HTML img tag with base64 encoding.\"\"\"\n    if image is None:\n        return f\"<div>{alt_text} not available</div>\"\n\n    # Convert PIL Image to base64\n    buffer = BytesIO()\n    image.save(buffer, format=\"PNG\")\n    img_str = base64.b64encode(buffer.getvalue()).decode()\n\n    return f'<img src=\"data:image/png;base64,{img_str}\" alt=\"{alt_text}\">'
\n\ndef _path_to_html(image_path, alt_text: str) -> str:\n    \"\"\"Convert image path to HTML img tag.\"\"\"\n    if image_path is None or not image_path.exists():\n        return f\"<div>{alt_text} not available</div>
\"\n\n # Convert to absolute path and use file:// protocol\n abs_path = image_path.resolve()\n return f'\"{alt_text}\"'\n\n\n# Rest of the helper functions remain unchanged...\ndef _format_goal(goal) -> str:\n \"\"\"Format goal object like xray does - using code blocks.\"\"\"\n if goal is None:\n return \"
No goal specified
\"\n\n # Format like xray's AgentLabBaseMessage approach\n goal_str = str(AgentLabBaseMessage(\"\", goal))\n\n return f\"
{_escape_html(goal_str)}
\"\n\n\ndef _format_chat_messages_like_xray(messages) -> str:\n \"\"\"Format chat messages like xray does - with proper role separation.\"\"\"\n if not messages:\n return \"
No chat messages
\"\n\n formatted_parts = []\n\n for i, msg in enumerate(messages):\n message_html = []\n\n if hasattr(msg, \"role\") and hasattr(msg, \"content\"):\n # Handle BaseMessage objects\n role = getattr(msg, \"role\", \"unknown\")\n content = getattr(msg, \"content\", str(msg))\n\n message_html.append(f'
\n\n    for i, msg in enumerate(messages):\n        message_html = []\n\n        if hasattr(msg, \"role\") and hasattr(msg, \"content\"):\n            # Handle BaseMessage objects\n            role = getattr(msg, \"role\", \"unknown\")\n            content = getattr(msg, \"content\", str(msg))\n\n            message_html.append(f'<b>{role.upper()}</b>')\n            message_html.append(f'<pre>{_escape_html(str(content))}</pre>')\n\n        elif isinstance(msg, dict):\n            # Handle dict messages\n            role = msg.get(\"role\", \"unknown\")\n            content = msg.get(\"content\", str(msg))\n\n            message_html.append(f'<b>{role.upper()}</b>')\n\n            if isinstance(content, list):\n                # Handle multi-part content like xray\n                for part in content:\n                    if isinstance(part, dict):\n                        if part.get(\"type\") == \"text\":\n                            message_html.append(\n                                f'<pre>{_escape_html(part.get(\"text\", \"\"))}</pre>'\n                            )\n                        elif part.get(\"type\") == \"image\":\n                            message_html.append('<div>[IMAGE]</div>')\n                        elif part.get(\"type\") == \"tool_use\":\n                            tool_str = _format_tool_call_like_xray(part)\n                            message_html.append(\n                                f'<pre>{_escape_html(tool_str)}</pre>'\n                            )\n                        else:\n                            message_html.append(\n                                f'<pre>{_escape_html(str(part))}</pre>'\n                            )\n                    else:\n                        message_html.append(\n                            f'<pre>{_escape_html(str(part))}</pre>'\n                        )\n            else:\n                message_html.append(f'<pre>{_escape_html(str(content))}</pre>')\n        else:\n            # Handle other message types\n            message_html.append(f'<div>{_escape_html(str(msg))}</div>')\n\n        formatted_parts.append(f'<div class=\"message\">{\"\".join(message_html)}</div>
')\n\n return \"\".join(formatted_parts)\n\n\ndef _format_tool_call_like_xray(tool_item: dict) -> str:\n \"\"\"Format tool calls like xray does.\"\"\"\n name = tool_item.get(\"name\", \"unknown\")\n input_data = tool_item.get(\"input\", {})\n call_id = tool_item.get(\"call_id\", \"unknown\")\n\n return f\"Tool Call: {name} `{input_data}` (call_id: {call_id})\"\n\n\ndef _escape_html(text: str) -> str:\n \"\"\"Escape HTML special characters.\"\"\"\n if not isinstance(text, str):\n text = str(text)\n\n return (\n text.replace(\"&\", \"&\")\n .replace(\"<\", \"<\")\n .replace(\">\", \">\")\n .replace('\"', \""\")\n .replace(\"'\", \"'\")\n )\n\n\nif __name__ == \"__main__\":\n\n from agentlab.experiments.exp_utils import RESULTS_DIR\n\n result_dir = get_most_recent_study(RESULTS_DIR, contains=None)\n for exp_dir in result_dir.iterdir():\n if exp_dir.is_dir():\n break\n\n print(f\"Using first exp_dir in most recent study:\\n{exp_dir}\")\n exp_result = ExpResult(exp_dir=exp_dir)\n\n page = exp_result_to_html(exp_result, embed_images=False)\n\n output_file = exp_dir / \"episode.html\"\n print(f\"Writing HTML to\\n{output_file}\")\n output_file.write_text(page)\n # cmd open output_file using subprocess\n import subprocess\n\n subprocess.run([\"open\", str(output_file)]) # macOS command to open HTML file","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.episode_to_html.exp_result_to_html","uri":"program://AgentLab/function/src.agentlab.analyze.episode_to_html.exp_result_to_html#L10-L255","kind":"function","name":"exp_result_to_html","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":10,"end_line":255,"context_start_line":1,"context_end_line":275,"code":"import base64\nfrom io import BytesIO\nfrom pathlib import Path\n\nfrom agentlab.experiments.loop import ExpResult\nfrom agentlab.experiments.study import get_most_recent_study\nfrom agentlab.llm.llm_utils import BaseMessage as AgentLabBaseMessage\n\n\ndef exp_result_to_html(\n exp_result: ExpResult,\n steps_open: bool = True,\n som_open: bool = False,\n axtree_open: bool = False,\n html_open: bool = False,\n prompt_open: bool = False,\n embed_images: bool = True,\n) -> str:\n \"\"\"\n Convert an ExpResult to HTML with collapsible sections.\n\n Args:\n exp_result: ExpResult object containing experiment data\n steps_open: Whether step sections start expanded (default: True)\n som_open: Whether SOM screenshot sections start expanded (default: False)\n axtree_open: Whether AXTree sections start expanded (default: False)\n html_open: Whether HTML sections start expanded (default: False)\n prompt_open: Whether Prompt sections start expanded (default: False)\n embed_images: Whether to embed images as base64 or use file paths (default: True)\n\n Returns:\n str: HTML string with collapsible episode visualization\n \"\"\"\n # Get basic episode info\n env_args = exp_result.exp_args.env_args\n steps_info = exp_result.steps_info\n\n # Build HTML structure\n html_parts = []\n\n # Add CSS for styling (unchanged)\n html_parts.append(\n \"\"\"\n \n \"\"\"\n )\n\n # Start main container\n html_parts.append('
<div class=\"episode\">')\n\n    # Episode title and metadata\n    html_parts.append(f'<h2>{env_args.task_name}</h2>')\n    html_parts.append(f'<div>Seed: {env_args.task_seed}</div>')\n\n    # Goal section - format like xray\n    if len(steps_info) > 0:\n        try:\n            goal = steps_info[0].obs.get(\"goal_object\", \"No goal specified\")\n            goal_html = _format_goal(goal)\n            html_parts.append(\n                f'<div><b>Goal:</b> {goal_html}</div>'\n            )\n        except (IndexError, AttributeError):\n            pass
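\n    # steps_info holds one StepInfo per environment step; step 0 is the reset\n    # observation, so it is skipped when it carries no action.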
\")\n html_parts.append(f\"Step {i}\")\n html_parts.append('
')\n\n # Screenshot (flat in step)\n screenshot_html = _get_screenshot_html(exp_result, i, embed_images)\n html_parts.append(screenshot_html)\n\n # Action (flat in step)\n if step_info.action is not None:\n html_parts.append(\n f'
<div><b>Action:</b> {_escape_html(step_info.action)}</div>
'\n )\n\n # Think (flat in step)\n think_content = step_info.agent_info.get(\"think\", \"\")\n if think_content:\n html_parts.append(\n f'
<div><b>Think:</b> {_escape_html(think_content)}</div>
'\n )\n\n # SOM Screenshot (nested collapsible)\n som_screenshot_html = _get_som_screenshot_html(exp_result, i, som_open, embed_images)\n if som_screenshot_html:\n html_parts.append(som_screenshot_html)\n\n # AXTree (nested collapsible)\n axtree_content = step_info.obs.get(\"axtree_txt\", \"\")\n if axtree_content:\n axtree_open_attr = \"open\" if axtree_open else \"\"\n html_parts.append(\n f\"\"\"\n
<details {axtree_open_attr}>\n                <summary>AXTree</summary>\n                <pre>{_escape_html(axtree_content)}</pre>\n                </details>
\n \"\"\"\n )\n\n # HTML (nested collapsible)\n html_content = step_info.obs.get(\"dom_txt\", \"\")\n if html_content:\n html_open_attr = \"open\" if html_open else \"\"\n html_parts.append(\n f\"\"\"\n
<details {html_open_attr}>\n                <summary>HTML</summary>\n                <pre>{_escape_html(html_content)}</pre>\n                </details>
\n \"\"\"\n )\n\n # Prompt/Chat messages (nested collapsible) - format like xray\n chat_messages = step_info.agent_info.get(\"chat_messages\", [])\n if chat_messages:\n prompt_open_attr = \"open\" if prompt_open else \"\"\n chat_html = _format_chat_messages_like_xray(chat_messages)\n html_parts.append(\n f\"\"\"\n
<details {prompt_open_attr}>\n                <summary>Prompt</summary>\n                {chat_html}\n                </details>
\n \"\"\"\n )\n\n # Close step container\n html_parts.append(\"
\") # step-content\n html_parts.append(\"
\") # step\n\n # Close main container\n html_parts.append(\"
\")\n\n return \"\".join(html_parts)\n\n\ndef _get_screenshot_html(exp_result, step: int, embed_images: bool) -> str:\n \"\"\"Get HTML for main screenshot at given step.\"\"\"\n try:\n if embed_images:\n screenshot = exp_result.get_screenshot(step, som=False)\n return _image_to_html(screenshot, f\"Screenshot {step}\")\n else:\n screenshot_path = exp_result.get_screenshot_path(step, som=False)\n return _path_to_html(screenshot_path, f\"Screenshot {step}\")\n except (FileNotFoundError, IndexError):\n return \"

Screenshot not available

\"\n\n\ndef _get_som_screenshot_html(exp_result, step: int, som_open: bool, embed_images: bool) -> str:\n \"\"\"Get HTML for SOM screenshot if available.\"\"\"\n try:\n if embed_images:\n screenshot_som = exp_result.get_screenshot(step, som=True)","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.episode_to_html._get_screenshot_html","uri":"program://AgentLab/function/src.agentlab.analyze.episode_to_html._get_screenshot_html#L258-L268","kind":"function","name":"_get_screenshot_html","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":258,"end_line":268,"context_start_line":238,"context_end_line":288,"code":" f\"\"\"\n
<details {prompt_open_attr}>\n                <summary>Prompt</summary>\n                {chat_html}\n                </details>\n                \"\"\"\n            )\n\n        # Close step container\n        html_parts.append(\"</div>\")  # step-content\n        html_parts.append(\"</details>\")  # step\n\n    # Close main container\n    html_parts.append(\"</div>\")\n\n    return \"\".join(html_parts)\n\n\ndef _get_screenshot_html(exp_result, step: int, embed_images: bool) -> str:\n    \"\"\"Get HTML for main screenshot at given step.\"\"\"\n    try:\n        if embed_images:\n            screenshot = exp_result.get_screenshot(step, som=False)\n            return _image_to_html(screenshot, f\"Screenshot {step}\")\n        else:\n            screenshot_path = exp_result.get_screenshot_path(step, som=False)\n            return _path_to_html(screenshot_path, f\"Screenshot {step}\")\n    except (FileNotFoundError, IndexError):\n        return \"<div>Screenshot not available</div>
\"\n\n\ndef _get_som_screenshot_html(exp_result, step: int, som_open: bool, embed_images: bool) -> str:\n \"\"\"Get HTML for SOM screenshot if available.\"\"\"\n try:\n if embed_images:\n screenshot_som = exp_result.get_screenshot(step, som=True)\n if screenshot_som:\n som_open_attr = \"open\" if som_open else \"\"\n som_html = _image_to_html(screenshot_som, f\"SOM Screenshot {step}\")\n return f\"\"\"\n
<details {som_open_attr}>\n                <summary>Screenshot_som[{step}]</summary>\n                {som_html}\n                </details>\n                \"\"\"\n        else:\n            screenshot_path = exp_result.get_screenshot_path(step, som=True)","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.episode_to_html._get_som_screenshot_html","uri":"program://AgentLab/function/src.agentlab.analyze.episode_to_html._get_som_screenshot_html#L271-L302","kind":"function","name":"_get_som_screenshot_html","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":271,"end_line":302,"context_start_line":251,"context_end_line":322,"code":"\n    # Close main container\n    html_parts.append(\"</div>\")\n\n    return \"\".join(html_parts)\n\n\ndef _get_screenshot_html(exp_result, step: int, embed_images: bool) -> str:\n    \"\"\"Get HTML for main screenshot at given step.\"\"\"\n    try:\n        if embed_images:\n            screenshot = exp_result.get_screenshot(step, som=False)\n            return _image_to_html(screenshot, f\"Screenshot {step}\")\n        else:\n            screenshot_path = exp_result.get_screenshot_path(step, som=False)\n            return _path_to_html(screenshot_path, f\"Screenshot {step}\")\n    except (FileNotFoundError, IndexError):\n        return \"<div>Screenshot not available</div>
\"\n\n\ndef _get_som_screenshot_html(exp_result, step: int, som_open: bool, embed_images: bool) -> str:\n \"\"\"Get HTML for SOM screenshot if available.\"\"\"\n try:\n if embed_images:\n screenshot_som = exp_result.get_screenshot(step, som=True)\n if screenshot_som:\n som_open_attr = \"open\" if som_open else \"\"\n som_html = _image_to_html(screenshot_som, f\"SOM Screenshot {step}\")\n return f\"\"\"\n
<details {som_open_attr}>\n                <summary>Screenshot_som[{step}]</summary>\n                {som_html}\n                </details>\n                \"\"\"\n        else:\n            screenshot_path = exp_result.get_screenshot_path(step, som=True)\n            if screenshot_path and screenshot_path.exists():\n                som_open_attr = \"open\" if som_open else \"\"\n                som_html = _path_to_html(screenshot_path, f\"SOM Screenshot {step}\")\n                return f\"\"\"\n                <details {som_open_attr}>\n                <summary>Screenshot_som[{step}]</summary>\n                {som_html}\n                </details>\n                \"\"\"\n    except (FileNotFoundError, IndexError):\n        pass\n    return \"\"\n\n\ndef _image_to_html(image, alt_text: str) -> str:\n    \"\"\"Convert PIL Image to HTML img tag with base64 encoding.\"\"\"\n    if image is None:\n        return f\"<div>{alt_text} not available</div>\"\n\n    # Convert PIL Image to base64\n    buffer = BytesIO()\n    image.save(buffer, format=\"PNG\")\n    img_str = base64.b64encode(buffer.getvalue()).decode()\n\n    return f'<img src=\"data:image/png;base64,{img_str}\" alt=\"{alt_text}\">'\n\n\ndef _path_to_html(image_path, alt_text: str) -> str:\n    \"\"\"Convert image path to HTML img tag.\"\"\"\n    if image_path is None or not image_path.exists():\n        return f\"<div>{alt_text} not available</div>
\"\n","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.episode_to_html._image_to_html","uri":"program://AgentLab/function/src.agentlab.analyze.episode_to_html._image_to_html#L305-L315","kind":"function","name":"_image_to_html","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":305,"end_line":315,"context_start_line":285,"context_end_line":335,"code":" \n \"\"\"\n else:\n screenshot_path = exp_result.get_screenshot_path(step, som=True)\n if screenshot_path and screenshot_path.exists():\n som_open_attr = \"open\" if som_open else \"\"\n som_html = _path_to_html(screenshot_path, f\"SOM Screenshot {step}\")\n return f\"\"\"\n
<details {som_open_attr}>\n                <summary>Screenshot_som[{step}]</summary>\n                {som_html}\n                </details>\n                \"\"\"\n    except (FileNotFoundError, IndexError):\n        pass\n    return \"\"\n\n\ndef _image_to_html(image, alt_text: str) -> str:\n    \"\"\"Convert PIL Image to HTML img tag with base64 encoding.\"\"\"\n    if image is None:\n        return f\"<div>{alt_text} not available</div>
\"\n\n # Convert PIL Image to base64\n buffer = BytesIO()\n image.save(buffer, format=\"PNG\")\n img_str = base64.b64encode(buffer.getvalue()).decode()\n\n return f'\"{alt_text}\"'\n\n\ndef _path_to_html(image_path, alt_text: str) -> str:\n \"\"\"Convert image path to HTML img tag.\"\"\"\n if image_path is None or not image_path.exists():\n return f\"

{alt_text} not available

\"\n\n # Convert to absolute path and use file:// protocol\n abs_path = image_path.resolve()\n return f'\"{alt_text}\"'\n\n\n# Rest of the helper functions remain unchanged...\ndef _format_goal(goal) -> str:\n \"\"\"Format goal object like xray does - using code blocks.\"\"\"\n if goal is None:\n return \"
No goal specified
\"\n\n # Format like xray's AgentLabBaseMessage approach\n goal_str = str(AgentLabBaseMessage(\"\", goal))","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.episode_to_html._path_to_html","uri":"program://AgentLab/function/src.agentlab.analyze.episode_to_html._path_to_html#L318-L325","kind":"function","name":"_path_to_html","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":318,"end_line":325,"context_start_line":298,"context_end_line":345,"code":" \n \"\"\"\n except (FileNotFoundError, IndexError):\n pass\n return \"\"\n\n\ndef _image_to_html(image, alt_text: str) -> str:\n \"\"\"Convert PIL Image to HTML img tag with base64 encoding.\"\"\"\n if image is None:\n return f\"
<div>{alt_text} not available</div>\"\n\n    # Convert PIL Image to base64\n    buffer = BytesIO()\n    image.save(buffer, format=\"PNG\")\n    img_str = base64.b64encode(buffer.getvalue()).decode()\n\n    return f'<img src=\"data:image/png;base64,{img_str}\" alt=\"{alt_text}\">'\n\n\ndef _path_to_html(image_path, alt_text: str) -> str:\n    \"\"\"Convert image path to HTML img tag.\"\"\"\n    if image_path is None or not image_path.exists():\n        return f\"<div>{alt_text} not available</div>\"\n\n    # Convert to absolute path and use file:// protocol\n    abs_path = image_path.resolve()\n    return f'<img src=\"file://{abs_path}\" alt=\"{alt_text}\">'\n\n\n# Rest of the helper functions remain unchanged...\ndef _format_goal(goal) -> str:\n    \"\"\"Format goal object like xray does - using code blocks.\"\"\"\n    if goal is None:\n        return \"<pre>No goal specified</pre>\"\n\n    # Format like xray's AgentLabBaseMessage approach\n    goal_str = str(AgentLabBaseMessage(\"\", goal))\n\n    return f\"<pre>{_escape_html(goal_str)}</pre>\"\n\n\ndef _format_chat_messages_like_xray(messages) -> str:\n    \"\"\"Format chat messages like xray does - with proper role separation.\"\"\"\n    if not messages:\n        return \"<div>No chat messages</div>
\"\n\n formatted_parts = []","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.episode_to_html._format_goal","uri":"program://AgentLab/function/src.agentlab.analyze.episode_to_html._format_goal#L329-L337","kind":"function","name":"_format_goal","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":329,"end_line":337,"context_start_line":309,"context_end_line":357,"code":"\n # Convert PIL Image to base64\n buffer = BytesIO()\n image.save(buffer, format=\"PNG\")\n img_str = base64.b64encode(buffer.getvalue()).decode()\n\n return f'\"{alt_text}\"'\n\n\ndef _path_to_html(image_path, alt_text: str) -> str:\n \"\"\"Convert image path to HTML img tag.\"\"\"\n if image_path is None or not image_path.exists():\n return f\"

{alt_text} not available

\"\n\n # Convert to absolute path and use file:// protocol\n abs_path = image_path.resolve()\n return f'\"{alt_text}\"'\n\n\n# Rest of the helper functions remain unchanged...\ndef _format_goal(goal) -> str:\n \"\"\"Format goal object like xray does - using code blocks.\"\"\"\n if goal is None:\n return \"
No goal specified
\"\n\n # Format like xray's AgentLabBaseMessage approach\n goal_str = str(AgentLabBaseMessage(\"\", goal))\n\n return f\"
{_escape_html(goal_str)}
\"\n\n\ndef _format_chat_messages_like_xray(messages) -> str:\n \"\"\"Format chat messages like xray does - with proper role separation.\"\"\"\n if not messages:\n return \"
No chat messages
\"\n\n formatted_parts = []\n\n for i, msg in enumerate(messages):\n message_html = []\n\n if hasattr(msg, \"role\") and hasattr(msg, \"content\"):\n # Handle BaseMessage objects\n role = getattr(msg, \"role\", \"unknown\")\n content = getattr(msg, \"content\", str(msg))\n\n message_html.append(f'
{role.upper()}
')\n message_html.append(f'
{_escape_html(str(content))}
')\n","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.episode_to_html._format_chat_messages_like_xray","uri":"program://AgentLab/function/src.agentlab.analyze.episode_to_html._format_chat_messages_like_xray#L340-L396","kind":"function","name":"_format_chat_messages_like_xray","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":340,"end_line":396,"context_start_line":320,"context_end_line":416,"code":" if image_path is None or not image_path.exists():\n return f\"
<div>{alt_text} not available</div>\"\n\n    # Convert to absolute path and use file:// protocol\n    abs_path = image_path.resolve()\n    return f'<img src=\"file://{abs_path}\" alt=\"{alt_text}\">'\n\n\n# Rest of the helper functions remain unchanged...\ndef _format_goal(goal) -> str:\n    \"\"\"Format goal object like xray does - using code blocks.\"\"\"\n    if goal is None:\n        return \"<pre>No goal specified</pre>\"\n\n    # Format like xray's AgentLabBaseMessage approach\n    goal_str = str(AgentLabBaseMessage(\"\", goal))\n\n    return f\"<pre>{_escape_html(goal_str)}</pre>\"\n\n\ndef _format_chat_messages_like_xray(messages) -> str:\n    \"\"\"Format chat messages like xray does - with proper role separation.\"\"\"\n    if not messages:\n        return \"<div>No chat messages</div>\"\n\n    formatted_parts = []\n\n    for i, msg in enumerate(messages):\n        message_html = []\n\n        if hasattr(msg, \"role\") and hasattr(msg, \"content\"):\n            # Handle BaseMessage objects\n            role = getattr(msg, \"role\", \"unknown\")\n            content = getattr(msg, \"content\", str(msg))\n\n            message_html.append(f'<b>{role.upper()}</b>')\n            message_html.append(f'<pre>{_escape_html(str(content))}</pre>')\n\n        elif isinstance(msg, dict):\n            # Handle dict messages\n            role = msg.get(\"role\", \"unknown\")\n            content = msg.get(\"content\", str(msg))\n\n            message_html.append(f'<b>{role.upper()}</b>')\n\n            if isinstance(content, list):\n                # Handle multi-part content like xray\n                for part in content:\n                    if isinstance(part, dict):\n                        if part.get(\"type\") == \"text\":\n                            message_html.append(\n                                f'<pre>{_escape_html(part.get(\"text\", \"\"))}</pre>'\n                            )\n                        elif part.get(\"type\") == \"image\":\n                            message_html.append('<div>[IMAGE]</div>')\n                        elif part.get(\"type\") == \"tool_use\":\n                            tool_str = _format_tool_call_like_xray(part)\n                            message_html.append(\n                                f'<pre>{_escape_html(tool_str)}</pre>'\n                            )\n                        else:\n                            message_html.append(\n                                f'<pre>{_escape_html(str(part))}</pre>'\n                            )\n                    else:\n                        message_html.append(\n                            f'<pre>{_escape_html(str(part))}</pre>'\n                        )\n            else:\n                message_html.append(f'<pre>{_escape_html(str(content))}</pre>')\n        else:\n            # Handle other message types\n            message_html.append(f'<div>{_escape_html(str(msg))}</div>')\n\n        formatted_parts.append(f'<div class=\"message\">{\"\".join(message_html)}</div>
')\n\n    return \"\".join(formatted_parts)\n\n\ndef _format_tool_call_like_xray(tool_item: dict) -> str:\n    \"\"\"Format tool calls like xray does.\"\"\"\n    name = tool_item.get(\"name\", \"unknown\")\n    input_data = tool_item.get(\"input\", {})\n    call_id = tool_item.get(\"call_id\", \"unknown\")\n\n    return f\"Tool Call: {name} `{input_data}` (call_id: {call_id})\"\n\n\ndef _escape_html(text: str) -> str:\n    \"\"\"Escape HTML special characters.\"\"\"\n    if not isinstance(text, str):\n        text = str(text)\n\n    return (\n        text.replace(\"&\", \"&amp;\")\n        .replace(\"<\", \"&lt;\")\n        .replace(\">\", \"&gt;\")","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.episode_to_html._format_tool_call_like_xray","uri":"program://AgentLab/function/src.agentlab.analyze.episode_to_html._format_tool_call_like_xray#L399-L405","kind":"function","name":"_format_tool_call_like_xray","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":399,"end_line":405,"context_start_line":379,"context_end_line":425,"code":"            )\n        else:\n            message_html.append(\n                f'<pre>{_escape_html(str(part))}</pre>'\n            )\n        else:\n            message_html.append(\n                f'<pre>{_escape_html(str(part))}</pre>'\n            )\n        else:\n            message_html.append(f'<pre>{_escape_html(str(content))}</pre>')\n        else:\n            # Handle other message types\n            message_html.append(f'<div>{_escape_html(str(msg))}</div>')\n\n        formatted_parts.append(f'<div class=\"message\">{\"\".join(message_html)}</div>
')\n\n    return \"\".join(formatted_parts)\n\n\ndef _format_tool_call_like_xray(tool_item: dict) -> str:\n    \"\"\"Format tool calls like xray does.\"\"\"\n    name = tool_item.get(\"name\", \"unknown\")\n    input_data = tool_item.get(\"input\", {})\n    call_id = tool_item.get(\"call_id\", \"unknown\")\n\n    return f\"Tool Call: {name} `{input_data}` (call_id: {call_id})\"\n\n\ndef _escape_html(text: str) -> str:\n    \"\"\"Escape HTML special characters.\"\"\"\n    if not isinstance(text, str):\n        text = str(text)\n\n    return (\n        text.replace(\"&\", \"&amp;\")\n        .replace(\"<\", \"&lt;\")\n        .replace(\">\", \"&gt;\")\n        .replace('\"', \"&quot;\")\n        .replace(\"'\", \"&#x27;\")\n    )\n\n\nif __name__ == \"__main__\":\n\n    from agentlab.experiments.exp_utils import RESULTS_DIR\n","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.episode_to_html._escape_html","uri":"program://AgentLab/function/src.agentlab.analyze.episode_to_html._escape_html#L408-L419","kind":"function","name":"_escape_html","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":408,"end_line":419,"context_start_line":388,"context_end_line":439,"code":"        else:\n            message_html.append(f'<pre>{_escape_html(str(content))}</pre>')\n        else:\n            # Handle other message types\n            message_html.append(f'<div>{_escape_html(str(msg))}</div>')\n\n        formatted_parts.append(f'<div class=\"message\">{\"\".join(message_html)}</div>
')\n\n return \"\".join(formatted_parts)\n\n\ndef _format_tool_call_like_xray(tool_item: dict) -> str:\n \"\"\"Format tool calls like xray does.\"\"\"\n name = tool_item.get(\"name\", \"unknown\")\n input_data = tool_item.get(\"input\", {})\n call_id = tool_item.get(\"call_id\", \"unknown\")\n\n return f\"Tool Call: {name} `{input_data}` (call_id: {call_id})\"\n\n\ndef _escape_html(text: str) -> str:\n \"\"\"Escape HTML special characters.\"\"\"\n if not isinstance(text, str):\n text = str(text)\n\n return (\n text.replace(\"&\", \"&\")\n .replace(\"<\", \"<\")\n .replace(\">\", \">\")\n .replace('\"', \""\")\n .replace(\"'\", \"'\")\n )\n\n\nif __name__ == \"__main__\":\n\n from agentlab.experiments.exp_utils import RESULTS_DIR\n\n result_dir = get_most_recent_study(RESULTS_DIR, contains=None)\n for exp_dir in result_dir.iterdir():\n if exp_dir.is_dir():\n break\n\n print(f\"Using first exp_dir in most recent study:\\n{exp_dir}\")\n exp_result = ExpResult(exp_dir=exp_dir)\n\n page = exp_result_to_html(exp_result, embed_images=False)\n\n output_file = exp_dir / \"episode.html\"\n print(f\"Writing HTML to\\n{output_file}\")\n output_file.write_text(page)\n # cmd open output_file using subprocess","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results","uri":"program://AgentLab/module/src.agentlab.analyze.inspect_results#L1-L897","kind":"module","name":"src.agentlab.analyze.inspect_results","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":1,"end_line":897,"context_start_line":1,"context_end_line":897,"code":"import fnmatch\nimport json\nimport random\nimport re\nimport traceback\nimport warnings\nfrom collections import defaultdict\nfrom logging import warn\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import display\nfrom tqdm import tqdm\n\nfrom agentlab.experiments.loop import ExpResult, get_exp_result, yield_all_exp_results\n\n# TODO find a more portable way to code set_task_category_as_index at least\n# handle dynamic imports. We don't want to always import workarena\n# from browsergym.workarena import TASK_CATEGORY_MAP\n\nwarnings.filterwarnings(\"ignore\", category=pd.errors.PerformanceWarning)\n\npd.set_option(\"display.multi_sparse\", False)\n\nAGENT_NAME_KEY = \"agent.agent_name\"\nTASK_KEY = \"env.task_name\"\n\n\ndef get_constants_and_variables(df: pd.DataFrame, drop_constants: bool = False):\n \"\"\"Filter out constants from the dataframe.\"\"\"\n\n constants = {}\n variable_keys = []\n for col in df.columns:\n try:\n nuniq = df[col].nunique(dropna=False)\n except TypeError:\n nuniq = 0 # non hashable types are considered variables\n if nuniq == 1:\n if isinstance(df[col].iloc[0], np.generic):\n val = df[col].iloc[0].item()\n else:\n val = df[col].iloc[0]\n constants[col] = val\n if drop_constants:\n df = df.drop(col, axis=1)\n else:\n variable_keys.append(col)\n\n return constants, variable_keys, df\n\n\ndef set_index_from_variables(\n df: pd.DataFrame,\n index_white_list=(\"agent.*\",),\n index_black_list=(\"*model_url*\", \"*extra*\", \"*._*\"),\n task_key=TASK_KEY,\n add_agent_and_benchmark=True,\n):\n \"\"\"Set the index, inplace, to env.task_name and all variables.\n\n Introspects `df` to find all fields that are variable and set the index to\n those fields. This will allow to easily groupby and compare results. 
To\n filter undersired variables from the index, use index_white_list and\n index_black_list.\n\n Args:\n df: The dataframe to modify\n index_white_list: List of wildard patterns to match variables that\n should be included in the index.\n index_black_list: List of wildard patterns to match variables that\n should be excluded from the index.\n task_key: The key to use as the first level of the index.\n add_agent_and_benchmark: If True, add agent.agent_name and env.benchmark\n \"\"\"\n df.reset_index(inplace=True)\n constants, variables, _ = get_constants_and_variables(df)\n\n index_variables = []\n if add_agent_and_benchmark:\n index_variables.append(\"agent.agent_name\")\n if \"env.benchmark\" not in df.columns:\n df[\"env.benchmark\"] = df[TASK_KEY].map(_benchmark_from_task_name)\n index_variables.append(\"env.benchmark\")\n\n for var in variables:\n white = any([fnmatch.fnmatch(var, pattern) for pattern in index_white_list])\n black = any([fnmatch.fnmatch(var, pattern) for pattern in index_black_list])\n\n if white and (not black) and (var not in index_variables):\n index_variables.append(var)\n\n for var in index_variables:\n if df[var].isnull().any():\n warn(\n f\"Variable {var} contains NaN or None values. This will be replaced by the string 'None' to avoid some pandas bug.\"\n )\n df[var] = df[var].fillna(\"None\")\n\n # agent_variables = [var for var in variables if var.startswith(\"agent.\")]\n df.set_index([task_key] + index_variables, inplace=True)\n df.sort_index(inplace=True)\n\n\ndef load_result_df(\n exp_dir,\n progress_fn=tqdm,\n set_index=True,\n result_df=None,\n index_white_list=(\"agent.*\",),\n index_black_list=(\"*model_url*\", \"*extra*\", \"*._*\"),\n remove_args_suffix=True,\n):\n \"\"\"Load the result dataframe.\n\n Will set the index to env.task_name and all columens that are not constant and\n starts with agent. This will allow to easily groupby and compare\n results. 
This index can be changed later using df.set_index.\n\n Args:\n exp_dir: Path to the experiment directory\n progress_fn: Progress function to use when loading the results\n set_index: If True, set the index to env.task_name and variable agent\n result_df: If not None, speed up the loading process by reusing\n alreading loaded objects.\n index_white_list: List of wildard patterns to match variables that\n should be included in the index.\n index_black_list: List of wildard patterns to match variables that\n should be excluded from the index.\n remove_args_suffix: If True, remove the _args suffix from the columns\n\n Returns:\n pd.DataFrame: The result dataframe\n \"\"\"\n\n if result_df is not None:\n result_list = list(result_df[\"exp_result\"])\n else:\n result_list = list(yield_all_exp_results(exp_dir, progress_fn=progress_fn))\n\n if len(result_list) == 0:\n return None\n\n if progress_fn is not None:\n result_list = progress_fn(result_list, desc=\"Loading results\")\n\n df = pd.DataFrame([exp_result.get_exp_record() for exp_result in result_list])\n\n if remove_args_suffix:\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n\n if set_index:\n set_index_from_variables(df, index_white_list, index_black_list)\n return df\n\n\ndef reduce_episodes(result_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Reduce the dataframe to a single row per episode and summarize some of the columns.\"\"\"\n\n levels = list(range(result_df.index.nlevels))\n return result_df.groupby(level=levels).apply(summarize)\n\n\ndef report_2d(df: pd.DataFrame, reduce_fn: callable = reduce_episodes, n_row_keys=1):\n \"\"\"Generic function to create a 2d report based on the dataframe.\n\n The code is simple but can be a bit cryptic. This is best explained in the\n following 3 steps:\n 1) Groupby: Will use the existing multi-index to groupby. Make sure to set the\n an index to the desired keys before calling this function.\n 2) Reduce: Uses the reduce_fn to reduce the content of each group to a single\n variable, creating a 1D series indexed by its original index.\n 3) Unstack: Produce a 2D table such that the first n_row_keys are used to\n specify how many dimensions are used for the rows. The remaining\n dimensions are used for the columns.\n\n Args:\n df: The dataframe to reduce\n reduce_fn: The function to use to reduce the sub dataframe. 
By default\n this is reduce_episodes.\n n_row_keys: The number of keys to use for the rows.\n\n Returns:\n pd.DataFrame: The 2D report\n \"\"\"\n\n levels = list(range(df.index.nlevels))\n reduced_df = df.groupby(level=levels).apply(reduce_fn) # type: pd.Series\n return reduced_df.unstack(level=levels[n_row_keys:])\n\n\ndef report_constant_and_variables(df, show_stack_traces=True):\n constants, variables, _ = get_constants_and_variables(df)\n print(\"Constants:\")\n for k, v in constants.items():\n print(f\" {k}: {v}\")\n\n print(\"\\nVariables:\")\n for var in variables:\n if not show_stack_traces and var == \"stack_trace\":\n continue\n\n # get unique with count and sort by count descending\n unique_counts = df[var].value_counts().sort_values(ascending=False)\n\n print(f\" {var}: n_unique={len(unique_counts)}\")\n for i, (val, count) in enumerate(unique_counts.items()):\n print(f\" {count}x : {val}\")\n if i >= 2:\n break\n if len(unique_counts) > 3:\n print(\" ...\\n\")\n\n\ndef get_std_err(df, metric):\n \"\"\"Get the standard error for a binary metric.\"\"\"\n # extract non missing values\n data = df[metric].dropna().values\n\n # asser either 0 or 1\n if np.all(np.isin(data, [0, 1])):\n mean = np.mean(data)\n std_err = np.sqrt(mean * (1 - mean) / len(data))\n return mean, std_err\n else:\n return get_sample_std_err(df, metric)\n\n\ndef get_sample_std_err(df, metric):\n \"\"\"Get the standard error for a binary metric.\"\"\"\n # extract non missing values\n data = df[metric].dropna().values\n\n mean = np.mean(data)\n std_err = np.std(data, ddof=1) / np.sqrt(len(data))\n if np.isnan(std_err):\n std_err = np.float64(0)\n return mean, std_err\n\n\ndef summarize(sub_df):\n if \"cum_reward\" not in sub_df:\n record = dict(\n avg_reward=np.nan,\n std_err=np.nan,\n # avg_raw_reward=np.nan,\n avg_steps=np.nan,\n n_completed=f\"0/{len(sub_df)}\",\n n_err=0,\n )\n else:\n err = sub_df[\"err_msg\"].notnull()\n n_completed = err.copy()\n for col in [\"truncated\", \"terminated\"]:\n if col in sub_df:\n n_completed = n_completed | sub_df[col]\n n_completed = n_completed.sum()\n\n if n_completed == 0:\n return None\n\n _mean_reward, std_reward = get_std_err(sub_df, \"cum_reward\")\n\n # sanity check, if there is an error the reward should be zero\n assert sub_df[sub_df[\"err_msg\"].notnull()][\"cum_reward\"].sum() == 0\n\n record = dict(\n avg_reward=sub_df[\"cum_reward\"].mean(skipna=True).round(3),\n std_err=std_reward.round(3),\n # avg_raw_reward=sub_df[\"cum_raw_reward\"].mean(skipna=True).round(3),\n avg_steps=sub_df[\"n_steps\"].mean(skipna=True).round(3),\n n_completed=f\"{n_completed}/{len(sub_df)}\",\n n_err=err.sum(skipna=True),\n )\n if \"stats.cum_cost\" in sub_df:\n record[\"cum_cost\"] = sub_df[\"stats.cum_cost\"].sum(skipna=True).round(4)\n if \"stats.cum_effective_cost\" in sub_df:\n record[\"cum_effective_cost\"] = (\n sub_df[\"stats.cum_effective_cost\"].sum(skipna=True).round(4)\n )\n record.pop(\"cum_cost\", None)\n\n return pd.Series(record)\n\n\ndef summarize_stats(sub_df):\n \"\"\"Summarize the stats columns.\"\"\"\n\n # make sure there are completed runs\n err = sub_df[\"err_msg\"].notnull()\n n_completed = err.copy()\n for col in [\"truncated\", \"terminated\"]:\n if col in sub_df:\n n_completed = n_completed | sub_df[col]\n n_completed = n_completed.sum()\n\n if n_completed == 0:\n return None\n\n record = dict(\n avg_reward=sub_df[\"cum_reward\"].mean(skipna=True).round(3),\n )\n for key in sub_df.keys():\n if key.startswith(\"stats.\"):\n key_ = key.split(\".\")[1]\n op = 
key_.split(\"_\")[0]\n if op == \"cum\":\n record[key_] = sub_df[key].sum(skipna=True)\n elif op == \"max\":\n record[key_] = sub_df[key].max(skipna=True)\n else:\n raise ValueError(f\"Unknown stats operation: {op}\")\n return pd.Series(record)\n\n\ndef _find_diff(tuple1, tuple2):\n \"\"\"return the list of index wher tuple1 != tuple2\"\"\"\n return [i for i, (a, b) in enumerate(zip(tuple1, tuple2)) if a != b]\n\n\ndef _extract_ablation_study(report: pd.DataFrame, progression=False):\n \"\"\"Reduce the multi-index to a change description compared to the previous row.\"\"\"\n names = report.index.names\n report = report.copy()\n # report.sort_index(inplace=True)\n\n reference_index = None\n for index in report.index:\n if reference_index is not None:\n diffs = _find_diff(reference_index, index)\n\n if progression:\n change = \"↳ \" + \", \".join([f\"{names[i]}={index[i]}\" for i in diffs])\n else:\n changes = []\n for i in diffs:\n val = index[i]\n if isinstance(val, bool):\n changes.append((\"+\" if val else \"-\") + names[i])\n else:\n changes.append(f\"{names[i]}←{val}\")\n change = \", \".join(changes)\n else:\n change = \"Initial Configuration\"\n report.loc[index, \"change\"] = change\n if progression:\n reference_index = index\n else:\n reference_index = report.index[0]\n\n report = report.reset_index()\n report = report.set_index([\"change\"])\n\n # delete columns related to old index\n return report.drop(names, axis=1)\n\n\ndef ablation_report(result_df: pd.DataFrame, reduce_fn=summarize, progression=False):\n \"\"\"Reduce the multi-index to a change description compared to the previous row.\n\n *NOTE*: This assumes that this experiments was launched with make_ablation_study.\n\n Rows will be sorted according to the average ExpArgs.order for all\n experiments associated with the multi-index.\n\n Args:\n result_df: The result dataframe as returned by load_result_df.\n reduce_fn: The function to use to reduce the sub dataframe. By default\n this is summarize.\n progression: If True, the change description will be the progression\n\n Returns:\n A dataframe with the change description as index.\n \"\"\"\n report = global_report(result_df, reduce_fn=reduce_fn)\n report = _sort_order(result_df, report)\n report = _extract_ablation_study(report, progression=progression)\n return report\n\n\ndef _get_avg_order(df: pd.DataFrame, row: pd.Series):\n \"\"\"Return the average order for the given row.\"\"\"\n df = df.reset_index(level=0, drop=True, inplace=False)\n # df.sort_index(inplace=True)\n\n sub_df = df.loc[row.name]\n orders = [get_exp_result(exp_dir).exp_args.order for exp_dir in sub_df.exp_dir]\n orders = [order for order in orders if order is not None]\n if len(orders) == 0:\n return None\n return np.mean(orders)\n\n\ndef _sort_order(result_df, report):\n \"\"\"Add a column to the report with the average order for each agent and sort.\"\"\"\n\n def add_order(row):\n return _get_avg_order(result_df, row)\n\n report[\"avg_order\"] = report.apply(add_order, axis=1)\n return report.sort_values(\"avg_order\", ascending=True)\n\n\ndef global_report(\n result_df: pd.DataFrame,\n reduce_fn=summarize,\n rename_index=lambda name: name.replace(\"agent.flags.\", \"\"),\n):\n \"\"\"Produce a report that summarize all tasks and all episodes for each\n agent.\n\n Args:\n result_df: The result dataframe as returned by load_result_df.\n reduce_fn: The function to use to reduce the sub dataframe. By default\n this is summarize.\n rename_index: Function to rename the index. 
By default we remove the prefix\n \"agent.flags.\"\n\n Returns:\n pd.DataFrame: The report\n \"\"\"\n\n levels = list(range(result_df.index.nlevels))\n\n if len(levels) == 1:\n print(\"Only one configuration is found, returning a per-task report.\")\n report = report_2d(result_df, reduce_fn=reduce_fn)\n report.loc[\"[ALL TASKS]\"] = reduce_fn(result_df)\n else:\n print(\n \"Found multiple configuration, averaging across tasks and returning a per-agent report.\"\n )\n report = result_df.groupby(level=levels[1:]).apply(reduce_fn)\n\n if rename_index is not None:\n index_names = [rename_index(name) for name in report.index.names]\n report = report.rename_axis(index=index_names)\n\n # if has key avg_reward\n if \"avg_reward\" in report.columns:\n report = report.sort_values(\"avg_reward\", ascending=False)\n\n return report\n\n\ndef _rename_bool_flags(report: pd.DataFrame, true_str=\"✓\", false_str=\"-\"):\n \"\"\"Rename the boolean flags to be more compact and readable.\"\"\"\n map_bool = lambda x: true_str if x is True else false_str if x is False else x\n if isinstance(report.index, pd.MultiIndex):\n report.index = report.index.set_levels(\n [[map_bool(i) for i in level] for level in report.index.levels]\n )\n return report\n\n\ndef flag_report(report: pd.DataFrame, metric: str = \"avg_reward\", round_digits: int = 2):\n # for all index in the multi-index with boolean value, get the average for\n # True and the average for False separately. Produce a new dataframe with\n # the average for True and False for each index and the ratio between the\n # two as a new column.\n\n # check the number of levels\n if report.index.nlevels <= 1:\n print(f\"Only {report.index.nlevels} levels in the index, cannot produce flag report.\")\n return\n\n report = report.copy()\n report = report.reset_index()\n\n records = []\n for col in report.columns:\n if report[col].dtype == bool:\n avg_true = report[report[col]][metric].mean()\n avg_false = report[~report[col]][metric].mean()\n ratio = avg_true / avg_false\n records.append(dict(hparam=col, avg_true=avg_true, avg_false=avg_false, ratio=ratio))\n\n flag_report = pd.DataFrame(records).set_index(\"hparam\")\n flag_report.sort_values(\"ratio\", ascending=False, inplace=True)\n if round_digits is not None:\n flag_report = flag_report.round(round_digits)\n\n return flag_report\n\n\ndef display_report(\n report: pd.DataFrame,\n apply_shrink_columns: bool = True,\n copy_to_clipboard: bool = True,\n rename_bool_flags: bool = True,\n print_only: str = None,\n):\n \"\"\"Display the report in a nicer-ish format.\n\n To be able to wrap col names we need to use set_wrap_stype, which returns a\n styled df, and doesn't behave like a normal df. 
For encapsulate the displaying in\n this function.\n\n Args:\n report: The report to display\n apply_shrink_columns: Make the column more compat by replacing\n underscores with newlines\n copy_to_clipboard: Copy the report to the clipboard\n rename_bool_flags: Rename the boolean flags to be more compact and readable\n print_only: Print only the given column\n \"\"\"\n report = report.copy()\n\n if apply_shrink_columns:\n report = shrink_columns(report)\n\n if rename_bool_flags:\n report = _rename_bool_flags(report)\n\n # if copy_to_clipboard:\n # to_clipboard(report)\n\n columns = list(report.columns)\n\n report.reset_index(inplace=True)\n\n if print_only:\n columns = [print_only] + columns\n report = report[columns]\n\n styled_report = set_wrap_style(report)\n\n display(styled_report)\n\n\ndef shrink_columns(df, also_wrap_index=True):\n \"\"\"Make the column names more compact by replacing underscores with newlines\"\"\"\n df = df.copy()\n\n df.columns = [col.replace(\"_\", \"\\n\") for col in df.columns]\n if also_wrap_index:\n df.index.names = [name.replace(\"_\", \"\\n\") for name in df.index.names]\n\n # Define a formatter function that formats float numbers without trailing zeros\n def formatter(x):\n if isinstance(x, float):\n return \"{:.10f}\".format(x).rstrip(\"0\").rstrip(\".\")\n return x\n\n return df.map(formatter)\n\n\ndef set_wrap_style(df):\n return df.style.set_table_styles([{\"selector\": \"th\", \"props\": [(\"white-space\", \"pre-wrap\")]}])\n\n\n# ------------\n# Error Utils\n# ------------\n\n\ndef map_err_key(err_msg: str):\n if err_msg is None:\n return err_msg\n\n # remove logs from the message if any\n err_msg = err_msg[: err_msg.find(\"=== logs ===\")].rstrip()\n regex_replacements = [\n (\n r\"your messages resulted in \\d+ tokens\",\n \"your messages resulted in x tokens\",\n ),\n (\n r\"(?<=Exception uncaught by agent or environment in task\\s)([^\\s]+)\",\n \".\",\n ),\n ]\n\n for pattern, replacement in regex_replacements:\n err_msg = re.sub(pattern, replacement, err_msg)\n return err_msg\n\n\ndef error_report(df: pd.DataFrame, max_stack_trace=10, use_log=False):\n \"\"\"Report the error message for each agent.\"\"\"\n\n if \"err_key\" not in df:\n df[\"err_key\"] = df[\"err_msg\"].map(map_err_key)\n\n unique_counts = df[\"err_key\"].value_counts().sort_values(ascending=False)\n report = []\n for err_key, count in unique_counts.items():\n report.append(\"-------------------\")\n report.append(f\"## {count\n# ... truncated ...","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":true} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.get_constants_and_variables","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.get_constants_and_variables#L30-L51","kind":"function","name":"get_constants_and_variables","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":30,"end_line":51,"context_start_line":10,"context_end_line":71,"code":"\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import display\nfrom tqdm import tqdm\n\nfrom agentlab.experiments.loop import ExpResult, get_exp_result, yield_all_exp_results\n\n# TODO find a more portable way to code set_task_category_as_index at least\n# handle dynamic imports. 
We don't want to always import workarena\n# from browsergym.workarena import TASK_CATEGORY_MAP\n\nwarnings.filterwarnings(\"ignore\", category=pd.errors.PerformanceWarning)\n\npd.set_option(\"display.multi_sparse\", False)\n\nAGENT_NAME_KEY = \"agent.agent_name\"\nTASK_KEY = \"env.task_name\"\n\n\ndef get_constants_and_variables(df: pd.DataFrame, drop_constants: bool = False):\n \"\"\"Filter out constants from the dataframe.\"\"\"\n\n constants = {}\n variable_keys = []\n for col in df.columns:\n try:\n nuniq = df[col].nunique(dropna=False)\n except TypeError:\n nuniq = 0 # non hashable types are considered variables\n if nuniq == 1:\n if isinstance(df[col].iloc[0], np.generic):\n val = df[col].iloc[0].item()\n else:\n val = df[col].iloc[0]\n constants[col] = val\n if drop_constants:\n df = df.drop(col, axis=1)\n else:\n variable_keys.append(col)\n\n return constants, variable_keys, df\n\n\ndef set_index_from_variables(\n df: pd.DataFrame,\n index_white_list=(\"agent.*\",),\n index_black_list=(\"*model_url*\", \"*extra*\", \"*._*\"),\n task_key=TASK_KEY,\n add_agent_and_benchmark=True,\n):\n \"\"\"Set the index, inplace, to env.task_name and all variables.\n\n Introspects `df` to find all fields that are variable and set the index to\n those fields. This will allow to easily groupby and compare results. To\n filter undersired variables from the index, use index_white_list and\n index_black_list.\n\n Args:\n df: The dataframe to modify\n index_white_list: List of wildard patterns to match variables that\n should be included in the index.","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.set_index_from_variables","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.set_index_from_variables#L54-L103","kind":"function","name":"set_index_from_variables","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":54,"end_line":103,"context_start_line":34,"context_end_line":123,"code":" variable_keys = []\n for col in df.columns:\n try:\n nuniq = df[col].nunique(dropna=False)\n except TypeError:\n nuniq = 0 # non hashable types are considered variables\n if nuniq == 1:\n if isinstance(df[col].iloc[0], np.generic):\n val = df[col].iloc[0].item()\n else:\n val = df[col].iloc[0]\n constants[col] = val\n if drop_constants:\n df = df.drop(col, axis=1)\n else:\n variable_keys.append(col)\n\n return constants, variable_keys, df\n\n\ndef set_index_from_variables(\n df: pd.DataFrame,\n index_white_list=(\"agent.*\",),\n index_black_list=(\"*model_url*\", \"*extra*\", \"*._*\"),\n task_key=TASK_KEY,\n add_agent_and_benchmark=True,\n):\n \"\"\"Set the index, inplace, to env.task_name and all variables.\n\n Introspects `df` to find all fields that are variable and set the index to\n those fields. This will allow to easily groupby and compare results. 
To\n filter undersired variables from the index, use index_white_list and\n index_black_list.\n\n Args:\n df: The dataframe to modify\n index_white_list: List of wildard patterns to match variables that\n should be included in the index.\n index_black_list: List of wildard patterns to match variables that\n should be excluded from the index.\n task_key: The key to use as the first level of the index.\n add_agent_and_benchmark: If True, add agent.agent_name and env.benchmark\n \"\"\"\n df.reset_index(inplace=True)\n constants, variables, _ = get_constants_and_variables(df)\n\n index_variables = []\n if add_agent_and_benchmark:\n index_variables.append(\"agent.agent_name\")\n if \"env.benchmark\" not in df.columns:\n df[\"env.benchmark\"] = df[TASK_KEY].map(_benchmark_from_task_name)\n index_variables.append(\"env.benchmark\")\n\n for var in variables:\n white = any([fnmatch.fnmatch(var, pattern) for pattern in index_white_list])\n black = any([fnmatch.fnmatch(var, pattern) for pattern in index_black_list])\n\n if white and (not black) and (var not in index_variables):\n index_variables.append(var)\n\n for var in index_variables:\n if df[var].isnull().any():\n warn(\n f\"Variable {var} contains NaN or None values. This will be replaced by the string 'None' to avoid some pandas bug.\"\n )\n df[var] = df[var].fillna(\"None\")\n\n # agent_variables = [var for var in variables if var.startswith(\"agent.\")]\n df.set_index([task_key] + index_variables, inplace=True)\n df.sort_index(inplace=True)\n\n\ndef load_result_df(\n exp_dir,\n progress_fn=tqdm,\n set_index=True,\n result_df=None,\n index_white_list=(\"agent.*\",),\n index_black_list=(\"*model_url*\", \"*extra*\", \"*._*\"),\n remove_args_suffix=True,\n):\n \"\"\"Load the result dataframe.\n\n Will set the index to env.task_name and all columens that are not constant and\n starts with agent. This will allow to easily groupby and compare\n results. This index can be changed later using df.set_index.\n\n Args:\n exp_dir: Path to the experiment directory\n progress_fn: Progress function to use when loading the results","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.load_result_df","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.load_result_df#L106-L155","kind":"function","name":"load_result_df","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":106,"end_line":155,"context_start_line":86,"context_end_line":175,"code":"\n for var in variables:\n white = any([fnmatch.fnmatch(var, pattern) for pattern in index_white_list])\n black = any([fnmatch.fnmatch(var, pattern) for pattern in index_black_list])\n\n if white and (not black) and (var not in index_variables):\n index_variables.append(var)\n\n for var in index_variables:\n if df[var].isnull().any():\n warn(\n f\"Variable {var} contains NaN or None values. 
This will be replaced by the string 'None' to avoid some pandas bug.\"\n )\n df[var] = df[var].fillna(\"None\")\n\n # agent_variables = [var for var in variables if var.startswith(\"agent.\")]\n df.set_index([task_key] + index_variables, inplace=True)\n df.sort_index(inplace=True)\n\n\ndef load_result_df(\n exp_dir,\n progress_fn=tqdm,\n set_index=True,\n result_df=None,\n index_white_list=(\"agent.*\",),\n index_black_list=(\"*model_url*\", \"*extra*\", \"*._*\"),\n remove_args_suffix=True,\n):\n \"\"\"Load the result dataframe.\n\n Will set the index to env.task_name and all columens that are not constant and\n starts with agent. This will allow to easily groupby and compare\n results. This index can be changed later using df.set_index.\n\n Args:\n exp_dir: Path to the experiment directory\n progress_fn: Progress function to use when loading the results\n set_index: If True, set the index to env.task_name and variable agent\n result_df: If not None, speed up the loading process by reusing\n alreading loaded objects.\n index_white_list: List of wildard patterns to match variables that\n should be included in the index.\n index_black_list: List of wildard patterns to match variables that\n should be excluded from the index.\n remove_args_suffix: If True, remove the _args suffix from the columns\n\n Returns:\n pd.DataFrame: The result dataframe\n \"\"\"\n\n if result_df is not None:\n result_list = list(result_df[\"exp_result\"])\n else:\n result_list = list(yield_all_exp_results(exp_dir, progress_fn=progress_fn))\n\n if len(result_list) == 0:\n return None\n\n if progress_fn is not None:\n result_list = progress_fn(result_list, desc=\"Loading results\")\n\n df = pd.DataFrame([exp_result.get_exp_record() for exp_result in result_list])\n\n if remove_args_suffix:\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n\n if set_index:\n set_index_from_variables(df, index_white_list, index_black_list)\n return df\n\n\ndef reduce_episodes(result_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Reduce the dataframe to a single row per episode and summarize some of the columns.\"\"\"\n\n levels = list(range(result_df.index.nlevels))\n return result_df.groupby(level=levels).apply(summarize)\n\n\ndef report_2d(df: pd.DataFrame, reduce_fn: callable = reduce_episodes, n_row_keys=1):\n \"\"\"Generic function to create a 2d report based on the dataframe.\n\n The code is simple but can be a bit cryptic. This is best explained in the\n following 3 steps:\n 1) Groupby: Will use the existing multi-index to groupby. Make sure to set the\n an index to the desired keys before calling this function.\n 2) Reduce: Uses the reduce_fn to reduce the content of each group to a single\n variable, creating a 1D series indexed by its original index.\n 3) Unstack: Produce a 2D table such that the first n_row_keys are used to\n specify how many dimensions are used for the rows. 
The remaining","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.reduce_episodes","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.reduce_episodes#L158-L162","kind":"function","name":"reduce_episodes","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":158,"end_line":162,"context_start_line":138,"context_end_line":182,"code":" result_list = list(result_df[\"exp_result\"])\n else:\n result_list = list(yield_all_exp_results(exp_dir, progress_fn=progress_fn))\n\n if len(result_list) == 0:\n return None\n\n if progress_fn is not None:\n result_list = progress_fn(result_list, desc=\"Loading results\")\n\n df = pd.DataFrame([exp_result.get_exp_record() for exp_result in result_list])\n\n if remove_args_suffix:\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n\n if set_index:\n set_index_from_variables(df, index_white_list, index_black_list)\n return df\n\n\ndef reduce_episodes(result_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Reduce the dataframe to a single row per episode and summarize some of the columns.\"\"\"\n\n levels = list(range(result_df.index.nlevels))\n return result_df.groupby(level=levels).apply(summarize)\n\n\ndef report_2d(df: pd.DataFrame, reduce_fn: callable = reduce_episodes, n_row_keys=1):\n \"\"\"Generic function to create a 2d report based on the dataframe.\n\n The code is simple but can be a bit cryptic. This is best explained in the\n following 3 steps:\n 1) Groupby: Will use the existing multi-index to groupby. Make sure to set the\n an index to the desired keys before calling this function.\n 2) Reduce: Uses the reduce_fn to reduce the content of each group to a single\n variable, creating a 1D series indexed by its original index.\n 3) Unstack: Produce a 2D table such that the first n_row_keys are used to\n specify how many dimensions are used for the rows. The remaining\n dimensions are used for the columns.\n\n Args:\n df: The dataframe to reduce\n reduce_fn: The function to use to reduce the sub dataframe. 
By default\n this is reduce_episodes.\n n_row_keys: The number of keys to use for the rows.","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.report_2d","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.report_2d#L165-L190","kind":"function","name":"report_2d","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":165,"end_line":190,"context_start_line":145,"context_end_line":210,"code":" if progress_fn is not None:\n result_list = progress_fn(result_list, desc=\"Loading results\")\n\n df = pd.DataFrame([exp_result.get_exp_record() for exp_result in result_list])\n\n if remove_args_suffix:\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n\n if set_index:\n set_index_from_variables(df, index_white_list, index_black_list)\n return df\n\n\ndef reduce_episodes(result_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Reduce the dataframe to a single row per episode and summarize some of the columns.\"\"\"\n\n levels = list(range(result_df.index.nlevels))\n return result_df.groupby(level=levels).apply(summarize)\n\n\ndef report_2d(df: pd.DataFrame, reduce_fn: callable = reduce_episodes, n_row_keys=1):\n \"\"\"Generic function to create a 2d report based on the dataframe.\n\n The code is simple but can be a bit cryptic. This is best explained in the\n following 3 steps:\n 1) Groupby: Will use the existing multi-index to groupby. Make sure to set the\n an index to the desired keys before calling this function.\n 2) Reduce: Uses the reduce_fn to reduce the content of each group to a single\n variable, creating a 1D series indexed by its original index.\n 3) Unstack: Produce a 2D table such that the first n_row_keys are used to\n specify how many dimensions are used for the rows. The remaining\n dimensions are used for the columns.\n\n Args:\n df: The dataframe to reduce\n reduce_fn: The function to use to reduce the sub dataframe. 
By default\n this is reduce_episodes.\n n_row_keys: The number of keys to use for the rows.\n\n Returns:\n pd.DataFrame: The 2D report\n \"\"\"\n\n levels = list(range(df.index.nlevels))\n reduced_df = df.groupby(level=levels).apply(reduce_fn) # type: pd.Series\n return reduced_df.unstack(level=levels[n_row_keys:])\n\n\ndef report_constant_and_variables(df, show_stack_traces=True):\n constants, variables, _ = get_constants_and_variables(df)\n print(\"Constants:\")\n for k, v in constants.items():\n print(f\" {k}: {v}\")\n\n print(\"\\nVariables:\")\n for var in variables:\n if not show_stack_traces and var == \"stack_trace\":\n continue\n\n # get unique with count and sort by count descending\n unique_counts = df[var].value_counts().sort_values(ascending=False)\n\n print(f\" {var}: n_unique={len(unique_counts)}\")\n for i, (val, count) in enumerate(unique_counts.items()):\n print(f\" {count}x : {val}\")\n if i >= 2:","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.report_constant_and_variables","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.report_constant_and_variables#L193-L213","kind":"function","name":"report_constant_and_variables","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":193,"end_line":213,"context_start_line":173,"context_end_line":233,"code":" variable, creating a 1D series indexed by its original index.\n 3) Unstack: Produce a 2D table such that the first n_row_keys are used to\n specify how many dimensions are used for the rows. The remaining\n dimensions are used for the columns.\n\n Args:\n df: The dataframe to reduce\n reduce_fn: The function to use to reduce the sub dataframe. 
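The three-step groupby → reduce → unstack recipe is easiest to see on a toy frame. A minimal sketch with invented task and agent names, assuming a two-level MultiIndex and a scalar-returning reducer:

```python
# Toy illustration of the groupby -> reduce -> unstack recipe behind report_2d.
import pandas as pd

df = pd.DataFrame(
    {"reward": [1, 0, 1, 1]},
    index=pd.MultiIndex.from_tuples(
        [("task_a", "gpt"), ("task_a", "llama"), ("task_b", "gpt"), ("task_b", "llama")],
        names=["task_name", "agent"],
    ),
)

levels = list(range(df.index.nlevels))
# 1) group by the full multi-index, 2) reduce each group to one value
reduced = df.groupby(level=levels).apply(lambda g: g["reward"].mean())
# 3) unstack the trailing levels: rows = task_name, columns = agent
print(reduced.unstack(level=levels[1:]))
```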
By default\n this is reduce_episodes.\n n_row_keys: The number of keys to use for the rows.\n\n Returns:\n pd.DataFrame: The 2D report\n \"\"\"\n\n levels = list(range(df.index.nlevels))\n reduced_df = df.groupby(level=levels).apply(reduce_fn) # type: pd.Series\n return reduced_df.unstack(level=levels[n_row_keys:])\n\n\ndef report_constant_and_variables(df, show_stack_traces=True):\n constants, variables, _ = get_constants_and_variables(df)\n print(\"Constants:\")\n for k, v in constants.items():\n print(f\" {k}: {v}\")\n\n print(\"\\nVariables:\")\n for var in variables:\n if not show_stack_traces and var == \"stack_trace\":\n continue\n\n # get unique with count and sort by count descending\n unique_counts = df[var].value_counts().sort_values(ascending=False)\n\n print(f\" {var}: n_unique={len(unique_counts)}\")\n for i, (val, count) in enumerate(unique_counts.items()):\n print(f\" {count}x : {val}\")\n if i >= 2:\n break\n if len(unique_counts) > 3:\n print(\" ...\\n\")\n\n\ndef get_std_err(df, metric):\n \"\"\"Get the standard error for a binary metric.\"\"\"\n # extract non missing values\n data = df[metric].dropna().values\n\n # asser either 0 or 1\n if np.all(np.isin(data, [0, 1])):\n mean = np.mean(data)\n std_err = np.sqrt(mean * (1 - mean) / len(data))\n return mean, std_err\n else:\n return get_sample_std_err(df, metric)\n\n\ndef get_sample_std_err(df, metric):\n \"\"\"Get the standard error for a binary metric.\"\"\"\n # extract non missing values\n data = df[metric].dropna().values","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.get_std_err","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.get_std_err#L216-L227","kind":"function","name":"get_std_err","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":216,"end_line":227,"context_start_line":196,"context_end_line":247,"code":" for k, v in constants.items():\n print(f\" {k}: {v}\")\n\n print(\"\\nVariables:\")\n for var in variables:\n if not show_stack_traces and var == \"stack_trace\":\n continue\n\n # get unique with count and sort by count descending\n unique_counts = df[var].value_counts().sort_values(ascending=False)\n\n print(f\" {var}: n_unique={len(unique_counts)}\")\n for i, (val, count) in enumerate(unique_counts.items()):\n print(f\" {count}x : {val}\")\n if i >= 2:\n break\n if len(unique_counts) > 3:\n print(\" ...\\n\")\n\n\ndef get_std_err(df, metric):\n \"\"\"Get the standard error for a binary metric.\"\"\"\n # extract non missing values\n data = df[metric].dropna().values\n\n # asser either 0 or 1\n if np.all(np.isin(data, [0, 1])):\n mean = np.mean(data)\n std_err = np.sqrt(mean * (1 - mean) / len(data))\n return mean, std_err\n else:\n return get_sample_std_err(df, metric)\n\n\ndef get_sample_std_err(df, metric):\n \"\"\"Get the standard error for a binary metric.\"\"\"\n # extract non missing values\n data = df[metric].dropna().values\n\n mean = np.mean(data)\n std_err = np.std(data, ddof=1) / np.sqrt(len(data))\n if np.isnan(std_err):\n std_err = np.float64(0)\n return mean, std_err\n\n\ndef summarize(sub_df):\n if \"cum_reward\" not in sub_df:\n record = dict(\n avg_reward=np.nan,\n std_err=np.nan,\n # avg_raw_reward=np.nan,","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} 
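The two estimators above are standard: for 0/1 outcomes the binomial standard error sqrt(p(1-p)/n) applies, otherwise the sample standard error of the mean. A worked check on fabricated data:

```python
# Worked check of the two standard-error formulas used by get_std_err and
# get_sample_std_err (data values are made up).
import numpy as np

binary = np.array([1, 0, 1, 1])                      # success/failure outcomes
p = binary.mean()                                    # 0.75
se_binomial = np.sqrt(p * (1 - p) / len(binary))     # ~0.2165

continuous = np.array([0.2, 0.5, 0.9, 0.4])          # non-binary rewards
se_sample = np.std(continuous, ddof=1) / np.sqrt(len(continuous))  # ~0.1472

print(round(se_binomial, 4), round(se_sample, 4))
```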
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.get_sample_std_err","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.get_sample_std_err#L230-L239","kind":"function","name":"get_sample_std_err","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":230,"end_line":239,"context_start_line":210,"context_end_line":259,"code":" if i >= 2:\n break\n if len(unique_counts) > 3:\n print(\" ...\\n\")\n\n\ndef get_std_err(df, metric):\n \"\"\"Get the standard error for a binary metric.\"\"\"\n # extract non missing values\n data = df[metric].dropna().values\n\n # asser either 0 or 1\n if np.all(np.isin(data, [0, 1])):\n mean = np.mean(data)\n std_err = np.sqrt(mean * (1 - mean) / len(data))\n return mean, std_err\n else:\n return get_sample_std_err(df, metric)\n\n\ndef get_sample_std_err(df, metric):\n \"\"\"Get the standard error for a binary metric.\"\"\"\n # extract non missing values\n data = df[metric].dropna().values\n\n mean = np.mean(data)\n std_err = np.std(data, ddof=1) / np.sqrt(len(data))\n if np.isnan(std_err):\n std_err = np.float64(0)\n return mean, std_err\n\n\ndef summarize(sub_df):\n if \"cum_reward\" not in sub_df:\n record = dict(\n avg_reward=np.nan,\n std_err=np.nan,\n # avg_raw_reward=np.nan,\n avg_steps=np.nan,\n n_completed=f\"0/{len(sub_df)}\",\n n_err=0,\n )\n else:\n err = sub_df[\"err_msg\"].notnull()\n n_completed = err.copy()\n for col in [\"truncated\", \"terminated\"]:\n if col in sub_df:\n n_completed = n_completed | sub_df[col]\n n_completed = n_completed.sum()\n","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.summarize","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.summarize#L242-L284","kind":"function","name":"summarize","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":242,"end_line":284,"context_start_line":222,"context_end_line":304,"code":" if np.all(np.isin(data, [0, 1])):\n mean = np.mean(data)\n std_err = np.sqrt(mean * (1 - mean) / len(data))\n return mean, std_err\n else:\n return get_sample_std_err(df, metric)\n\n\ndef get_sample_std_err(df, metric):\n \"\"\"Get the standard error for a binary metric.\"\"\"\n # extract non missing values\n data = df[metric].dropna().values\n\n mean = np.mean(data)\n std_err = np.std(data, ddof=1) / np.sqrt(len(data))\n if np.isnan(std_err):\n std_err = np.float64(0)\n return mean, std_err\n\n\ndef summarize(sub_df):\n if \"cum_reward\" not in sub_df:\n record = dict(\n avg_reward=np.nan,\n std_err=np.nan,\n # avg_raw_reward=np.nan,\n avg_steps=np.nan,\n n_completed=f\"0/{len(sub_df)}\",\n n_err=0,\n )\n else:\n err = sub_df[\"err_msg\"].notnull()\n n_completed = err.copy()\n for col in [\"truncated\", \"terminated\"]:\n if col in sub_df:\n n_completed = n_completed | sub_df[col]\n n_completed = n_completed.sum()\n\n if n_completed == 0:\n return None\n\n _mean_reward, std_reward = get_std_err(sub_df, \"cum_reward\")\n\n # sanity check, if there is an error the reward should be zero\n assert sub_df[sub_df[\"err_msg\"].notnull()][\"cum_reward\"].sum() == 0\n\n record = dict(\n avg_reward=sub_df[\"cum_reward\"].mean(skipna=True).round(3),\n std_err=std_reward.round(3),\n # avg_raw_reward=sub_df[\"cum_raw_reward\"].mean(skipna=True).round(3),\n avg_steps=sub_df[\"n_steps\"].mean(skipna=True).round(3),\n n_completed=f\"{n_completed}/{len(sub_df)}\",\n 
n_err=err.sum(skipna=True),\n )\n if \"stats.cum_cost\" in sub_df:\n record[\"cum_cost\"] = sub_df[\"stats.cum_cost\"].sum(skipna=True).round(4)\n if \"stats.cum_effective_cost\" in sub_df:\n record[\"cum_effective_cost\"] = (\n sub_df[\"stats.cum_effective_cost\"].sum(skipna=True).round(4)\n )\n record.pop(\"cum_cost\", None)\n\n return pd.Series(record)\n\n\ndef summarize_stats(sub_df):\n \"\"\"Summarize the stats columns.\"\"\"\n\n # make sure there are completed runs\n err = sub_df[\"err_msg\"].notnull()\n n_completed = err.copy()\n for col in [\"truncated\", \"terminated\"]:\n if col in sub_df:\n n_completed = n_completed | sub_df[col]\n n_completed = n_completed.sum()\n\n if n_completed == 0:\n return None\n\n record = dict(\n avg_reward=sub_df[\"cum_reward\"].mean(skipna=True).round(3),\n )\n for key in sub_df.keys():","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.summarize_stats","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.summarize_stats#L287-L314","kind":"function","name":"summarize_stats","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":287,"end_line":314,"context_start_line":267,"context_end_line":334,"code":"\n record = dict(\n avg_reward=sub_df[\"cum_reward\"].mean(skipna=True).round(3),\n std_err=std_reward.round(3),\n # avg_raw_reward=sub_df[\"cum_raw_reward\"].mean(skipna=True).round(3),\n avg_steps=sub_df[\"n_steps\"].mean(skipna=True).round(3),\n n_completed=f\"{n_completed}/{len(sub_df)}\",\n n_err=err.sum(skipna=True),\n )\n if \"stats.cum_cost\" in sub_df:\n record[\"cum_cost\"] = sub_df[\"stats.cum_cost\"].sum(skipna=True).round(4)\n if \"stats.cum_effective_cost\" in sub_df:\n record[\"cum_effective_cost\"] = (\n sub_df[\"stats.cum_effective_cost\"].sum(skipna=True).round(4)\n )\n record.pop(\"cum_cost\", None)\n\n return pd.Series(record)\n\n\ndef summarize_stats(sub_df):\n \"\"\"Summarize the stats columns.\"\"\"\n\n # make sure there are completed runs\n err = sub_df[\"err_msg\"].notnull()\n n_completed = err.copy()\n for col in [\"truncated\", \"terminated\"]:\n if col in sub_df:\n n_completed = n_completed | sub_df[col]\n n_completed = n_completed.sum()\n\n if n_completed == 0:\n return None\n\n record = dict(\n avg_reward=sub_df[\"cum_reward\"].mean(skipna=True).round(3),\n )\n for key in sub_df.keys():\n if key.startswith(\"stats.\"):\n key_ = key.split(\".\")[1]\n op = key_.split(\"_\")[0]\n if op == \"cum\":\n record[key_] = sub_df[key].sum(skipna=True)\n elif op == \"max\":\n record[key_] = sub_df[key].max(skipna=True)\n else:\n raise ValueError(f\"Unknown stats operation: {op}\")\n return pd.Series(record)\n\n\ndef _find_diff(tuple1, tuple2):\n \"\"\"return the list of index wher tuple1 != tuple2\"\"\"\n return [i for i, (a, b) in enumerate(zip(tuple1, tuple2)) if a != b]\n\n\ndef _extract_ablation_study(report: pd.DataFrame, progression=False):\n \"\"\"Reduce the multi-index to a change description compared to the previous row.\"\"\"\n names = report.index.names\n report = report.copy()\n # report.sort_index(inplace=True)\n\n reference_index = None\n for index in report.index:\n if reference_index is not None:\n diffs = _find_diff(reference_index, index)\n\n if progression:\n change = \"↳ \" + \", \".join([f\"{names[i]}={index[i]}\" for i in diffs])","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results._find_diff","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results._find_diff#L317-L319","kind":"function","name":"_find_diff","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":317,"end_line":319,"context_start_line":297,"context_end_line":339,"code":"\n if n_completed == 0:\n return None\n\n record = dict(\n avg_reward=sub_df[\"cum_reward\"].mean(skipna=True).round(3),\n )\n for key in sub_df.keys():\n if key.startswith(\"stats.\"):\n key_ = key.split(\".\")[1]\n op = key_.split(\"_\")[0]\n if op == \"cum\":\n record[key_] = sub_df[key].sum(skipna=True)\n elif op == \"max\":\n record[key_] = sub_df[key].max(skipna=True)\n else:\n raise ValueError(f\"Unknown stats operation: {op}\")\n return pd.Series(record)\n\n\ndef _find_diff(tuple1, tuple2):\n \"\"\"return the list of index wher tuple1 != tuple2\"\"\"\n return [i for i, (a, b) in enumerate(zip(tuple1, tuple2)) if a != b]\n\n\ndef _extract_ablation_study(report: pd.DataFrame, progression=False):\n \"\"\"Reduce the multi-index to a change description compared to the previous row.\"\"\"\n names = report.index.names\n report = report.copy()\n # report.sort_index(inplace=True)\n\n reference_index = None\n for index in report.index:\n if reference_index is not None:\n diffs = _find_diff(reference_index, index)\n\n if progression:\n change = \"↳ \" + \", \".join([f\"{names[i]}={index[i]}\" for i in diffs])\n else:\n changes = []\n for i in diffs:\n val = index[i]\n if isinstance(val, bool):","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results._extract_ablation_study","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results._extract_ablation_study#L322-L356","kind":"function","name":"_extract_ablation_study","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":322,"end_line":356,"context_start_line":302,"context_end_line":376,"code":" avg_reward=sub_df[\"cum_reward\"].mean(skipna=True).round(3),\n )\n for key in sub_df.keys():\n if key.startswith(\"stats.\"):\n key_ = key.split(\".\")[1]\n op = key_.split(\"_\")[0]\n if op == \"cum\":\n record[key_] = sub_df[key].sum(skipna=True)\n elif op == \"max\":\n record[key_] = sub_df[key].max(skipna=True)\n else:\n raise ValueError(f\"Unknown stats operation: {op}\")\n return pd.Series(record)\n\n\ndef _find_diff(tuple1, tuple2):\n \"\"\"return the list of index wher tuple1 != tuple2\"\"\"\n return [i for i, (a, b) in enumerate(zip(tuple1, tuple2)) if a != b]\n\n\ndef _extract_ablation_study(report: pd.DataFrame, progression=False):\n \"\"\"Reduce the multi-index to a change description compared to the previous row.\"\"\"\n names = report.index.names\n report = report.copy()\n # report.sort_index(inplace=True)\n\n reference_index = None\n for index in report.index:\n if reference_index is not None:\n diffs = _find_diff(reference_index, index)\n\n if progression:\n change = \"↳ \" + \", \".join([f\"{names[i]}={index[i]}\" for i in diffs])\n else:\n changes = []\n for i in diffs:\n val = index[i]\n if isinstance(val, bool):\n changes.append((\"+\" if val else \"-\") + names[i])\n else:\n changes.append(f\"{names[i]}←{val}\")\n change = \", \".join(changes)\n else:\n change = \"Initial Configuration\"\n report.loc[index, \"change\"] = change\n if progression:\n reference_index = index\n else:\n reference_index = 
report.index[0]\n\n report = report.reset_index()\n report = report.set_index([\"change\"])\n\n # delete columns related to old index\n return report.drop(names, axis=1)\n\n\ndef ablation_report(result_df: pd.DataFrame, reduce_fn=summarize, progression=False):\n \"\"\"Reduce the multi-index to a change description compared to the previous row.\n\n *NOTE*: This assumes that this experiments was launched with make_ablation_study.\n\n Rows will be sorted according to the average ExpArgs.order for all\n experiments associated with the multi-index.\n\n Args:\n result_df: The result dataframe as returned by load_result_df.\n reduce_fn: The function to use to reduce the sub dataframe. By default\n this is summarize.\n progression: If True, the change description will be the progression\n\n Returns:\n A dataframe with the change description as index.\n \"\"\"\n report = global_report(result_df, reduce_fn=reduce_fn)","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.ablation_report","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.ablation_report#L359-L379","kind":"function","name":"ablation_report","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":359,"end_line":379,"context_start_line":339,"context_end_line":399,"code":" if isinstance(val, bool):\n changes.append((\"+\" if val else \"-\") + names[i])\n else:\n changes.append(f\"{names[i]}←{val}\")\n change = \", \".join(changes)\n else:\n change = \"Initial Configuration\"\n report.loc[index, \"change\"] = change\n if progression:\n reference_index = index\n else:\n reference_index = report.index[0]\n\n report = report.reset_index()\n report = report.set_index([\"change\"])\n\n # delete columns related to old index\n return report.drop(names, axis=1)\n\n\ndef ablation_report(result_df: pd.DataFrame, reduce_fn=summarize, progression=False):\n \"\"\"Reduce the multi-index to a change description compared to the previous row.\n\n *NOTE*: This assumes that this experiments was launched with make_ablation_study.\n\n Rows will be sorted according to the average ExpArgs.order for all\n experiments associated with the multi-index.\n\n Args:\n result_df: The result dataframe as returned by load_result_df.\n reduce_fn: The function to use to reduce the sub dataframe. 
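How consecutive index tuples become change descriptions is easiest to trace on toy data. A pure-Python sketch of the non-progression mode (each row is diffed against the first row), with invented flag names:

```python
# Toy walk-through of the change-description logic in _extract_ablation_study.
def find_diff(t1, t2):
    return [i for i, (a, b) in enumerate(zip(t1, t2)) if a != b]

names = ["use_som", "model"]
rows = [(False, "gpt-4o"), (True, "gpt-4o"), (True, "gpt-4o-mini")]

reference = None
for idx in rows:
    if reference is None:
        print("Initial Configuration")
    else:
        changes = []
        for i in find_diff(reference, idx):
            val = idx[i]
            if isinstance(val, bool):
                changes.append(("+" if val else "-") + names[i])
            else:
                changes.append(f"{names[i]}←{val}")
        print(", ".join(changes))
    reference = rows[0]  # non-progression mode: always compare to the first row

# prints: Initial Configuration / +use_som / +use_som, model←gpt-4o-mini
```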
By default\n this is summarize.\n progression: If True, the change description will be the progression\n\n Returns:\n A dataframe with the change description as index.\n \"\"\"\n report = global_report(result_df, reduce_fn=reduce_fn)\n report = _sort_order(result_df, report)\n report = _extract_ablation_study(report, progression=progression)\n return report\n\n\ndef _get_avg_order(df: pd.DataFrame, row: pd.Series):\n \"\"\"Return the average order for the given row.\"\"\"\n df = df.reset_index(level=0, drop=True, inplace=False)\n # df.sort_index(inplace=True)\n\n sub_df = df.loc[row.name]\n orders = [get_exp_result(exp_dir).exp_args.order for exp_dir in sub_df.exp_dir]\n orders = [order for order in orders if order is not None]\n if len(orders) == 0:\n return None\n return np.mean(orders)\n\n\ndef _sort_order(result_df, report):\n \"\"\"Add a column to the report with the average order for each agent and sort.\"\"\"\n\n def add_order(row):\n return _get_avg_order(result_df, row)","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results._get_avg_order","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results._get_avg_order#L382-L392","kind":"function","name":"_get_avg_order","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":382,"end_line":392,"context_start_line":362,"context_end_line":412,"code":" *NOTE*: This assumes that this experiments was launched with make_ablation_study.\n\n Rows will be sorted according to the average ExpArgs.order for all\n experiments associated with the multi-index.\n\n Args:\n result_df: The result dataframe as returned by load_result_df.\n reduce_fn: The function to use to reduce the sub dataframe. 
By default\n this is summarize.\n progression: If True, the change description will be the progression\n\n Returns:\n A dataframe with the change description as index.\n \"\"\"\n report = global_report(result_df, reduce_fn=reduce_fn)\n report = _sort_order(result_df, report)\n report = _extract_ablation_study(report, progression=progression)\n return report\n\n\ndef _get_avg_order(df: pd.DataFrame, row: pd.Series):\n \"\"\"Return the average order for the given row.\"\"\"\n df = df.reset_index(level=0, drop=True, inplace=False)\n # df.sort_index(inplace=True)\n\n sub_df = df.loc[row.name]\n orders = [get_exp_result(exp_dir).exp_args.order for exp_dir in sub_df.exp_dir]\n orders = [order for order in orders if order is not None]\n if len(orders) == 0:\n return None\n return np.mean(orders)\n\n\ndef _sort_order(result_df, report):\n \"\"\"Add a column to the report with the average order for each agent and sort.\"\"\"\n\n def add_order(row):\n return _get_avg_order(result_df, row)\n\n report[\"avg_order\"] = report.apply(add_order, axis=1)\n return report.sort_values(\"avg_order\", ascending=True)\n\n\ndef global_report(\n result_df: pd.DataFrame,\n reduce_fn=summarize,\n rename_index=lambda name: name.replace(\"agent.flags.\", \"\"),\n):\n \"\"\"Produce a report that summarize all tasks and all episodes for each\n agent.\n","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results._sort_order","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results._sort_order#L395-L402","kind":"function","name":"_sort_order","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":395,"end_line":402,"context_start_line":375,"context_end_line":422,"code":" \"\"\"\n report = global_report(result_df, reduce_fn=reduce_fn)\n report = _sort_order(result_df, report)\n report = _extract_ablation_study(report, progression=progression)\n return report\n\n\ndef _get_avg_order(df: pd.DataFrame, row: pd.Series):\n \"\"\"Return the average order for the given row.\"\"\"\n df = df.reset_index(level=0, drop=True, inplace=False)\n # df.sort_index(inplace=True)\n\n sub_df = df.loc[row.name]\n orders = [get_exp_result(exp_dir).exp_args.order for exp_dir in sub_df.exp_dir]\n orders = [order for order in orders if order is not None]\n if len(orders) == 0:\n return None\n return np.mean(orders)\n\n\ndef _sort_order(result_df, report):\n \"\"\"Add a column to the report with the average order for each agent and sort.\"\"\"\n\n def add_order(row):\n return _get_avg_order(result_df, row)\n\n report[\"avg_order\"] = report.apply(add_order, axis=1)\n return report.sort_values(\"avg_order\", ascending=True)\n\n\ndef global_report(\n result_df: pd.DataFrame,\n reduce_fn=summarize,\n rename_index=lambda name: name.replace(\"agent.flags.\", \"\"),\n):\n \"\"\"Produce a report that summarize all tasks and all episodes for each\n agent.\n\n Args:\n result_df: The result dataframe as returned by load_result_df.\n reduce_fn: The function to use to reduce the sub dataframe. By default\n this is summarize.\n rename_index: Function to rename the index. 
By default we remove the prefix\n \"agent.flags.\"\n\n Returns:\n pd.DataFrame: The report\n \"\"\"","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.global_report","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.global_report#L405-L444","kind":"function","name":"global_report","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":405,"end_line":444,"context_start_line":385,"context_end_line":464,"code":" # df.sort_index(inplace=True)\n\n sub_df = df.loc[row.name]\n orders = [get_exp_result(exp_dir).exp_args.order for exp_dir in sub_df.exp_dir]\n orders = [order for order in orders if order is not None]\n if len(orders) == 0:\n return None\n return np.mean(orders)\n\n\ndef _sort_order(result_df, report):\n \"\"\"Add a column to the report with the average order for each agent and sort.\"\"\"\n\n def add_order(row):\n return _get_avg_order(result_df, row)\n\n report[\"avg_order\"] = report.apply(add_order, axis=1)\n return report.sort_values(\"avg_order\", ascending=True)\n\n\ndef global_report(\n result_df: pd.DataFrame,\n reduce_fn=summarize,\n rename_index=lambda name: name.replace(\"agent.flags.\", \"\"),\n):\n \"\"\"Produce a report that summarize all tasks and all episodes for each\n agent.\n\n Args:\n result_df: The result dataframe as returned by load_result_df.\n reduce_fn: The function to use to reduce the sub dataframe. By default\n this is summarize.\n rename_index: Function to rename the index. By default we remove the prefix\n \"agent.flags.\"\n\n Returns:\n pd.DataFrame: The report\n \"\"\"\n\n levels = list(range(result_df.index.nlevels))\n\n if len(levels) == 1:\n print(\"Only one configuration is found, returning a per-task report.\")\n report = report_2d(result_df, reduce_fn=reduce_fn)\n report.loc[\"[ALL TASKS]\"] = reduce_fn(result_df)\n else:\n print(\n \"Found multiple configuration, averaging across tasks and returning a per-agent report.\"\n )\n report = result_df.groupby(level=levels[1:]).apply(reduce_fn)\n\n if rename_index is not None:\n index_names = [rename_index(name) for name in report.index.names]\n report = report.rename_axis(index=index_names)\n\n # if has key avg_reward\n if \"avg_reward\" in report.columns:\n report = report.sort_values(\"avg_reward\", ascending=False)\n\n return report\n\n\ndef _rename_bool_flags(report: pd.DataFrame, true_str=\"✓\", false_str=\"-\"):\n \"\"\"Rename the boolean flags to be more compact and readable.\"\"\"\n map_bool = lambda x: true_str if x is True else false_str if x is False else x\n if isinstance(report.index, pd.MultiIndex):\n report.index = report.index.set_levels(\n [[map_bool(i) for i in level] for level in report.index.levels]\n )\n return report\n\n\ndef flag_report(report: pd.DataFrame, metric: str = \"avg_reward\", round_digits: int = 2):\n # for all index in the multi-index with boolean value, get the average for\n # True and the average for False separately. 
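The boolean-flag renaming relies on `MultiIndex.set_levels`. A quick demo on a fabricated report, reproducing the mapping used by `_rename_bool_flags`:

```python
# Demo: True/False index levels become compact "✓"/"-" strings.
import pandas as pd

report = pd.DataFrame(
    {"avg_reward": [0.4, 0.6]},
    index=pd.MultiIndex.from_tuples(
        [(True, "gpt"), (False, "gpt")], names=["use_som", "model"]
    ),
)

map_bool = lambda x: "✓" if x is True else "-" if x is False else x
report.index = report.index.set_levels(
    [[map_bool(i) for i in level] for level in report.index.levels]
)
print(report)
```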
Produce a new dataframe with\n # the average for True and False for each index and the ratio between the\n # two as a new column.\n\n # check the number of levels\n if report.index.nlevels <= 1:","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results._rename_bool_flags","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results._rename_bool_flags#L447-L454","kind":"function","name":"_rename_bool_flags","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":447,"end_line":454,"context_start_line":427,"context_end_line":474,"code":" print(\"Only one configuration is found, returning a per-task report.\")\n report = report_2d(result_df, reduce_fn=reduce_fn)\n report.loc[\"[ALL TASKS]\"] = reduce_fn(result_df)\n else:\n print(\n \"Found multiple configuration, averaging across tasks and returning a per-agent report.\"\n )\n report = result_df.groupby(level=levels[1:]).apply(reduce_fn)\n\n if rename_index is not None:\n index_names = [rename_index(name) for name in report.index.names]\n report = report.rename_axis(index=index_names)\n\n # if has key avg_reward\n if \"avg_reward\" in report.columns:\n report = report.sort_values(\"avg_reward\", ascending=False)\n\n return report\n\n\ndef _rename_bool_flags(report: pd.DataFrame, true_str=\"✓\", false_str=\"-\"):\n \"\"\"Rename the boolean flags to be more compact and readable.\"\"\"\n map_bool = lambda x: true_str if x is True else false_str if x is False else x\n if isinstance(report.index, pd.MultiIndex):\n report.index = report.index.set_levels(\n [[map_bool(i) for i in level] for level in report.index.levels]\n )\n return report\n\n\ndef flag_report(report: pd.DataFrame, metric: str = \"avg_reward\", round_digits: int = 2):\n # for all index in the multi-index with boolean value, get the average for\n # True and the average for False separately. 
Produce a new dataframe with\n # the average for True and False for each index and the ratio between the\n # two as a new column.\n\n # check the number of levels\n if report.index.nlevels <= 1:\n print(f\"Only {report.index.nlevels} levels in the index, cannot produce flag report.\")\n return\n\n report = report.copy()\n report = report.reset_index()\n\n records = []\n for col in report.columns:\n if report[col].dtype == bool:\n avg_true = report[report[col]][metric].mean()","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.flag_report","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.flag_report#L457-L484","kind":"function","name":"flag_report","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":457,"end_line":484,"context_start_line":437,"context_end_line":504,"code":" index_names = [rename_index(name) for name in report.index.names]\n report = report.rename_axis(index=index_names)\n\n # if has key avg_reward\n if \"avg_reward\" in report.columns:\n report = report.sort_values(\"avg_reward\", ascending=False)\n\n return report\n\n\ndef _rename_bool_flags(report: pd.DataFrame, true_str=\"✓\", false_str=\"-\"):\n \"\"\"Rename the boolean flags to be more compact and readable.\"\"\"\n map_bool = lambda x: true_str if x is True else false_str if x is False else x\n if isinstance(report.index, pd.MultiIndex):\n report.index = report.index.set_levels(\n [[map_bool(i) for i in level] for level in report.index.levels]\n )\n return report\n\n\ndef flag_report(report: pd.DataFrame, metric: str = \"avg_reward\", round_digits: int = 2):\n # for all index in the multi-index with boolean value, get the average for\n # True and the average for False separately. Produce a new dataframe with\n # the average for True and False for each index and the ratio between the\n # two as a new column.\n\n # check the number of levels\n if report.index.nlevels <= 1:\n print(f\"Only {report.index.nlevels} levels in the index, cannot produce flag report.\")\n return\n\n report = report.copy()\n report = report.reset_index()\n\n records = []\n for col in report.columns:\n if report[col].dtype == bool:\n avg_true = report[report[col]][metric].mean()\n avg_false = report[~report[col]][metric].mean()\n ratio = avg_true / avg_false\n records.append(dict(hparam=col, avg_true=avg_true, avg_false=avg_false, ratio=ratio))\n\n flag_report = pd.DataFrame(records).set_index(\"hparam\")\n flag_report.sort_values(\"ratio\", ascending=False, inplace=True)\n if round_digits is not None:\n flag_report = flag_report.round(round_digits)\n\n return flag_report\n\n\ndef display_report(\n report: pd.DataFrame,\n apply_shrink_columns: bool = True,\n copy_to_clipboard: bool = True,\n rename_bool_flags: bool = True,\n print_only: str = None,\n):\n \"\"\"Display the report in a nicer-ish format.\n\n To be able to wrap col names we need to use set_wrap_stype, which returns a\n styled df, and doesn't behave like a normal df. 
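flag_report's core computation, reproduced on fabricated data for a single boolean column:

```python
# Per-flag averages as computed inside flag_report (values are made up).
import pandas as pd

report = pd.DataFrame({
    "use_som": [True, True, False, False],
    "avg_reward": [0.7, 0.5, 0.4, 0.2],
})

col = "use_som"
avg_true = report[report[col]]["avg_reward"].mean()     # ~0.6
avg_false = report[~report[col]]["avg_reward"].mean()   # ~0.3
print(col, avg_true, avg_false, avg_true / avg_false)   # ratio ~2.0
```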
Hence the display logic is encapsulated in\n this function.\n\n Args:\n report: The report to display\n apply_shrink_columns: Make the columns more compact by replacing\n underscores with newlines\n copy_to_clipboard: Copy the report to the clipboard","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.display_report","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.display_report#L487-L529","kind":"function","name":"display_report","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":487,"end_line":529,"context_start_line":467,"context_end_line":549,"code":"\n report = report.copy()\n report = report.reset_index()\n\n records = []\n for col in report.columns:\n if report[col].dtype == bool:\n avg_true = report[report[col]][metric].mean()\n avg_false = report[~report[col]][metric].mean()\n ratio = avg_true / avg_false\n records.append(dict(hparam=col, avg_true=avg_true, avg_false=avg_false, ratio=ratio))\n\n flag_report = pd.DataFrame(records).set_index(\"hparam\")\n flag_report.sort_values(\"ratio\", ascending=False, inplace=True)\n if round_digits is not None:\n flag_report = flag_report.round(round_digits)\n\n return flag_report\n\n\ndef display_report(\n report: pd.DataFrame,\n apply_shrink_columns: bool = True,\n copy_to_clipboard: bool = True,\n rename_bool_flags: bool = True,\n print_only: str = None,\n):\n \"\"\"Display the report in a nicer-ish format.\n\n To be able to wrap col names we need to use set_wrap_style, which returns a\n styled df that doesn't behave like a normal df. Hence the display logic is encapsulated in\n this function.\n\n Args:\n report: The report to display\n apply_shrink_columns: Make the columns more compact by replacing\n underscores with newlines\n copy_to_clipboard: Copy the report to the clipboard\n rename_bool_flags: Rename the boolean flags to be more compact and readable\n print_only: Print only the given column\n \"\"\"\n report = report.copy()\n\n if apply_shrink_columns:\n report = shrink_columns(report)\n\n if rename_bool_flags:\n report = _rename_bool_flags(report)\n\n # if copy_to_clipboard:\n # to_clipboard(report)\n\n columns = list(report.columns)\n\n report.reset_index(inplace=True)\n\n if print_only:\n columns = [print_only] + columns\n report = report[columns]\n\n styled_report = set_wrap_style(report)\n\n display(styled_report)\n\n\ndef shrink_columns(df, also_wrap_index=True):\n \"\"\"Make the column names more compact by replacing underscores with newlines\"\"\"\n df = df.copy()\n\n df.columns = [col.replace(\"_\", \"\\n\") for col in df.columns]\n if also_wrap_index:\n df.index.names = [name.replace(\"_\", \"\\n\") for name in df.index.names]\n\n # Define a formatter function that formats float numbers without trailing zeros\n def formatter(x):\n if isinstance(x, float):\n return \"{:.10f}\".format(x).rstrip(\"0\").rstrip(\".\")\n return x\n\n return df.map(formatter)\n\n\ndef set_wrap_style(df):","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.shrink_columns","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.shrink_columns#L532-L546","kind":"function","name":"shrink_columns","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":532,"end_line":546,"context_start_line":512,"context_end_line":566,"code":" \n if 
rename_bool_flags:\n report = _rename_bool_flags(report)\n\n # if copy_to_clipboard:\n # to_clipboard(report)\n\n columns = list(report.columns)\n\n report.reset_index(inplace=True)\n\n if print_only:\n columns = [print_only] + columns\n report = report[columns]\n\n styled_report = set_wrap_style(report)\n\n display(styled_report)\n\n\ndef shrink_columns(df, also_wrap_index=True):\n \"\"\"Make the column names more compact by replacing underscores with newlines\"\"\"\n df = df.copy()\n\n df.columns = [col.replace(\"_\", \"\\n\") for col in df.columns]\n if also_wrap_index:\n df.index.names = [name.replace(\"_\", \"\\n\") for name in df.index.names]\n\n # Define a formatter function that formats float numbers without trailing zeros\n def formatter(x):\n if isinstance(x, float):\n return \"{:.10f}\".format(x).rstrip(\"0\").rstrip(\".\")\n return x\n\n return df.map(formatter)\n\n\ndef set_wrap_style(df):\n return df.style.set_table_styles([{\"selector\": \"th\", \"props\": [(\"white-space\", \"pre-wrap\")]}])\n\n\n# ------------\n# Error Utils\n# ------------\n\n\ndef map_err_key(err_msg: str):\n if err_msg is None:\n return err_msg\n\n # remove logs from the message if any\n err_msg = err_msg[: err_msg.find(\"=== logs ===\")].rstrip()\n regex_replacements = [\n (\n r\"your messages resulted in \\d+ tokens\",","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.set_wrap_style","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.set_wrap_style#L549-L550","kind":"function","name":"set_wrap_style","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":549,"end_line":550,"context_start_line":529,"context_end_line":570,"code":" display(styled_report)\n\n\ndef shrink_columns(df, also_wrap_index=True):\n \"\"\"Make the column names more compact by replacing underscores with newlines\"\"\"\n df = df.copy()\n\n df.columns = [col.replace(\"_\", \"\\n\") for col in df.columns]\n if also_wrap_index:\n df.index.names = [name.replace(\"_\", \"\\n\") for name in df.index.names]\n\n # Define a formatter function that formats float numbers without trailing zeros\n def formatter(x):\n if isinstance(x, float):\n return \"{:.10f}\".format(x).rstrip(\"0\").rstrip(\".\")\n return x\n\n return df.map(formatter)\n\n\ndef set_wrap_style(df):\n return df.style.set_table_styles([{\"selector\": \"th\", \"props\": [(\"white-space\", \"pre-wrap\")]}])\n\n\n# ------------\n# Error Utils\n# ------------\n\n\ndef map_err_key(err_msg: str):\n if err_msg is None:\n return err_msg\n\n # remove logs from the message if any\n err_msg = err_msg[: err_msg.find(\"=== logs ===\")].rstrip()\n regex_replacements = [\n (\n r\"your messages resulted in \\d+ tokens\",\n \"your messages resulted in x tokens\",\n ),\n (\n r\"(?<=Exception uncaught by agent or environment in task\\s)([^\\s]+)\",","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.map_err_key","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.map_err_key#L558-L577","kind":"function","name":"map_err_key","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":558,"end_line":577,"context_start_line":538,"context_end_line":597,"code":" df.index.names = [name.replace(\"_\", \"\\n\") for name in df.index.names]\n\n # Define a formatter function 
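The formatter simply strips trailing zeros from a fixed-precision rendering. In isolation:

```python
# The trailing-zero trimming used by shrink_columns' formatter.
def fmt(x):
    if isinstance(x, float):
        return "{:.10f}".format(x).rstrip("0").rstrip(".")
    return x

print(fmt(0.5000))  # '0.5'
print(fmt(3.0))     # '3'
print(fmt("text"))  # non-floats pass through unchanged
```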
that formats float numbers without trailing zeros\n def formatter(x):\n if isinstance(x, float):\n return \"{:.10f}\".format(x).rstrip(\"0\").rstrip(\".\")\n return x\n\n return df.map(formatter)\n\n\ndef set_wrap_style(df):\n return df.style.set_table_styles([{\"selector\": \"th\", \"props\": [(\"white-space\", \"pre-wrap\")]}])\n\n\n# ------------\n# Error Utils\n# ------------\n\n\ndef map_err_key(err_msg: str):\n if err_msg is None:\n return err_msg\n\n # remove logs from the message if any\n err_msg = err_msg[: err_msg.find(\"=== logs ===\")].rstrip()\n regex_replacements = [\n (\n r\"your messages resulted in \\d+ tokens\",\n \"your messages resulted in x tokens\",\n ),\n (\n r\"(?<=Exception uncaught by agent or environment in task\\s)([^\\s]+)\",\n \".\",\n ),\n ]\n\n for pattern, replacement in regex_replacements:\n err_msg = re.sub(pattern, replacement, err_msg)\n return err_msg\n\n\ndef error_report(df: pd.DataFrame, max_stack_trace=10, use_log=False):\n \"\"\"Report the error message for each agent.\"\"\"\n\n if \"err_key\" not in df:\n df[\"err_key\"] = df[\"err_msg\"].map(map_err_key)\n\n unique_counts = df[\"err_key\"].value_counts().sort_values(ascending=False)\n report = []\n for err_key, count in unique_counts.items():\n report.append(\"-------------------\")\n report.append(f\"## {count}x : \" + err_key.replace(\"\\n\", \"
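The regex normalization is what lets near-identical error messages collapse into a single key. For example, with the token-count pattern above (the sample message is invented):

```python
# How map_err_key-style normalization groups near-identical error messages.
import re

msg = "your messages resulted in 128094 tokens"
pattern = r"your messages resulted in \d+ tokens"
print(re.sub(pattern, "your messages resulted in x tokens", msg))
# -> 'your messages resulted in x tokens'
```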
\") + \"\\n\")\n\n # find sub_df with this error message\n sub_df = df[df[\"err_key\"] == err_key]\n idx = 0\n\n exp_result_list = [get_exp_result(row.exp_dir) for _, row in sub_df.iterrows()]\n exp_result_list = sorted(exp_result_list, key=lambda x: x.exp_args.env_args.task_name)","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.error_report","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.error_report#L580-L620","kind":"function","name":"error_report","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":580,"end_line":620,"context_start_line":560,"context_end_line":640,"code":" return err_msg\n\n # remove logs from the message if any\n err_msg = err_msg[: err_msg.find(\"=== logs ===\")].rstrip()\n regex_replacements = [\n (\n r\"your messages resulted in \\d+ tokens\",\n \"your messages resulted in x tokens\",\n ),\n (\n r\"(?<=Exception uncaught by agent or environment in task\\s)([^\\s]+)\",\n \".\",\n ),\n ]\n\n for pattern, replacement in regex_replacements:\n err_msg = re.sub(pattern, replacement, err_msg)\n return err_msg\n\n\ndef error_report(df: pd.DataFrame, max_stack_trace=10, use_log=False):\n \"\"\"Report the error message for each agent.\"\"\"\n\n if \"err_key\" not in df:\n df[\"err_key\"] = df[\"err_msg\"].map(map_err_key)\n\n unique_counts = df[\"err_key\"].value_counts().sort_values(ascending=False)\n report = []\n for err_key, count in unique_counts.items():\n report.append(\"-------------------\")\n report.append(f\"## {count}x : \" + err_key.replace(\"\\n\", \"
\") + \"\\n\")\n\n # find sub_df with this error message\n sub_df = df[df[\"err_key\"] == err_key]\n idx = 0\n\n exp_result_list = [get_exp_result(row.exp_dir) for _, row in sub_df.iterrows()]\n exp_result_list = sorted(exp_result_list, key=lambda x: x.exp_args.env_args.task_name)\n for exp_result in exp_result_list:\n report.append(\n f\"* {exp_result.exp_args.env_args.task_name} seed: {exp_result.exp_args.env_args.task_seed}\"\n )\n\n report.append(f\"\\nShowing Max {max_stack_trace} stack traces:\\n\")\n for exp_result in exp_result_list:\n if idx >= max_stack_trace:\n break\n\n if not use_log:\n # print task name and stack trace\n stack_trace = exp_result.summary_info.get(\"stack_trace\", \"\")\n report.append(f\"Task Name: {exp_result.exp_args.env_args.task_name}\\n\")\n report.append(f\"exp_dir: {exp_result.exp_dir}\\n\")\n report.append(f\"Stack Trace: \\n {stack_trace}\\n\")\n report.append(\"\\n\")\n else:\n report.append(f\"```bash\\n{_format_log(exp_result)}\\n```\")\n\n idx += 1\n\n return \"\\n\".join(report)\n\n\ndef _format_log(exp_result: ExpResult, head_lines=10, tail_lines=50):\n \"\"\"Extract head and tail of the log. Try to find the traceback.\"\"\"\n log = exp_result.logs\n if log is None:\n return \"No log found\"\n\n log_lines = log.split(\"\\n\")\n if len(log_lines) <= head_lines + tail_lines:\n return log\n\n # first 10 lines:\n log_head = \"\\n\".join(log_lines[:head_lines])\n\n try:\n traceback_idx = log.rindex(\"Traceback (most recent call last):\")\n tail_idx = log.rindex(\"action:\", 0, traceback_idx)\n log_tail = log[tail_idx:]\n except ValueError:","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results._format_log","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results._format_log#L623-L643","kind":"function","name":"_format_log","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":623,"end_line":643,"context_start_line":603,"context_end_line":663,"code":" report.append(f\"\\nShowing Max {max_stack_trace} stack traces:\\n\")\n for exp_result in exp_result_list:\n if idx >= max_stack_trace:\n break\n\n if not use_log:\n # print task name and stack trace\n stack_trace = exp_result.summary_info.get(\"stack_trace\", \"\")\n report.append(f\"Task Name: {exp_result.exp_args.env_args.task_name}\\n\")\n report.append(f\"exp_dir: {exp_result.exp_dir}\\n\")\n report.append(f\"Stack Trace: \\n {stack_trace}\\n\")\n report.append(\"\\n\")\n else:\n report.append(f\"```bash\\n{_format_log(exp_result)}\\n```\")\n\n idx += 1\n\n return \"\\n\".join(report)\n\n\ndef _format_log(exp_result: ExpResult, head_lines=10, tail_lines=50):\n \"\"\"Extract head and tail of the log. 
Try to find the traceback.\"\"\"\n log = exp_result.logs\n if log is None:\n return \"No log found\"\n\n log_lines = log.split(\"\\n\")\n if len(log_lines) <= head_lines + tail_lines:\n return log\n\n # first 10 lines:\n log_head = \"\\n\".join(log_lines[:head_lines])\n\n try:\n traceback_idx = log.rindex(\"Traceback (most recent call last):\")\n tail_idx = log.rindex(\"action:\", 0, traceback_idx)\n log_tail = log[tail_idx:]\n except ValueError:\n log_tail = \"\\n\".join(log_lines[-tail_lines:])\n\n return log_head + \"\\n...\\n...truncated middle of the log\\n...\\n\" + log_tail\n\n\ndef categorize_error(row):\n if pd.isna(row.get(\"err_msg\", None)):\n return None\n for category, check_function in ERR_CLASS_MAP.items():\n if check_function(row[\"err_msg\"], row[\"stack_trace\"]):\n if category == \"critical_server_error\":\n return is_critical_server_error(\n row[\"err_msg\"], row[\"stack_trace\"], return_error_type=True\n )\n elif category == \"minor_server_error\":\n return is_minor_server_error(\n row[\"err_msg\"], row[\"stack_trace\"], return_error_type=True\n )\n return category\n return \"other_error\"\n\n\ndef error_report_detailed(df: pd.DataFrame, max_stack_trace=10):","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.categorize_error","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.categorize_error#L646-L660","kind":"function","name":"categorize_error","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":646,"end_line":660,"context_start_line":626,"context_end_line":680,"code":" if log is None:\n return \"No log found\"\n\n log_lines = log.split(\"\\n\")\n if len(log_lines) <= head_lines + tail_lines:\n return log\n\n # first 10 lines:\n log_head = \"\\n\".join(log_lines[:head_lines])\n\n try:\n traceback_idx = log.rindex(\"Traceback (most recent call last):\")\n tail_idx = log.rindex(\"action:\", 0, traceback_idx)\n log_tail = log[tail_idx:]\n except ValueError:\n log_tail = \"\\n\".join(log_lines[-tail_lines:])\n\n return log_head + \"\\n...\\n...truncated middle of the log\\n...\\n\" + log_tail\n\n\ndef categorize_error(row):\n if pd.isna(row.get(\"err_msg\", None)):\n return None\n for category, check_function in ERR_CLASS_MAP.items():\n if check_function(row[\"err_msg\"], row[\"stack_trace\"]):\n if category == \"critical_server_error\":\n return is_critical_server_error(\n row[\"err_msg\"], row[\"stack_trace\"], return_error_type=True\n )\n elif category == \"minor_server_error\":\n return is_minor_server_error(\n row[\"err_msg\"], row[\"stack_trace\"], return_error_type=True\n )\n return category\n return \"other_error\"\n\n\ndef error_report_detailed(df: pd.DataFrame, max_stack_trace=10):\n \"\"\"Report the error message for each agent, categorizing them as server errors or retry errors.\"\"\"\n\n df[\"error_category\"] = df.apply(categorize_error, axis=1)\n\n report = []\n for category in df[\"error_category\"].unique():\n if category is None:\n continue\n report.append(\"\\n-------------------\")\n report.append(f\"Category: {category}\")\n report.append(\"-------------------\\n\")\n report.append(f\"Total number of errors: {len(df[df['error_category'] == category])}\\n\")\n category_df = df[df[\"error_category\"] == category]\n unique_counts = category_df[\"err_msg\"].value_counts().sort_values(ascending=False)\n\n idx = 0\n for err_msg, count in 
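The head/tail truncation can be sketched independently of ExpResult (log lines fabricated for the example):

```python
# Sketch of the head/tail log truncation performed by _format_log.
log_lines = [f"line {i}" for i in range(100)]
head_lines, tail_lines = 10, 50

log_head = "\n".join(log_lines[:head_lines])
log_tail = "\n".join(log_lines[-tail_lines:])  # fallback when no traceback is found
print(log_head + "\n...\n...truncated middle of the log\n...\n" + log_tail)
```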
unique_counts.items():","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.error_report_detailed","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.error_report_detailed#L663-L695","kind":"function","name":"error_report_detailed","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":663,"end_line":695,"context_start_line":643,"context_end_line":715,"code":" return log_head + \"\\n...\\n...truncated middle of the log\\n...\\n\" + log_tail\n\n\ndef categorize_error(row):\n if pd.isna(row.get(\"err_msg\", None)):\n return None\n for category, check_function in ERR_CLASS_MAP.items():\n if check_function(row[\"err_msg\"], row[\"stack_trace\"]):\n if category == \"critical_server_error\":\n return is_critical_server_error(\n row[\"err_msg\"], row[\"stack_trace\"], return_error_type=True\n )\n elif category == \"minor_server_error\":\n return is_minor_server_error(\n row[\"err_msg\"], row[\"stack_trace\"], return_error_type=True\n )\n return category\n return \"other_error\"\n\n\ndef error_report_detailed(df: pd.DataFrame, max_stack_trace=10):\n \"\"\"Report the error message for each agent, categorizing them as server errors or retry errors.\"\"\"\n\n df[\"error_category\"] = df.apply(categorize_error, axis=1)\n\n report = []\n for category in df[\"error_category\"].unique():\n if category is None:\n continue\n report.append(\"\\n-------------------\")\n report.append(f\"Category: {category}\")\n report.append(\"-------------------\\n\")\n report.append(f\"Total number of errors: {len(df[df['error_category'] == category])}\\n\")\n category_df = df[df[\"error_category\"] == category]\n unique_counts = category_df[\"err_msg\"].value_counts().sort_values(ascending=False)\n\n idx = 0\n for err_msg, count in unique_counts.items():\n if idx >= max_stack_trace:\n break\n idx += 1\n report.append(\"-------------------\")\n report.append(f\"{count}x : {err_msg}\\n\")\n sub_df = category_df[category_df[\"err_msg\"] == err_msg]\n for _, row in sub_df.iterrows():\n exp_result = ExpResult(row.exp_dir)\n report.append(f\"Task Name: {exp_result.exp_args.env_args.task_name}\\n\")\n report.append(f\"exp_dir: {exp_result.exp_dir}\\n\")\n report.append(f\"Stack Trace: \\n {row['stack_trace']}\\n\")\n report.append(\"\\n\")\n break\n\n return \"\\n\".join(report)\n\n\ndef print_errors_chronologically(df: pd.DataFrame):\n \"\"\"Print the errors in chronological order, grouping contiguous chunks of the same error.\"\"\"\n df = df.sort_values(\"exp_date\", ascending=True)\n\n current_error = None\n error_count = 0\n\n for _, row in df.iterrows():\n if pd.isna(row.get(\"err_msg\", None)):\n continue\n\n error = categorize_error(row)\n\n if error != current_error:\n if current_error is not None:\n print(f\"{current_error.ljust(40)} : {str(error_count).rjust(5)} times\")\n\n current_error = error","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.print_errors_chronologically","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.print_errors_chronologically#L698-L721","kind":"function","name":"print_errors_chronologically","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":698,"end_line":721,"context_start_line":678,"context_end_line":741,"code":"\n idx = 0\n for err_msg, count 
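Grouping contiguous chunks of the same error is essentially run-length encoding; `itertools.groupby` produces the same counts as the manual counter in `print_errors_chronologically`. A sketch with invented categories:

```python
# Run-length grouping of consecutive identical error categories.
from itertools import groupby

errors = ["timeout", "timeout", "rate_limit", "rate_limit", "rate_limit", "timeout"]
for category, run in groupby(errors):
    print(f"{category.ljust(40)} : {str(len(list(run))).rjust(5)} times")
```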
in unique_counts.items():\n if idx >= max_stack_trace:\n break\n idx += 1\n report.append(\"-------------------\")\n report.append(f\"{count}x : {err_msg}\\n\")\n sub_df = category_df[category_df[\"err_msg\"] == err_msg]\n for _, row in sub_df.iterrows():\n exp_result = ExpResult(row.exp_dir)\n report.append(f\"Task Name: {exp_result.exp_args.env_args.task_name}\\n\")\n report.append(f\"exp_dir: {exp_result.exp_dir}\\n\")\n report.append(f\"Stack Trace: \\n {row['stack_trace']}\\n\")\n report.append(\"\\n\")\n break\n\n return \"\\n\".join(report)\n\n\ndef print_errors_chronologically(df: pd.DataFrame):\n \"\"\"Print the errors in chronological order, grouping contiguous chunks of the same error.\"\"\"\n df = df.sort_values(\"exp_date\", ascending=True)\n\n current_error = None\n error_count = 0\n\n for _, row in df.iterrows():\n if pd.isna(row.get(\"err_msg\", None)):\n continue\n\n error = categorize_error(row)\n\n if error != current_error:\n if current_error is not None:\n print(f\"{current_error.ljust(40)} : {str(error_count).rjust(5)} times\")\n\n current_error = error\n error_count = 1\n else:\n error_count += 1\n\n if current_error is not None:\n print(f\"{current_error.ljust(40)} : {str(error_count).rjust(5)} times\")\n\n\ndef report_different_errors(sub_df):\n \"\"\"Report the different errors in the dataframe.\"\"\"\n\n def _categorize_error(row):\n if pd.isna(row.err_msg):\n record = {}\n else:\n record = {\n err_class: err_fn(row.err_msg, row.stack_trace)\n for err_class, err_fn in ERR_CLASS_MAP.items()\n }\n record[\"other_err\"] = np.sum(list(record.values())) == 0\n record[\"any_err\"] = True\n\n return pd.Series(record)\n\n error_report = sub_df.apply(_categorize_error, axis=1).sum(skipna=True)\n","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.report_different_errors","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.report_different_errors#L724-L745","kind":"function","name":"report_different_errors","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":724,"end_line":745,"context_start_line":704,"context_end_line":765,"code":"\n for _, row in df.iterrows():\n if pd.isna(row.get(\"err_msg\", None)):\n continue\n\n error = categorize_error(row)\n\n if error != current_error:\n if current_error is not None:\n print(f\"{current_error.ljust(40)} : {str(error_count).rjust(5)} times\")\n\n current_error = error\n error_count = 1\n else:\n error_count += 1\n\n if current_error is not None:\n print(f\"{current_error.ljust(40)} : {str(error_count).rjust(5)} times\")\n\n\ndef report_different_errors(sub_df):\n \"\"\"Report the different errors in the dataframe.\"\"\"\n\n def _categorize_error(row):\n if pd.isna(row.err_msg):\n record = {}\n else:\n record = {\n err_class: err_fn(row.err_msg, row.stack_trace)\n for err_class, err_fn in ERR_CLASS_MAP.items()\n }\n record[\"other_err\"] = np.sum(list(record.values())) == 0\n record[\"any_err\"] = True\n\n return pd.Series(record)\n\n error_report = sub_df.apply(_categorize_error, axis=1).sum(skipna=True)\n\n # TODO: fix this bug\n assert isinstance(error_report, pd.DataFrame), \"Expected a DataFrame, got a Series.\"\n\n return error_report\n\n\n# ===============\n\n\ndef _benchmark_from_task_name(task_name: str):\n \"\"\"Extract the benchmark from the task name.\"\"\"\n # TODO should be more robust, e.g. 
handle workarena.L1, workarena.L2, etc.\n return task_name.split(\".\")[0]\n\n\ndef summarize_study(result_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Create a summary of the study. Similar to global report, but handles single agent differently.\"\"\"\n\n levels = list(range(result_df.index.nlevels))\n return result_df.groupby(level=levels[1:]).apply(summarize)\n\n\ndef split_by_key(df: pd.DataFrame, key):\n \"\"\"Return a dict of dataframes separated by the given key.\"\"\"","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results._benchmark_from_task_name","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results._benchmark_from_task_name#L751-L754","kind":"function","name":"_benchmark_from_task_name","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":751,"end_line":754,"context_start_line":731,"context_end_line":774,"code":" record = {\n err_class: err_fn(row.err_msg, row.stack_trace)\n for err_class, err_fn in ERR_CLASS_MAP.items()\n }\n record[\"other_err\"] = np.sum(list(record.values())) == 0\n record[\"any_err\"] = True\n\n return pd.Series(record)\n\n error_report = sub_df.apply(_categorize_error, axis=1).sum(skipna=True)\n\n # TODO: fix this bug\n assert isinstance(error_report, pd.DataFrame), \"Expected a DataFrame, got a Series.\"\n\n return error_report\n\n\n# ===============\n\n\ndef _benchmark_from_task_name(task_name: str):\n \"\"\"Extract the benchmark from the task name.\"\"\"\n # TODO should be more robust, e.g. handle workarena.L1, workarena.L2, etc.\n return task_name.split(\".\")[0]\n\n\ndef summarize_study(result_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Create a summary of the study. Similar to global report, but handles single agent differently.\"\"\"\n\n levels = list(range(result_df.index.nlevels))\n return result_df.groupby(level=levels[1:]).apply(summarize)\n\n\ndef split_by_key(df: pd.DataFrame, key):\n \"\"\"Return a dict of dataframes separated by the given key.\"\"\"\n # check if key in df\n if key not in df.columns:\n df = df.reset_index(key, inplace=False)\n\n df_dict = {}\n for value in df[key].unique():\n sub_df = df[df[key] == value].copy()\n set_index_from_variables(sub_df)\n df_dict[value] = sub_df","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.summarize_study","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.summarize_study#L757-L761","kind":"function","name":"summarize_study","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":757,"end_line":761,"context_start_line":737,"context_end_line":781,"code":"\n return pd.Series(record)\n\n error_report = sub_df.apply(_categorize_error, axis=1).sum(skipna=True)\n\n # TODO: fix this bug\n assert isinstance(error_report, pd.DataFrame), \"Expected a DataFrame, got a Series.\"\n\n return error_report\n\n\n# ===============\n\n\ndef _benchmark_from_task_name(task_name: str):\n \"\"\"Extract the benchmark from the task name.\"\"\"\n # TODO should be more robust, e.g. handle workarena.L1, workarena.L2, etc.\n return task_name.split(\".\")[0]\n\n\ndef summarize_study(result_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Create a summary of the study. 
Similar to global report, but handles single agent differently.\"\"\"\n\n    levels = list(range(result_df.index.nlevels))\n    return result_df.groupby(level=levels[1:]).apply(summarize)\n\n\ndef split_by_key(df: pd.DataFrame, key):\n    \"\"\"Return a dict of dataframes separated by the given key.\"\"\"\n    # check if key in df\n    if key not in df.columns:\n        df = df.reset_index(key, inplace=False)\n\n    df_dict = {}\n    for value in df[key].unique():\n        sub_df = df[df[key] == value].copy()\n        set_index_from_variables(sub_df)\n        df_dict[value] = sub_df\n\n    return df_dict\n\n\ndef get_all_summaries(results_dir: Path, skip_hidden=True, ignore_cache=False, ignore_stale=False):\n    summaries = []\n    for study_dir in results_dir.iterdir():","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.split_by_key","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.split_by_key#L764-L776","kind":"function","name":"split_by_key","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":764,"end_line":776,"context_start_line":744,"context_end_line":796,"code":"\n    return error_report\n\n\n# ===============\n\n\ndef _benchmark_from_task_name(task_name: str):\n    \"\"\"Extract the benchmark from the task name.\"\"\"\n    # TODO should be more robust, e.g. handle workarena.L1, workarena.L2, etc.\n    return task_name.split(\".\")[0]\n\n\ndef summarize_study(result_df: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"Create a summary of the study. Similar to global report, but handles single agent differently.\"\"\"\n\n    levels = list(range(result_df.index.nlevels))\n    return result_df.groupby(level=levels[1:]).apply(summarize)\n\n\ndef split_by_key(df: pd.DataFrame, key):\n    \"\"\"Return a dict of dataframes separated by the given key.\"\"\"\n    # check if key in df\n    if key not in df.columns:\n        df = df.reset_index(key, inplace=False)\n\n    df_dict = {}\n    for value in 
df[key].unique():\n sub_df = df[df[key] == value].copy()\n set_index_from_variables(sub_df)\n df_dict[value] = sub_df\n\n return df_dict\n\n\ndef get_all_summaries(results_dir: Path, skip_hidden=True, ignore_cache=False, ignore_stale=False):\n summaries = []\n for study_dir in results_dir.iterdir():\n print(study_dir.name)\n if skip_hidden and study_dir.name.startswith(\"_\"):\n print(\" skip (starts with '_')\")\n continue\n\n try:\n summary = get_study_summary(\n study_dir, ignore_cache=ignore_cache, ignore_stale=ignore_stale\n )\n if summary is not None:\n # set as index\n summary[\"study_dir\"] = study_dir.name\n summary.set_index(\"study_dir\", inplace=True)\n summaries.append(summary)\n\n except Exception:\n traceback.print_exc()\n continue\n\n summaries = pd.concat(summaries)\n # reverse sort according to index\n summaries.sort_index(ascending=False, inplace=True)\n return summaries\n\n\ndef get_study_summary(\n study_dir: Path,\n ignore_cache=False,\n ignore_stale=False,\n progress_fn=None,\n sentinel=None,\n) -> pd.DataFrame:\n \"\"\"Get the cached study summary for the given study directory or computes it.\n\n The cache is based on the modified times of all the files in the study.\n\n Args:\n study_dir: The study directory to summarize\n ignore_cache: If True, ignore the cache and recompute the summary\n ignore_stale: If True, don't verify if files have changed since the last\n summary was computed. This may lead to stale summaries.\n progress_fn: Pass tqdm.tqdm to show progress.\n sentinel: Captures internal values for unit testing.","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.get_study_summary","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.get_study_summary#L807-L853","kind":"function","name":"get_study_summary","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":807,"end_line":853,"context_start_line":787,"context_end_line":873,"code":" try:\n summary = get_study_summary(\n study_dir, ignore_cache=ignore_cache, ignore_stale=ignore_stale\n )\n if summary is not None:\n # set as index\n summary[\"study_dir\"] = study_dir.name\n summary.set_index(\"study_dir\", inplace=True)\n summaries.append(summary)\n\n except Exception:\n traceback.print_exc()\n continue\n\n summaries = pd.concat(summaries)\n # reverse sort according to index\n summaries.sort_index(ascending=False, inplace=True)\n return summaries\n\n\ndef get_study_summary(\n study_dir: Path,\n ignore_cache=False,\n ignore_stale=False,\n progress_fn=None,\n sentinel=None,\n) -> pd.DataFrame:\n \"\"\"Get the cached study summary for the given study directory or computes it.\n\n The cache is based on the modified times of all the files in the study.\n\n Args:\n study_dir: The study directory to summarize\n ignore_cache: If True, ignore the cache and recompute the summary\n ignore_stale: If True, don't verify if files have changed since the last\n summary was computed. 
This may lead to stale summaries.\n        progress_fn: Pass tqdm.tqdm to show progress.\n        sentinel: Captures internal values for unit testing.\n\n    Returns:\n        pd.DataFrame: The study summary\n    \"\"\"\n    study_dir = Path(study_dir)\n\n    summary_path = study_dir / \"study_summary.csv\"\n    if not ignore_stale:\n        is_stale = _is_stale(study_dir, summary_path)\n    else:\n        is_stale = False\n\n    if not ignore_cache:\n        if summary_path.exists() and not is_stale:\n            if sentinel is not None:\n                sentinel[\"from_cache\"] = True\n            return pd.read_csv(summary_path)\n\n    result_df = load_result_df(study_dir, progress_fn=progress_fn)\n    if result_df is None:\n        return None\n\n    summary = summarize_study(result_df)\n\n    summary.to_csv(summary_path)\n\n    if sentinel is not None:\n        sentinel[\"from_cache\"] = False\n    return summary\n\n\ndef _get_mtimes(dir: Path, pattern=\"[!_.]*\", whitelist=()):\n    \"\"\"Recursively get the modification date of all files\"\"\"\n    # use glob to get all files\n    files = list(dir.rglob(pattern))\n    return {str(f.relative_to(dir)): f.stat().st_mtime for f in files if f not in whitelist}\n\n\ndef _is_stale(study_dir: Path, summary_path: Path) -> bool:\n    mtimes_path = study_dir / \"_last_modification_times.json\"\n    mtimes = _get_mtimes(study_dir, whitelist=(summary_path,))\n    if not mtimes_path.exists() or not summary_path.exists():\n        stale = True\n    else:\n        mtimes_saved = json.loads(mtimes_path.read_text())\n        stale = mtimes_saved != mtimes\n    mtimes_path.write_text(json.dumps(mtimes))\n    return stale\n","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results._get_mtimes","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results._get_mtimes#L856-L860","kind":"function","name":"_get_mtimes","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":856,"end_line":860,"context_start_line":836,"context_end_line":880,"code":"\n    if not ignore_cache:\n        if summary_path.exists() and not is_stale:\n            if sentinel is not None:\n                sentinel[\"from_cache\"] = True\n            return pd.read_csv(summary_path)\n\n    result_df = load_result_df(study_dir, progress_fn=progress_fn)\n    if result_df is None:\n        return None\n\n    summary = summarize_study(result_df)\n\n    summary.to_csv(summary_path)\n\n    if sentinel is not None:\n        sentinel[\"from_cache\"] = False\n    return summary\n\n\ndef _get_mtimes(dir: Path, pattern=\"[!_.]*\", whitelist=()):\n    \"\"\"Recursively get the modification date of all files\"\"\"\n    # use glob to get all files\n    files = list(dir.rglob(pattern))\n    return {str(f.relative_to(dir)): f.stat().st_mtime for f in files if f not in whitelist}\n\n\ndef _is_stale(study_dir: Path, summary_path: Path) -> bool:\n    mtimes_path = study_dir / \"_last_modification_times.json\"\n    mtimes = _get_mtimes(study_dir, whitelist=(summary_path,))\n    if not mtimes_path.exists() or not summary_path.exists():\n        stale = True\n    else:\n        mtimes_saved = json.loads(mtimes_path.read_text())\n        stale = mtimes_saved != mtimes\n    mtimes_path.write_text(json.dumps(mtimes))\n    return stale\n\n\ndef get_all_task_messages(exp_dir, max_n_exp=None):\n    result_list = list(yield_all_exp_results(exp_dir, progress_fn=tqdm))\n\n    if max_n_exp is not None:\n        result_list = random.sample(result_list, min(max_n_exp, len(result_list)))\n","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} 
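The two inspect_results records above implement an mtime-based summary cache: get_study_summary stores its result as study_summary.csv inside the study directory, and _get_mtimes/_is_stale invalidate that cache by comparing the saved file-modification times against the current ones. The sketch below is illustrative only and not part of the repository dump; the study path is hypothetical, and it assumes agentlab is importable and the directory contains experiment results.

from pathlib import Path

from agentlab.analyze.inspect_results import get_study_summary

# Hypothetical study directory produced by an earlier Study.run()
study_dir = Path("~/agentlab_results/2025-01-31_22-08-34_genericagent-demo").expanduser()

sentinel = {}  # captures whether the summary came from the cache
summary = get_study_summary(study_dir, sentinel=sentinel)
print(sentinel["from_cache"])  # False on a cold cache: summary is computed, then saved to study_summary.csv

summary = get_study_summary(study_dir, sentinel=sentinel)
print(sentinel["from_cache"])  # True while no file in the study changed (_is_stale returns False)

# Recompute unconditionally, bypassing the cached study_summary.csv
summary = get_study_summary(study_dir, ignore_cache=True, sentinel=sentinel)

Keying the cache on modification times rather than a fixed expiry means any change inside the study directory, such as newly written experiment results, transparently triggers a recompute on the next call.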
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results._is_stale","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results._is_stale#L863-L872","kind":"function","name":"_is_stale","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":863,"end_line":872,"context_start_line":843,"context_end_line":892,"code":" result_df = load_result_df(study_dir, progress_fn=progress_fn)\n if result_df is None:\n return None\n\n summary = summarize_study(result_df)\n\n summary.to_csv(summary_path)\n\n if sentinel is not None:\n sentinel[\"from_cache\"] = False\n return summary\n\n\ndef _get_mtimes(dir: Path, pattern=\"[!_.]*\", whitelist=()):\n \"\"\"Recursevly get all file's modif date\"\"\"\n # use glob to get all files\n files = list(dir.rglob(pattern))\n return {str(f.relative_to(dir)): f.stat().st_mtime for f in files if f not in whitelist}\n\n\ndef _is_stale(study_dir: Path, summary_path: Path) -> bool:\n mtimes_path = study_dir / \"_last_modification_times.json\"\n mtimes = _get_mtimes(study_dir, whitelist=(summary_path,))\n if not mtimes_path.exists() or not summary_path.exists():\n stale = True\n else:\n mtimes_saved = json.loads(mtimes_path.read_text())\n stale = mtimes_saved != mtimes\n mtimes_path.write_text(json.dumps(mtimes))\n return stale\n\n\ndef get_all_task_messages(exp_dir, max_n_exp=None):\n result_list = list(yield_all_exp_results(exp_dir, progress_fn=tqdm))\n\n if max_n_exp is not None:\n result_list = random.sample(result_list, min(max_n_exp, len(result_list)))\n\n task_messages = defaultdict(list)\n for exp_result in tqdm(result_list):\n task_name = exp_result.exp_args.env_args.task_name\n for step in exp_result.steps_info:\n try:\n task_messages[task_name].append(step.task_info[\"message\"])\n except (KeyError, TypeError):\n pass\n\n # count identical task messages:\n for task_name, messages in task_messages.items():\n unique_messages, count = np.unique(messages, return_counts=True)","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.get_all_task_messages","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.get_all_task_messages#L875-L897","kind":"function","name":"get_all_task_messages","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":875,"end_line":897,"context_start_line":855,"context_end_line":897,"code":"\ndef _get_mtimes(dir: Path, pattern=\"[!_.]*\", whitelist=()):\n \"\"\"Recursevly get all file's modif date\"\"\"\n # use glob to get all files\n files = list(dir.rglob(pattern))\n return {str(f.relative_to(dir)): f.stat().st_mtime for f in files if f not in whitelist}\n\n\ndef _is_stale(study_dir: Path, summary_path: Path) -> bool:\n mtimes_path = study_dir / \"_last_modification_times.json\"\n mtimes = _get_mtimes(study_dir, whitelist=(summary_path,))\n if not mtimes_path.exists() or not summary_path.exists():\n stale = True\n else:\n mtimes_saved = json.loads(mtimes_path.read_text())\n stale = mtimes_saved != mtimes\n mtimes_path.write_text(json.dumps(mtimes))\n return stale\n\n\ndef get_all_task_messages(exp_dir, max_n_exp=None):\n result_list = list(yield_all_exp_results(exp_dir, progress_fn=tqdm))\n\n if max_n_exp is not None:\n result_list = random.sample(result_list, min(max_n_exp, len(result_list)))\n\n task_messages = defaultdict(list)\n for exp_result in tqdm(result_list):\n task_name = exp_result.exp_args.env_args.task_name\n 
for step in exp_result.steps_info:\n try:\n task_messages[task_name].append(step.task_info[\"message\"])\n except (KeyError, TypeError):\n pass\n\n # count identical task messages:\n for task_name, messages in task_messages.items():\n unique_messages, count = np.unique(messages, return_counts=True)\n # sort them\n print(task_name)\n for msg, count in sorted(zip(unique_messages, count), key=lambda x: x[1], reverse=True):\n print(f\"{count}x : {msg}\")\n print()","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.add_order","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.add_order#L398-L399","kind":"function","name":"add_order","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":398,"end_line":399,"context_start_line":378,"context_end_line":419,"code":" report = _extract_ablation_study(report, progression=progression)\n return report\n\n\ndef _get_avg_order(df: pd.DataFrame, row: pd.Series):\n \"\"\"Return the average order for the given row.\"\"\"\n df = df.reset_index(level=0, drop=True, inplace=False)\n # df.sort_index(inplace=True)\n\n sub_df = df.loc[row.name]\n orders = [get_exp_result(exp_dir).exp_args.order for exp_dir in sub_df.exp_dir]\n orders = [order for order in orders if order is not None]\n if len(orders) == 0:\n return None\n return np.mean(orders)\n\n\ndef _sort_order(result_df, report):\n \"\"\"Add a column to the report with the average order for each agent and sort.\"\"\"\n\n def add_order(row):\n return _get_avg_order(result_df, row)\n\n report[\"avg_order\"] = report.apply(add_order, axis=1)\n return report.sort_values(\"avg_order\", ascending=True)\n\n\ndef global_report(\n result_df: pd.DataFrame,\n reduce_fn=summarize,\n rename_index=lambda name: name.replace(\"agent.flags.\", \"\"),\n):\n \"\"\"Produce a report that summarize all tasks and all episodes for each\n agent.\n\n Args:\n result_df: The result dataframe as returned by load_result_df.\n reduce_fn: The function to use to reduce the sub dataframe. By default\n this is summarize.\n rename_index: Function to rename the index. 
By default we remove the prefix\n \"agent.flags.\"\n","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results.formatter","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results.formatter#L541-L544","kind":"function","name":"formatter","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":541,"end_line":544,"context_start_line":521,"context_end_line":564,"code":" report.reset_index(inplace=True)\n\n if print_only:\n columns = [print_only] + columns\n report = report[columns]\n\n styled_report = set_wrap_style(report)\n\n display(styled_report)\n\n\ndef shrink_columns(df, also_wrap_index=True):\n \"\"\"Make the column names more compact by replacing underscores with newlines\"\"\"\n df = df.copy()\n\n df.columns = [col.replace(\"_\", \"\\n\") for col in df.columns]\n if also_wrap_index:\n df.index.names = [name.replace(\"_\", \"\\n\") for name in df.index.names]\n\n # Define a formatter function that formats float numbers without trailing zeros\n def formatter(x):\n if isinstance(x, float):\n return \"{:.10f}\".format(x).rstrip(\"0\").rstrip(\".\")\n return x\n\n return df.map(formatter)\n\n\ndef set_wrap_style(df):\n return df.style.set_table_styles([{\"selector\": \"th\", \"props\": [(\"white-space\", \"pre-wrap\")]}])\n\n\n# ------------\n# Error Utils\n# ------------\n\n\ndef map_err_key(err_msg: str):\n if err_msg is None:\n return err_msg\n\n # remove logs from the message if any\n err_msg = err_msg[: err_msg.find(\"=== logs ===\")].rstrip()\n regex_replacements = [","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.inspect_results._categorize_error","uri":"program://AgentLab/function/src.agentlab.analyze.inspect_results._categorize_error#L727-L738","kind":"function","name":"_categorize_error","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":727,"end_line":738,"context_start_line":707,"context_end_line":758,"code":" continue\n\n error = categorize_error(row)\n\n if error != current_error:\n if current_error is not None:\n print(f\"{current_error.ljust(40)} : {str(error_count).rjust(5)} times\")\n\n current_error = error\n error_count = 1\n else:\n error_count += 1\n\n if current_error is not None:\n print(f\"{current_error.ljust(40)} : {str(error_count).rjust(5)} times\")\n\n\ndef report_different_errors(sub_df):\n \"\"\"Report the different errors in the dataframe.\"\"\"\n\n def _categorize_error(row):\n if pd.isna(row.err_msg):\n record = {}\n else:\n record = {\n err_class: err_fn(row.err_msg, row.stack_trace)\n for err_class, err_fn in ERR_CLASS_MAP.items()\n }\n record[\"other_err\"] = np.sum(list(record.values())) == 0\n record[\"any_err\"] = True\n\n return pd.Series(record)\n\n error_report = sub_df.apply(_categorize_error, axis=1).sum(skipna=True)\n\n # TODO: fix this bug\n assert isinstance(error_report, pd.DataFrame), \"Expected a DataFrame, got a Series.\"\n\n return error_report\n\n\n# ===============\n\n\ndef _benchmark_from_task_name(task_name: str):\n \"\"\"Extract the benchmark from the task name.\"\"\"\n # TODO should be more robost, e.g. handle workarna.L1, workarena.L2, etc.\n return task_name.split(\".\")[0]\n\n\ndef summarize_study(result_df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Create a summary of the study. 
Similar to global report, but handles single agent differently.\"\"\"","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes","uri":"program://AgentLab/module/src.agentlab.analyze.tapes#L1-L241","kind":"module","name":"src.agentlab.analyze.tapes","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":1,"end_line":241,"context_start_line":1,"context_end_line":241,"code":"import json\nimport logging\nimport sys\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\nimport yaml\nfrom tapeagents.core import Step, StepMetadata\nfrom tapeagents.observe import retrieve_all_llm_calls\nfrom tapeagents.renderers.camera_ready_renderer import CameraReadyRenderer\nfrom tapeagents.tape_browser import TapeBrowser\n\nfrom agentlab.agents.tapeagent.agent import ExtendedMetadata, Tape\nfrom agentlab.benchmarks.gaia import step_error\n\nlogger = logging.getLogger(__name__)\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\nclass WrapperStep(Step):\n content: dict\n\n\ndef pretty_yaml(data: dict | None) -> str:\n return yaml.dump(data, sort_keys=False, indent=2) if data else \"\"\n\n\nclass TapesRender(CameraReadyRenderer):\n\n @property\n def style(self):\n style = \"\"\n return super().style + style\n\n def render_step(self, step: WrapperStep, index: int, **kwargs):\n step_dict = step.content.copy()\n step_dict.pop(\"metadata\", None)\n kind = step_dict.pop(\"kind\", \"Step\")\n if kind == \"set_next_node\":\n return \"\"\n # remove empty keys\n step_dict = {k: v for k, v in step_dict.items() if v is not None and v != \"\"}\n if len(step_dict) == 1:\n content = list(step_dict.values())[0]\n elif kind == \"page_observation\":\n content = step_dict.get(\"text\", pretty_yaml(step_dict))\n if len(content) > 100:\n summary = content[:100]\n content = f\"
{summary}---{content}\"\n        elif kind == \"python_code_action\":\n            content = step_dict.get(\"code\", pretty_yaml(step_dict))\n        elif kind == \"code_execution_result\":\n            content = pretty_yaml(step_dict.get(\"result\"))\n        elif len(step_dict) == 1 and \"content\" in step_dict:\n            content = step_dict[\"content\"]\n        elif len(step_dict) == 1 and \"reasoning\" in step_dict:\n            content = step_dict[\"reasoning\"]\n        else:\n            content = pretty_yaml(step_dict)\n\n        if step_dict.get(\"error\") or step_dict.get(\"result\", {}).get(\"exit_code\"):\n            class_ = \"error\"\n        elif kind.endswith(\"thought\"):\n            class_ = \"thought\"\n            kind = kind[:-8]\n        elif kind.endswith(\"action\"):\n            class_ = \"action\"\n            kind = kind[:-7]\n        else:\n            class_ = \"observation\"\n        return f\"{kind} {content}
\"\n\n\nclass TapesBrowser(TapeBrowser):\n def __init__(self, tapes_folder):\n super().__init__(Tape, tapes_folder, TapesRender(), \".json\")\n\n def get_tape_files(self) -> list[str]:\n logger.info(f\"Searching for tapes in {self.tapes_folder}\")\n fpath = Path(self.tapes_folder)\n exps = [\n str(exp_dir.relative_to(fpath))\n for exp_dir in fpath.iterdir()\n if exp_dir.is_dir() and len(list(exp_dir.rglob(\"tape.json\"))) > 0\n ]\n assert exps, f\"No experiments found in {self.tapes_folder}\"\n logger.info(f\"Found {len(exps)} experiments in {self.tapes_folder}\")\n return sorted(exps)\n\n def get_steps(self, tape: dict) -> list:\n return tape[\"steps\"]\n\n def load_llm_calls(self):\n sqlite_path = self.exp_path / \"tapedata.sqlite\"\n if sqlite_path.exists():\n try:\n self.llm_calls = {\n call.prompt.id: call for call in retrieve_all_llm_calls(str(sqlite_path))\n }\n logger.info(f\"Loaded {len(self.llm_calls)} LLM calls from {sqlite_path}\")\n except Exception as e:\n logger.warning(f\"Failed to load LLM calls from {sqlite_path}: {e}\")\n else:\n logger.warning(f\"{sqlite_path} not found\")\n\n def get_context(self, tape: Tape) -> list:\n return []\n\n def get_tape_name(self, i: int, tape: Tape) -> str:\n errors = [\n bool(s.content.get(\"error\", False) or s.content.get(\"result\", {}).get(\"exit_code\"))\n for s in tape.steps\n ]\n mark = \"✅ \" if tape.metadata.reward > 0 else \"\"\n if any(errors):\n mark = \"⚠ \"\n if tape.metadata.task.get(\"file_name\"):\n mark += \"📁 \"\n number = tape.metadata.task.get(\"number\", \"\")\n n = f\"{tape.metadata.task.get('Level', '')}.{number} \" if number else \"\"\n name = tape.steps[0].content[\"content\"][:32] + \"...\"\n return f\"{n}({len(tape.steps)}){mark}{name}\"\n\n def get_exp_label(self, filename: str, tapes: list[Tape]) -> str:\n acc, n_solved = self.calculate_accuracy(tapes)\n errors = defaultdict(int)\n prompt_tokens_num = 0\n output_tokens_num = 0\n total_cost = 0.0\n visible_prompt_tokens_num = 0\n visible_output_tokens_num = 0\n visible_cost = 0.0\n no_result = 0\n actions = defaultdict(int)\n for llm_call in self.llm_calls.values():\n prompt_tokens_num += llm_call.prompt_length_tokens\n output_tokens_num += llm_call.output_length_tokens\n total_cost += llm_call.cost\n avg_steps = np.mean([len(tape) for tape in tapes])\n std_steps = np.std([len(tape) for tape in tapes])\n for tape in tapes:\n if tape.metadata.truncated:\n no_result += 1\n if tape.metadata.error:\n errors[\"fatal\"] += 1\n last_action = None\n counted = set([])\n for step in tape:\n step_dict = step.content.copy()\n kind = step_dict.get(\"kind\", \"unknown\")\n llm_call = self.llm_calls.get(step.metadata.prompt_id)\n if llm_call and step.metadata.prompt_id not in counted:\n counted.add(step.metadata.prompt_id)\n visible_prompt_tokens_num += llm_call.prompt_length_tokens\n visible_output_tokens_num += llm_call.output_length_tokens\n visible_cost += llm_call.cost\n if kind.endswith(\"action\"):\n actions[kind] += 1\n last_action = kind\n if error := self.get_step_error(step_dict, last_action):\n errors[error] += 1\n timers, timer_counts = self.aggregate_timer_times(tapes)\n html = f\"

Solved {acc:.2f}%, {n_solved} out of {len(tapes)}\"\n        if \"all\" in filename:\n            html += f\"Prompt tokens: {prompt_tokens_num} Output tokens: {output_tokens_num} Cost: {total_cost:.2f} USD Visible\"\n        html += f\"Prompt tokens: {visible_prompt_tokens_num} Output tokens: {visible_output_tokens_num} Cost: {visible_cost:.2f} USD\"\n        html += f\"Steps per tape: {avg_steps:.1f} ± {std_steps:.1f}\"\n        if errors:\n            errors_str = \" \".join(f\"{k}: {v}\" for k, v in errors.items())\n            html += f\"No result: {no_result}\"\n            html += f\"Errors: {sum(errors.values())} {errors_str}\"\n        if actions:\n            actions_str = \" \".join(f\"{k}: {v}\" for k, v in actions.items())\n            html += f\"Actions: {sum(actions.values())} {actions_str}\"\n        if timers:\n            timers_str = \" \".join(\n                f\"{'execute ' if k.endswith('action') else ''}{k}: {v:.1f} sec, avg. {v/timer_counts[k]:.1f} sec\"\n                for k, v in timers.items()\n            )\n            html += f\"Timings 
{timers_str}\"\n return html\n\n def get_step_error(self, step_dict: dict, last_action: str | None) -> str:\n return step_error(step_dict, last_action)\n\n def calculate_accuracy(self, tapes: list[Tape]) -> tuple[float, int]:\n solved = [tape.metadata.reward for tape in tapes]\n accuracy = 100 * (sum(solved) / len(solved) if solved else 0.0)\n return accuracy, int(sum(solved))\n\n def aggregate_timer_times(self, tapes: list[Tape]):\n timer_sums = defaultdict(float)\n timer_counts = defaultdict(int)\n for tape in tapes:\n timers = tape.metadata.other.get(\"timers\", {})\n for timer_name, exec_time in timers.items():\n timer_sums[timer_name] += exec_time\n timer_counts[timer_name] += 1\n for step in tape.steps:\n action_kind = step.metadata.other.get(\"action_kind\")\n action_execution_time = step.metadata.other.get(\"action_execution_time\")\n if action_kind and action_execution_time:\n timer_sums[action_kind] += action_execution_time\n timer_counts[action_kind] += 1\n return dict(timer_sums), dict(timer_counts)\n\n def load_tapes(self, exp_dir: str) -> list[Tape]:\n tapes: list[Tape] = []\n fpath = Path(self.tapes_folder) / exp_dir\n for json_file in fpath.rglob(\"tape.json\"):\n if json_file.stat().st_size == 0:\n logger.warning(f\"Empty tape file: {json_file}\")\n continue\n try:\n with open(json_file) as f:\n tape_dict = json.load(f)\n tape = Tape(steps=[], metadata=ExtendedMetadata(**tape_dict[\"metadata\"]))\n tape.steps = [\n WrapperStep(content=s, metadata=StepMetadata(**s[\"metadata\"]))\n for s in tape_dict[\"steps\"]\n ]\n tapes.append(tape)\n except Exception as e:\n logger.warning(f\"Failed to load {json_file}: {e}\")\n logger.info(f\"Loaded {len(tapes)} tapes from {fpath}\")\n self.exp_path = fpath\n return sorted(\n tapes,\n key=lambda x: f\"{x.metadata.task.get('Level', '')}{x.metadata.task.get('number', 0):03d}\",\n )\n\n def save_annotation(self, step: int, annotation: str, tape_id: int):\n pass\n\n\nif __name__ == \"__main__\":\n results_dir = sys.argv[1] if len(sys.argv) > 1 else \"~/agentlab_results/\"\n tapes_browser = TapesBrowser(Path(results_dir).expanduser())\n tapes_browser.launch()","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.WrapperStep","uri":"program://AgentLab/class/src.agentlab.analyze.tapes.WrapperStep#L22-L23","kind":"class","name":"WrapperStep","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":22,"end_line":23,"context_start_line":2,"context_end_line":43,"code":"import logging\nimport sys\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\nimport yaml\nfrom tapeagents.core import Step, StepMetadata\nfrom tapeagents.observe import retrieve_all_llm_calls\nfrom tapeagents.renderers.camera_ready_renderer import CameraReadyRenderer\nfrom tapeagents.tape_browser import TapeBrowser\n\nfrom agentlab.agents.tapeagent.agent import ExtendedMetadata, Tape\nfrom agentlab.benchmarks.gaia import step_error\n\nlogger = logging.getLogger(__name__)\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\nclass WrapperStep(Step):\n content: dict\n\n\ndef pretty_yaml(data: dict | None) -> str:\n return yaml.dump(data, sort_keys=False, indent=2) if data else \"\"\n\n\nclass TapesRender(CameraReadyRenderer):\n\n @property\n def style(self):\n style = \"\"\n return 
super().style + style\n\n def render_step(self, step: WrapperStep, index: int, **kwargs):\n step_dict = step.content.copy()\n step_dict.pop(\"metadata\", None)\n kind = step_dict.pop(\"kind\", \"Step\")\n if kind == \"set_next_node\":\n return \"\"\n # remove empty keys","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.pretty_yaml","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.pretty_yaml#L26-L27","kind":"function","name":"pretty_yaml","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":26,"end_line":27,"context_start_line":6,"context_end_line":47,"code":"\nimport numpy as np\nimport yaml\nfrom tapeagents.core import Step, StepMetadata\nfrom tapeagents.observe import retrieve_all_llm_calls\nfrom tapeagents.renderers.camera_ready_renderer import CameraReadyRenderer\nfrom tapeagents.tape_browser import TapeBrowser\n\nfrom agentlab.agents.tapeagent.agent import ExtendedMetadata, Tape\nfrom agentlab.benchmarks.gaia import step_error\n\nlogger = logging.getLogger(__name__)\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\nclass WrapperStep(Step):\n content: dict\n\n\ndef pretty_yaml(data: dict | None) -> str:\n return yaml.dump(data, sort_keys=False, indent=2) if data else \"\"\n\n\nclass TapesRender(CameraReadyRenderer):\n\n @property\n def style(self):\n style = \"\"\n return super().style + style\n\n def render_step(self, step: WrapperStep, index: int, **kwargs):\n step_dict = step.content.copy()\n step_dict.pop(\"metadata\", None)\n kind = step_dict.pop(\"kind\", \"Step\")\n if kind == \"set_next_node\":\n return \"\"\n # remove empty keys\n step_dict = {k: v for k, v in step_dict.items() if v is not None and v != \"\"}\n if len(step_dict) == 1:\n content = list(step_dict.values())[0]\n elif kind == \"page_observation\":","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.TapesRender","uri":"program://AgentLab/class/src.agentlab.analyze.tapes.TapesRender#L30-L73","kind":"class","name":"TapesRender","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":30,"end_line":73,"context_start_line":10,"context_end_line":93,"code":"from tapeagents.observe import retrieve_all_llm_calls\nfrom tapeagents.renderers.camera_ready_renderer import CameraReadyRenderer\nfrom tapeagents.tape_browser import TapeBrowser\n\nfrom agentlab.agents.tapeagent.agent import ExtendedMetadata, Tape\nfrom agentlab.benchmarks.gaia import step_error\n\nlogger = logging.getLogger(__name__)\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\nclass WrapperStep(Step):\n content: dict\n\n\ndef pretty_yaml(data: dict | None) -> str:\n return yaml.dump(data, sort_keys=False, indent=2) if data else \"\"\n\n\nclass TapesRender(CameraReadyRenderer):\n\n @property\n def style(self):\n style = \"\"\n return super().style + style\n\n def render_step(self, step: WrapperStep, index: int, **kwargs):\n step_dict = step.content.copy()\n step_dict.pop(\"metadata\", None)\n kind = step_dict.pop(\"kind\", \"Step\")\n if kind == \"set_next_node\":\n return \"\"\n # remove empty 
keys\n        step_dict = {k: v for k, v in step_dict.items() if v is not None and v != \"\"}\n        if len(step_dict) == 1:\n            content = list(step_dict.values())[0]\n        elif kind == \"page_observation\":\n            content = step_dict.get(\"text\", pretty_yaml(step_dict))\n            if len(content) > 100:\n                summary = content[:100]\n                content = f\"{summary}---{content}\"\n        elif kind == \"python_code_action\":\n            content = step_dict.get(\"code\", pretty_yaml(step_dict))\n        elif kind == \"code_execution_result\":\n            content = pretty_yaml(step_dict.get(\"result\"))\n        elif len(step_dict) == 1 and \"content\" in step_dict:\n            content = step_dict[\"content\"]\n        elif len(step_dict) == 1 and \"reasoning\" in step_dict:\n            content = step_dict[\"reasoning\"]\n        else:\n            content = pretty_yaml(step_dict)\n\n        if step_dict.get(\"error\") or step_dict.get(\"result\", {}).get(\"exit_code\"):\n            class_ = \"error\"\n        elif kind.endswith(\"thought\"):\n            class_ = \"thought\"\n            kind = kind[:-8]\n        elif kind.endswith(\"action\"):\n            class_ = \"action\"\n            kind = kind[:-7]\n        else:\n            class_ = \"observation\"\n        return f\"{kind} {content}
\"\n\n\nclass TapesBrowser(TapeBrowser):\n def __init__(self, tapes_folder):\n super().__init__(Tape, tapes_folder, TapesRender(), \".json\")\n\n def get_tape_files(self) -> list[str]:\n logger.info(f\"Searching for tapes in {self.tapes_folder}\")\n fpath = Path(self.tapes_folder)\n exps = [\n str(exp_dir.relative_to(fpath))\n for exp_dir in fpath.iterdir()\n if exp_dir.is_dir() and len(list(exp_dir.rglob(\"tape.json\"))) > 0\n ]\n assert exps, f\"No experiments found in {self.tapes_folder}\"\n logger.info(f\"Found {len(exps)} experiments in {self.tapes_folder}\")\n return sorted(exps)\n\n def get_steps(self, tape: dict) -> list:\n return tape[\"steps\"]","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.TapesBrowser","uri":"program://AgentLab/class/src.agentlab.analyze.tapes.TapesBrowser#L76-L235","kind":"class","name":"TapesBrowser","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":76,"end_line":235,"context_start_line":56,"context_end_line":241,"code":" elif len(step_dict) == 1 and \"content\" in step_dict:\n content = step_dict[\"content\"]\n elif len(step_dict) == 1 and \"reasoning\" in step_dict:\n content = step_dict[\"reasoning\"]\n else:\n content = pretty_yaml(step_dict)\n\n if step_dict.get(\"error\") or step_dict.get(\"result\", {}).get(\"exit_code\"):\n class_ = \"error\"\n elif kind.endswith(\"thought\"):\n class_ = \"thought\"\n kind = kind[:-8]\n elif kind.endswith(\"action\"):\n class_ = \"action\"\n kind = kind[:-7]\n else:\n class_ = \"observation\"\n return f\"

{kind} {content}
\"\n\n\nclass TapesBrowser(TapeBrowser):\n def __init__(self, tapes_folder):\n super().__init__(Tape, tapes_folder, TapesRender(), \".json\")\n\n def get_tape_files(self) -> list[str]:\n logger.info(f\"Searching for tapes in {self.tapes_folder}\")\n fpath = Path(self.tapes_folder)\n exps = [\n str(exp_dir.relative_to(fpath))\n for exp_dir in fpath.iterdir()\n if exp_dir.is_dir() and len(list(exp_dir.rglob(\"tape.json\"))) > 0\n ]\n assert exps, f\"No experiments found in {self.tapes_folder}\"\n logger.info(f\"Found {len(exps)} experiments in {self.tapes_folder}\")\n return sorted(exps)\n\n def get_steps(self, tape: dict) -> list:\n return tape[\"steps\"]\n\n def load_llm_calls(self):\n sqlite_path = self.exp_path / \"tapedata.sqlite\"\n if sqlite_path.exists():\n try:\n self.llm_calls = {\n call.prompt.id: call for call in retrieve_all_llm_calls(str(sqlite_path))\n }\n logger.info(f\"Loaded {len(self.llm_calls)} LLM calls from {sqlite_path}\")\n except Exception as e:\n logger.warning(f\"Failed to load LLM calls from {sqlite_path}: {e}\")\n else:\n logger.warning(f\"{sqlite_path} not found\")\n\n def get_context(self, tape: Tape) -> list:\n return []\n\n def get_tape_name(self, i: int, tape: Tape) -> str:\n errors = [\n bool(s.content.get(\"error\", False) or s.content.get(\"result\", {}).get(\"exit_code\"))\n for s in tape.steps\n ]\n mark = \"✅ \" if tape.metadata.reward > 0 else \"\"\n if any(errors):\n mark = \"⚠ \"\n if tape.metadata.task.get(\"file_name\"):\n mark += \"📁 \"\n number = tape.metadata.task.get(\"number\", \"\")\n n = f\"{tape.metadata.task.get('Level', '')}.{number} \" if number else \"\"\n name = tape.steps[0].content[\"content\"][:32] + \"...\"\n return f\"{n}({len(tape.steps)}){mark}{name}\"\n\n def get_exp_label(self, filename: str, tapes: list[Tape]) -> str:\n acc, n_solved = self.calculate_accuracy(tapes)\n errors = defaultdict(int)\n prompt_tokens_num = 0\n output_tokens_num = 0\n total_cost = 0.0\n visible_prompt_tokens_num = 0\n visible_output_tokens_num = 0\n visible_cost = 0.0\n no_result = 0\n actions = defaultdict(int)\n for llm_call in self.llm_calls.values():\n prompt_tokens_num += llm_call.prompt_length_tokens\n output_tokens_num += llm_call.output_length_tokens\n total_cost += llm_call.cost\n avg_steps = np.mean([len(tape) for tape in tapes])\n std_steps = np.std([len(tape) for tape in tapes])\n for tape in tapes:\n if tape.metadata.truncated:\n no_result += 1\n if tape.metadata.error:\n errors[\"fatal\"] += 1\n last_action = None\n counted = set([])\n for step in tape:\n step_dict = step.content.copy()\n kind = step_dict.get(\"kind\", \"unknown\")\n llm_call = self.llm_calls.get(step.metadata.prompt_id)\n if llm_call and step.metadata.prompt_id not in counted:\n counted.add(step.metadata.prompt_id)\n visible_prompt_tokens_num += llm_call.prompt_length_tokens\n visible_output_tokens_num += llm_call.output_length_tokens\n visible_cost += llm_call.cost\n if kind.endswith(\"action\"):\n actions[kind] += 1\n last_action = kind\n if error := self.get_step_error(step_dict, last_action):\n errors[error] += 1\n timers, timer_counts = self.aggregate_timer_times(tapes)\n html = f\"

Solved {acc:.2f}%, {n_solved} out of {len(tapes)}\"\n        if \"all\" in filename:\n            html += f\"Prompt tokens: {prompt_tokens_num} Output tokens: {output_tokens_num} Cost: {total_cost:.2f} USD Visible\"\n        html += f\"Prompt tokens: {visible_prompt_tokens_num} Output tokens: {visible_output_tokens_num} Cost: {visible_cost:.2f} USD\"\n        html += f\"Steps per tape: {avg_steps:.1f} ± {std_steps:.1f}\"\n        if errors:\n            errors_str = \" \".join(f\"{k}: {v}\" for k, v in errors.items())\n            html += f\"No result: {no_result}\"\n            html += f\"Errors: {sum(errors.values())} {errors_str}\"\n        if actions:\n            actions_str = \" \".join(f\"{k}: {v}\" for k, v in actions.items())\n            html += f\"Actions: {sum(actions.values())} {actions_str}\"\n        if timers:\n            timers_str = \" \".join(\n                f\"{'execute ' if k.endswith('action') else ''}{k}: {v:.1f} sec, avg. {v/timer_counts[k]:.1f} sec\"\n                for k, v in timers.items()\n            )\n            html += f\"Timings 
{timers_str}\"\n return html\n\n def get_step_error(self, step_dict: dict, last_action: str | None) -> str:\n return step_error(step_dict, last_action)\n\n def calculate_accuracy(self, tapes: list[Tape]) -> tuple[float, int]:\n solved = [tape.metadata.reward for tape in tapes]\n accuracy = 100 * (sum(solved) / len(solved) if solved else 0.0)\n return accuracy, int(sum(solved))\n\n def aggregate_timer_times(self, tapes: list[Tape]):\n timer_sums = defaultdict(float)\n timer_counts = defaultdict(int)\n for tape in tapes:\n timers = tape.metadata.other.get(\"timers\", {})\n for timer_name, exec_time in timers.items():\n timer_sums[timer_name] += exec_time\n timer_counts[timer_name] += 1\n for step in tape.steps:\n action_kind = step.metadata.other.get(\"action_kind\")\n action_execution_time = step.metadata.other.get(\"action_execution_time\")\n if action_kind and action_execution_time:\n timer_sums[action_kind] += action_execution_time\n timer_counts[action_kind] += 1\n return dict(timer_sums), dict(timer_counts)\n\n def load_tapes(self, exp_dir: str) -> list[Tape]:\n tapes: list[Tape] = []\n fpath = Path(self.tapes_folder) / exp_dir\n for json_file in fpath.rglob(\"tape.json\"):\n if json_file.stat().st_size == 0:\n logger.warning(f\"Empty tape file: {json_file}\")\n continue\n try:\n with open(json_file) as f:\n tape_dict = json.load(f)\n tape = Tape(steps=[], metadata=ExtendedMetadata(**tape_dict[\"metadata\"]))\n tape.steps = [\n WrapperStep(content=s, metadata=StepMetadata(**s[\"metadata\"]))\n for s in tape_dict[\"steps\"]\n ]\n tapes.append(tape)\n except Exception as e:\n logger.warning(f\"Failed to load {json_file}: {e}\")\n logger.info(f\"Loaded {len(tapes)} tapes from {fpath}\")\n self.exp_path = fpath\n return sorted(\n tapes,\n key=lambda x: f\"{x.metadata.task.get('Level', '')}{x.metadata.task.get('number', 0):03d}\",\n )\n\n def save_annotation(self, step: int, annotation: str, tape_id: int):\n pass\n\n\nif __name__ == \"__main__\":\n results_dir = sys.argv[1] if len(sys.argv) > 1 else \"~/agentlab_results/\"\n tapes_browser = TapesBrowser(Path(results_dir).expanduser())\n tapes_browser.launch()","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.style","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.style#L33-L35","kind":"function","name":"style","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":33,"end_line":35,"context_start_line":13,"context_end_line":55,"code":"\nfrom agentlab.agents.tapeagent.agent import ExtendedMetadata, Tape\nfrom agentlab.benchmarks.gaia import step_error\n\nlogger = logging.getLogger(__name__)\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\nclass WrapperStep(Step):\n content: dict\n\n\ndef pretty_yaml(data: dict | None) -> str:\n return yaml.dump(data, sort_keys=False, indent=2) if data else \"\"\n\n\nclass TapesRender(CameraReadyRenderer):\n\n @property\n def style(self):\n style = \"\"\n return super().style + style\n\n def render_step(self, step: WrapperStep, index: int, **kwargs):\n step_dict = step.content.copy()\n step_dict.pop(\"metadata\", None)\n kind = step_dict.pop(\"kind\", \"Step\")\n if kind == \"set_next_node\":\n return \"\"\n # remove empty keys\n step_dict = {k: v for k, v in step_dict.items() if v is not None and v != \"\"}\n if 
len(step_dict) == 1:\n            content = list(step_dict.values())[0]\n        elif kind == \"page_observation\":\n            content = step_dict.get(\"text\", pretty_yaml(step_dict))\n            if len(content) > 100:\n                summary = content[:100]\n                content = f\"{summary}---{content}
\"\n elif kind == \"python_code_action\":\n content = step_dict.get(\"code\", pretty_yaml(step_dict))\n elif kind == \"code_execution_result\":\n content = pretty_yaml(step_dict.get(\"result\"))","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.render_step","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.render_step#L37-L73","kind":"function","name":"render_step","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":37,"end_line":73,"context_start_line":17,"context_end_line":93,"code":"logger = logging.getLogger(__name__)\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\nclass WrapperStep(Step):\n content: dict\n\n\ndef pretty_yaml(data: dict | None) -> str:\n return yaml.dump(data, sort_keys=False, indent=2) if data else \"\"\n\n\nclass TapesRender(CameraReadyRenderer):\n\n @property\n def style(self):\n style = \"\"\n return super().style + style\n\n def render_step(self, step: WrapperStep, index: int, **kwargs):\n step_dict = step.content.copy()\n step_dict.pop(\"metadata\", None)\n kind = step_dict.pop(\"kind\", \"Step\")\n if kind == \"set_next_node\":\n return \"\"\n # remove empty keys\n step_dict = {k: v for k, v in step_dict.items() if v is not None and v != \"\"}\n if len(step_dict) == 1:\n content = list(step_dict.values())[0]\n elif kind == \"page_observation\":\n content = step_dict.get(\"text\", pretty_yaml(step_dict))\n if len(content) > 100:\n summary = content[:100]\n content = f\"
{summary}---{content}\"\n        elif kind == \"python_code_action\":\n            content = step_dict.get(\"code\", pretty_yaml(step_dict))\n        elif kind == \"code_execution_result\":\n            content = pretty_yaml(step_dict.get(\"result\"))\n        elif len(step_dict) == 1 and \"content\" in step_dict:\n            content = step_dict[\"content\"]\n        elif len(step_dict) == 1 and \"reasoning\" in step_dict:\n            content = step_dict[\"reasoning\"]\n        else:\n            content = pretty_yaml(step_dict)\n\n        if step_dict.get(\"error\") or step_dict.get(\"result\", {}).get(\"exit_code\"):\n            class_ = \"error\"\n        elif kind.endswith(\"thought\"):\n            class_ = \"thought\"\n            kind = kind[:-8]\n        elif kind.endswith(\"action\"):\n            class_ = \"action\"\n            kind = kind[:-7]\n        else:\n            class_ = \"observation\"\n        return f\"{kind} {content}
\"\n\n\nclass TapesBrowser(TapeBrowser):\n def __init__(self, tapes_folder):\n super().__init__(Tape, tapes_folder, TapesRender(), \".json\")\n\n def get_tape_files(self) -> list[str]:\n logger.info(f\"Searching for tapes in {self.tapes_folder}\")\n fpath = Path(self.tapes_folder)\n exps = [\n str(exp_dir.relative_to(fpath))\n for exp_dir in fpath.iterdir()\n if exp_dir.is_dir() and len(list(exp_dir.rglob(\"tape.json\"))) > 0\n ]\n assert exps, f\"No experiments found in {self.tapes_folder}\"\n logger.info(f\"Found {len(exps)} experiments in {self.tapes_folder}\")\n return sorted(exps)\n\n def get_steps(self, tape: dict) -> list:\n return tape[\"steps\"]","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.__init__","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.__init__#L77-L78","kind":"function","name":"__init__","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":77,"end_line":78,"context_start_line":57,"context_end_line":98,"code":" content = step_dict[\"content\"]\n elif len(step_dict) == 1 and \"reasoning\" in step_dict:\n content = step_dict[\"reasoning\"]\n else:\n content = pretty_yaml(step_dict)\n\n if step_dict.get(\"error\") or step_dict.get(\"result\", {}).get(\"exit_code\"):\n class_ = \"error\"\n elif kind.endswith(\"thought\"):\n class_ = \"thought\"\n kind = kind[:-8]\n elif kind.endswith(\"action\"):\n class_ = \"action\"\n kind = kind[:-7]\n else:\n class_ = \"observation\"\n return f\"

{kind} {content}
\"\n\n\nclass TapesBrowser(TapeBrowser):\n def __init__(self, tapes_folder):\n super().__init__(Tape, tapes_folder, TapesRender(), \".json\")\n\n def get_tape_files(self) -> list[str]:\n logger.info(f\"Searching for tapes in {self.tapes_folder}\")\n fpath = Path(self.tapes_folder)\n exps = [\n str(exp_dir.relative_to(fpath))\n for exp_dir in fpath.iterdir()\n if exp_dir.is_dir() and len(list(exp_dir.rglob(\"tape.json\"))) > 0\n ]\n assert exps, f\"No experiments found in {self.tapes_folder}\"\n logger.info(f\"Found {len(exps)} experiments in {self.tapes_folder}\")\n return sorted(exps)\n\n def get_steps(self, tape: dict) -> list:\n return tape[\"steps\"]\n\n def load_llm_calls(self):\n sqlite_path = self.exp_path / \"tapedata.sqlite\"\n if sqlite_path.exists():\n try:","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.get_tape_files","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.get_tape_files#L80-L90","kind":"function","name":"get_tape_files","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":80,"end_line":90,"context_start_line":60,"context_end_line":110,"code":" else:\n content = pretty_yaml(step_dict)\n\n if step_dict.get(\"error\") or step_dict.get(\"result\", {}).get(\"exit_code\"):\n class_ = \"error\"\n elif kind.endswith(\"thought\"):\n class_ = \"thought\"\n kind = kind[:-8]\n elif kind.endswith(\"action\"):\n class_ = \"action\"\n kind = kind[:-7]\n else:\n class_ = \"observation\"\n return f\"

{kind} {content}
\"\n\n\nclass TapesBrowser(TapeBrowser):\n def __init__(self, tapes_folder):\n super().__init__(Tape, tapes_folder, TapesRender(), \".json\")\n\n def get_tape_files(self) -> list[str]:\n logger.info(f\"Searching for tapes in {self.tapes_folder}\")\n fpath = Path(self.tapes_folder)\n exps = [\n str(exp_dir.relative_to(fpath))\n for exp_dir in fpath.iterdir()\n if exp_dir.is_dir() and len(list(exp_dir.rglob(\"tape.json\"))) > 0\n ]\n assert exps, f\"No experiments found in {self.tapes_folder}\"\n logger.info(f\"Found {len(exps)} experiments in {self.tapes_folder}\")\n return sorted(exps)\n\n def get_steps(self, tape: dict) -> list:\n return tape[\"steps\"]\n\n def load_llm_calls(self):\n sqlite_path = self.exp_path / \"tapedata.sqlite\"\n if sqlite_path.exists():\n try:\n self.llm_calls = {\n call.prompt.id: call for call in retrieve_all_llm_calls(str(sqlite_path))\n }\n logger.info(f\"Loaded {len(self.llm_calls)} LLM calls from {sqlite_path}\")\n except Exception as e:\n logger.warning(f\"Failed to load LLM calls from {sqlite_path}: {e}\")\n else:\n logger.warning(f\"{sqlite_path} not found\")\n\n def get_context(self, tape: Tape) -> list:\n return []\n","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.get_steps","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.get_steps#L92-L93","kind":"function","name":"get_steps","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":92,"end_line":93,"context_start_line":72,"context_end_line":113,"code":" class_ = \"observation\"\n return f\"

{kind} {content}
\"\n\n\nclass TapesBrowser(TapeBrowser):\n def __init__(self, tapes_folder):\n super().__init__(Tape, tapes_folder, TapesRender(), \".json\")\n\n def get_tape_files(self) -> list[str]:\n logger.info(f\"Searching for tapes in {self.tapes_folder}\")\n fpath = Path(self.tapes_folder)\n exps = [\n str(exp_dir.relative_to(fpath))\n for exp_dir in fpath.iterdir()\n if exp_dir.is_dir() and len(list(exp_dir.rglob(\"tape.json\"))) > 0\n ]\n assert exps, f\"No experiments found in {self.tapes_folder}\"\n logger.info(f\"Found {len(exps)} experiments in {self.tapes_folder}\")\n return sorted(exps)\n\n def get_steps(self, tape: dict) -> list:\n return tape[\"steps\"]\n\n def load_llm_calls(self):\n sqlite_path = self.exp_path / \"tapedata.sqlite\"\n if sqlite_path.exists():\n try:\n self.llm_calls = {\n call.prompt.id: call for call in retrieve_all_llm_calls(str(sqlite_path))\n }\n logger.info(f\"Loaded {len(self.llm_calls)} LLM calls from {sqlite_path}\")\n except Exception as e:\n logger.warning(f\"Failed to load LLM calls from {sqlite_path}: {e}\")\n else:\n logger.warning(f\"{sqlite_path} not found\")\n\n def get_context(self, tape: Tape) -> list:\n return []\n\n def get_tape_name(self, i: int, tape: Tape) -> str:\n errors = [\n bool(s.content.get(\"error\", False) or s.content.get(\"result\", {}).get(\"exit_code\"))","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.load_llm_calls","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.load_llm_calls#L95-L106","kind":"function","name":"load_llm_calls","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":95,"end_line":106,"context_start_line":75,"context_end_line":126,"code":"\nclass TapesBrowser(TapeBrowser):\n def __init__(self, tapes_folder):\n super().__init__(Tape, tapes_folder, TapesRender(), \".json\")\n\n def get_tape_files(self) -> list[str]:\n logger.info(f\"Searching for tapes in {self.tapes_folder}\")\n fpath = Path(self.tapes_folder)\n exps = [\n str(exp_dir.relative_to(fpath))\n for exp_dir in fpath.iterdir()\n if exp_dir.is_dir() and len(list(exp_dir.rglob(\"tape.json\"))) > 0\n ]\n assert exps, f\"No experiments found in {self.tapes_folder}\"\n logger.info(f\"Found {len(exps)} experiments in {self.tapes_folder}\")\n return sorted(exps)\n\n def get_steps(self, tape: dict) -> list:\n return tape[\"steps\"]\n\n def load_llm_calls(self):\n sqlite_path = self.exp_path / \"tapedata.sqlite\"\n if sqlite_path.exists():\n try:\n self.llm_calls = {\n call.prompt.id: call for call in retrieve_all_llm_calls(str(sqlite_path))\n }\n logger.info(f\"Loaded {len(self.llm_calls)} LLM calls from {sqlite_path}\")\n except Exception as e:\n logger.warning(f\"Failed to load LLM calls from {sqlite_path}: {e}\")\n else:\n logger.warning(f\"{sqlite_path} not found\")\n\n def get_context(self, tape: Tape) -> list:\n return []\n\n def get_tape_name(self, i: int, tape: Tape) -> str:\n errors = [\n bool(s.content.get(\"error\", False) or s.content.get(\"result\", {}).get(\"exit_code\"))\n for s in tape.steps\n ]\n mark = \"✅ \" if tape.metadata.reward > 0 else \"\"\n if any(errors):\n mark = \"⚠ \"\n if tape.metadata.task.get(\"file_name\"):\n mark += \"📁 \"\n number = tape.metadata.task.get(\"number\", \"\")\n n = f\"{tape.metadata.task.get('Level', '')}.{number} \" if number else \"\"\n name = tape.steps[0].content[\"content\"][:32] + \"...\"\n return f\"{n}({len(tape.steps)}){mark}{name}\"\n\n def get_exp_label(self, 
filename: str, tapes: list[Tape]) -> str:","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.get_context","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.get_context#L108-L109","kind":"function","name":"get_context","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":108,"end_line":109,"context_start_line":88,"context_end_line":129,"code":" assert exps, f\"No experiments found in {self.tapes_folder}\"\n logger.info(f\"Found {len(exps)} experiments in {self.tapes_folder}\")\n return sorted(exps)\n\n def get_steps(self, tape: dict) -> list:\n return tape[\"steps\"]\n\n def load_llm_calls(self):\n sqlite_path = self.exp_path / \"tapedata.sqlite\"\n if sqlite_path.exists():\n try:\n self.llm_calls = {\n call.prompt.id: call for call in retrieve_all_llm_calls(str(sqlite_path))\n }\n logger.info(f\"Loaded {len(self.llm_calls)} LLM calls from {sqlite_path}\")\n except Exception as e:\n logger.warning(f\"Failed to load LLM calls from {sqlite_path}: {e}\")\n else:\n logger.warning(f\"{sqlite_path} not found\")\n\n def get_context(self, tape: Tape) -> list:\n return []\n\n def get_tape_name(self, i: int, tape: Tape) -> str:\n errors = [\n bool(s.content.get(\"error\", False) or s.content.get(\"result\", {}).get(\"exit_code\"))\n for s in tape.steps\n ]\n mark = \"✅ \" if tape.metadata.reward > 0 else \"\"\n if any(errors):\n mark = \"⚠ \"\n if tape.metadata.task.get(\"file_name\"):\n mark += \"📁 \"\n number = tape.metadata.task.get(\"number\", \"\")\n n = f\"{tape.metadata.task.get('Level', '')}.{number} \" if number else \"\"\n name = tape.steps[0].content[\"content\"][:32] + \"...\"\n return f\"{n}({len(tape.steps)}){mark}{name}\"\n\n def get_exp_label(self, filename: str, tapes: list[Tape]) -> str:\n acc, n_solved = self.calculate_accuracy(tapes)\n errors = defaultdict(int)\n prompt_tokens_num = 0","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.get_tape_name","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.get_tape_name#L111-L124","kind":"function","name":"get_tape_name","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":111,"end_line":124,"context_start_line":91,"context_end_line":144,"code":"\n def get_steps(self, tape: dict) -> list:\n return tape[\"steps\"]\n\n def load_llm_calls(self):\n sqlite_path = self.exp_path / \"tapedata.sqlite\"\n if sqlite_path.exists():\n try:\n self.llm_calls = {\n call.prompt.id: call for call in retrieve_all_llm_calls(str(sqlite_path))\n }\n logger.info(f\"Loaded {len(self.llm_calls)} LLM calls from {sqlite_path}\")\n except Exception as e:\n logger.warning(f\"Failed to load LLM calls from {sqlite_path}: {e}\")\n else:\n logger.warning(f\"{sqlite_path} not found\")\n\n def get_context(self, tape: Tape) -> list:\n return []\n\n def get_tape_name(self, i: int, tape: Tape) -> str:\n errors = [\n bool(s.content.get(\"error\", False) or s.content.get(\"result\", {}).get(\"exit_code\"))\n for s in tape.steps\n ]\n mark = \"✅ \" if tape.metadata.reward > 0 else \"\"\n if any(errors):\n mark = \"⚠ \"\n if tape.metadata.task.get(\"file_name\"):\n mark += \"📁 \"\n number = tape.metadata.task.get(\"number\", \"\")\n n = f\"{tape.metadata.task.get('Level', '')}.{number} \" if number else \"\"\n name = tape.steps[0].content[\"content\"][:32] + \"...\"\n return 
f\"{n}({len(tape.steps)}){mark}{name}\"\n\n def get_exp_label(self, filename: str, tapes: list[Tape]) -> str:\n acc, n_solved = self.calculate_accuracy(tapes)\n errors = defaultdict(int)\n prompt_tokens_num = 0\n output_tokens_num = 0\n total_cost = 0.0\n visible_prompt_tokens_num = 0\n visible_output_tokens_num = 0\n visible_cost = 0.0\n no_result = 0\n actions = defaultdict(int)\n for llm_call in self.llm_calls.values():\n prompt_tokens_num += llm_call.prompt_length_tokens\n output_tokens_num += llm_call.output_length_tokens\n total_cost += llm_call.cost\n avg_steps = np.mean([len(tape) for tape in tapes])\n std_steps = np.std([len(tape) for tape in tapes])\n for tape in tapes:\n if tape.metadata.truncated:","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.get_exp_label","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.get_exp_label#L126-L183","kind":"function","name":"get_exp_label","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":126,"end_line":183,"context_start_line":106,"context_end_line":203,"code":" logger.warning(f\"{sqlite_path} not found\")\n\n def get_context(self, tape: Tape) -> list:\n return []\n\n def get_tape_name(self, i: int, tape: Tape) -> str:\n errors = [\n bool(s.content.get(\"error\", False) or s.content.get(\"result\", {}).get(\"exit_code\"))\n for s in tape.steps\n ]\n mark = \"✅ \" if tape.metadata.reward > 0 else \"\"\n if any(errors):\n mark = \"⚠ \"\n if tape.metadata.task.get(\"file_name\"):\n mark += \"📁 \"\n number = tape.metadata.task.get(\"number\", \"\")\n n = f\"{tape.metadata.task.get('Level', '')}.{number} \" if number else \"\"\n name = tape.steps[0].content[\"content\"][:32] + \"...\"\n return f\"{n}({len(tape.steps)}){mark}{name}\"\n\n def get_exp_label(self, filename: str, tapes: list[Tape]) -> str:\n acc, n_solved = self.calculate_accuracy(tapes)\n errors = defaultdict(int)\n prompt_tokens_num = 0\n output_tokens_num = 0\n total_cost = 0.0\n visible_prompt_tokens_num = 0\n visible_output_tokens_num = 0\n visible_cost = 0.0\n no_result = 0\n actions = defaultdict(int)\n for llm_call in self.llm_calls.values():\n prompt_tokens_num += llm_call.prompt_length_tokens\n output_tokens_num += llm_call.output_length_tokens\n total_cost += llm_call.cost\n avg_steps = np.mean([len(tape) for tape in tapes])\n std_steps = np.std([len(tape) for tape in tapes])\n for tape in tapes:\n if tape.metadata.truncated:\n no_result += 1\n if tape.metadata.error:\n errors[\"fatal\"] += 1\n last_action = None\n counted = set([])\n for step in tape:\n step_dict = step.content.copy()\n kind = step_dict.get(\"kind\", \"unknown\")\n llm_call = self.llm_calls.get(step.metadata.prompt_id)\n if llm_call and step.metadata.prompt_id not in counted:\n counted.add(step.metadata.prompt_id)\n visible_prompt_tokens_num += llm_call.prompt_length_tokens\n visible_output_tokens_num += llm_call.output_length_tokens\n visible_cost += llm_call.cost\n if kind.endswith(\"action\"):\n actions[kind] += 1\n last_action = kind\n if error := self.get_step_error(step_dict, last_action):\n errors[error] += 1\n timers, timer_counts = self.aggregate_timer_times(tapes)\n html = f\"
<h2>Solved {acc:.2f}%, {n_solved} out of {len(tapes)}</h2>\"\n        if \"all\" in filename:\n            html += f\"Prompt tokens: {prompt_tokens_num}<br>Output tokens: {output_tokens_num}<br>Cost: {total_cost:.2f} USD<h3>Visible</h3>\"\n        html += f\"Prompt tokens: {visible_prompt_tokens_num}<br>Output tokens: {visible_output_tokens_num}<br>Cost: {visible_cost:.2f} USD\"\n        html += f\"<h3>Steps per tape: {avg_steps:.1f} ± {std_steps:.1f}</h3>\"\n        if errors:\n            errors_str = \"<br>\".join(f\"{k}: {v}\" for k, v in errors.items())\n            html += f\"<h3>No result: {no_result}</h3>\"\n            html += f\"<h3>Errors: {sum(errors.values())}</h3>{errors_str}\"\n        if actions:\n            actions_str = \"<br>\".join(f\"{k}: {v}\" for k, v in actions.items())\n            html += f\"<h3>Actions: {sum(actions.values())}</h3>{actions_str}\"\n        if timers:\n            timers_str = \"<br>\".join(\n                f\"{'execute ' if k.endswith('action') else ''}{k}: {v:.1f} sec, avg. {v/timer_counts[k]:.1f} sec\"\n                for k, v in timers.items()\n            )\n            html += f\"<h3>Timings</h3>
{timers_str}\"\n return html\n\n def get_step_error(self, step_dict: dict, last_action: str | None) -> str:\n return step_error(step_dict, last_action)\n\n def calculate_accuracy(self, tapes: list[Tape]) -> tuple[float, int]:\n solved = [tape.metadata.reward for tape in tapes]\n accuracy = 100 * (sum(solved) / len(solved) if solved else 0.0)\n return accuracy, int(sum(solved))\n\n def aggregate_timer_times(self, tapes: list[Tape]):\n timer_sums = defaultdict(float)\n timer_counts = defaultdict(int)\n for tape in tapes:\n timers = tape.metadata.other.get(\"timers\", {})\n for timer_name, exec_time in timers.items():\n timer_sums[timer_name] += exec_time\n timer_counts[timer_name] += 1\n for step in tape.steps:\n action_kind = step.metadata.other.get(\"action_kind\")\n action_execution_time = step.metadata.other.get(\"action_execution_time\")","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.get_step_error","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.get_step_error#L185-L186","kind":"function","name":"get_step_error","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":185,"end_line":186,"context_start_line":165,"context_end_line":206,"code":" html = f\"
<h2>Solved {acc:.2f}%, {n_solved} out of {len(tapes)}</h2>\"\n        if \"all\" in filename:\n            html += f\"Prompt tokens: {prompt_tokens_num}<br>Output tokens: {output_tokens_num}<br>Cost: {total_cost:.2f} USD<h3>Visible</h3>\"\n        html += f\"Prompt tokens: {visible_prompt_tokens_num}<br>Output tokens: {visible_output_tokens_num}<br>Cost: {visible_cost:.2f} USD\"\n        html += f\"<h3>Steps per tape: {avg_steps:.1f} ± {std_steps:.1f}</h3>\"\n        if errors:\n            errors_str = \"<br>\".join(f\"{k}: {v}\" for k, v in errors.items())\n            html += f\"<h3>No result: {no_result}</h3>\"\n            html += f\"<h3>Errors: {sum(errors.values())}</h3>{errors_str}\"\n        if actions:\n            actions_str = \"<br>\".join(f\"{k}: {v}\" for k, v in actions.items())\n            html += f\"<h3>Actions: {sum(actions.values())}</h3>{actions_str}\"\n        if timers:\n            timers_str = \"<br>\".join(\n                f\"{'execute ' if k.endswith('action') else ''}{k}: {v:.1f} sec, avg. {v/timer_counts[k]:.1f} sec\"\n                for k, v in timers.items()\n            )\n            html += f\"<h3>Timings</h3>
{timers_str}\"\n return html\n\n def get_step_error(self, step_dict: dict, last_action: str | None) -> str:\n return step_error(step_dict, last_action)\n\n def calculate_accuracy(self, tapes: list[Tape]) -> tuple[float, int]:\n solved = [tape.metadata.reward for tape in tapes]\n accuracy = 100 * (sum(solved) / len(solved) if solved else 0.0)\n return accuracy, int(sum(solved))\n\n def aggregate_timer_times(self, tapes: list[Tape]):\n timer_sums = defaultdict(float)\n timer_counts = defaultdict(int)\n for tape in tapes:\n timers = tape.metadata.other.get(\"timers\", {})\n for timer_name, exec_time in timers.items():\n timer_sums[timer_name] += exec_time\n timer_counts[timer_name] += 1\n for step in tape.steps:\n action_kind = step.metadata.other.get(\"action_kind\")\n action_execution_time = step.metadata.other.get(\"action_execution_time\")\n if action_kind and action_execution_time:\n timer_sums[action_kind] += action_execution_time\n timer_counts[action_kind] += 1","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.calculate_accuracy","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.calculate_accuracy#L188-L191","kind":"function","name":"calculate_accuracy","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":188,"end_line":191,"context_start_line":168,"context_end_line":211,"code":" html += f\"Prompt tokens: {visible_prompt_tokens_num}
<br>Output tokens: {visible_output_tokens_num}<br>Cost: {visible_cost:.2f} USD\"\n        html += f\"<h3>Steps per tape: {avg_steps:.1f} ± {std_steps:.1f}</h3>\"\n        if errors:\n            errors_str = \"<br>\".join(f\"{k}: {v}\" for k, v in errors.items())\n            html += f\"<h3>No result: {no_result}</h3>\"\n            html += f\"<h3>Errors: {sum(errors.values())}</h3>{errors_str}\"\n        if actions:\n            actions_str = \"<br>\".join(f\"{k}: {v}\" for k, v in actions.items())\n            html += f\"<h3>Actions: {sum(actions.values())}</h3>{actions_str}\"\n        if timers:\n            timers_str = \"<br>\".join(\n                f\"{'execute ' if k.endswith('action') else ''}{k}: {v:.1f} sec, avg. {v/timer_counts[k]:.1f} sec\"\n                for k, v in timers.items()\n            )\n            html += f\"<h3>Timings</h3>
{timers_str}\"\n return html\n\n def get_step_error(self, step_dict: dict, last_action: str | None) -> str:\n return step_error(step_dict, last_action)\n\n def calculate_accuracy(self, tapes: list[Tape]) -> tuple[float, int]:\n solved = [tape.metadata.reward for tape in tapes]\n accuracy = 100 * (sum(solved) / len(solved) if solved else 0.0)\n return accuracy, int(sum(solved))\n\n def aggregate_timer_times(self, tapes: list[Tape]):\n timer_sums = defaultdict(float)\n timer_counts = defaultdict(int)\n for tape in tapes:\n timers = tape.metadata.other.get(\"timers\", {})\n for timer_name, exec_time in timers.items():\n timer_sums[timer_name] += exec_time\n timer_counts[timer_name] += 1\n for step in tape.steps:\n action_kind = step.metadata.other.get(\"action_kind\")\n action_execution_time = step.metadata.other.get(\"action_execution_time\")\n if action_kind and action_execution_time:\n timer_sums[action_kind] += action_execution_time\n timer_counts[action_kind] += 1\n return dict(timer_sums), dict(timer_counts)\n\n def load_tapes(self, exp_dir: str) -> list[Tape]:\n tapes: list[Tape] = []\n fpath = Path(self.tapes_folder) / exp_dir","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.aggregate_timer_times","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.aggregate_timer_times#L193-L207","kind":"function","name":"aggregate_timer_times","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":193,"end_line":207,"context_start_line":173,"context_end_line":227,"code":" html += f\"
<h3>Errors: {sum(errors.values())}</h3>{errors_str}\"\n        if actions:\n            actions_str = \"<br>\".join(f\"{k}: {v}\" for k, v in actions.items())\n            html += f\"<h3>Actions: {sum(actions.values())}</h3>{actions_str}\"\n        if timers:\n            timers_str = \"<br>\".join(\n                f\"{'execute ' if k.endswith('action') else ''}{k}: {v:.1f} sec, avg. {v/timer_counts[k]:.1f} sec\"\n                for k, v in timers.items()\n            )\n            html += f\"<h3>Timings</h3>
{timers_str}\"\n return html\n\n def get_step_error(self, step_dict: dict, last_action: str | None) -> str:\n return step_error(step_dict, last_action)\n\n def calculate_accuracy(self, tapes: list[Tape]) -> tuple[float, int]:\n solved = [tape.metadata.reward for tape in tapes]\n accuracy = 100 * (sum(solved) / len(solved) if solved else 0.0)\n return accuracy, int(sum(solved))\n\n def aggregate_timer_times(self, tapes: list[Tape]):\n timer_sums = defaultdict(float)\n timer_counts = defaultdict(int)\n for tape in tapes:\n timers = tape.metadata.other.get(\"timers\", {})\n for timer_name, exec_time in timers.items():\n timer_sums[timer_name] += exec_time\n timer_counts[timer_name] += 1\n for step in tape.steps:\n action_kind = step.metadata.other.get(\"action_kind\")\n action_execution_time = step.metadata.other.get(\"action_execution_time\")\n if action_kind and action_execution_time:\n timer_sums[action_kind] += action_execution_time\n timer_counts[action_kind] += 1\n return dict(timer_sums), dict(timer_counts)\n\n def load_tapes(self, exp_dir: str) -> list[Tape]:\n tapes: list[Tape] = []\n fpath = Path(self.tapes_folder) / exp_dir\n for json_file in fpath.rglob(\"tape.json\"):\n if json_file.stat().st_size == 0:\n logger.warning(f\"Empty tape file: {json_file}\")\n continue\n try:\n with open(json_file) as f:\n tape_dict = json.load(f)\n tape = Tape(steps=[], metadata=ExtendedMetadata(**tape_dict[\"metadata\"]))\n tape.steps = [\n WrapperStep(content=s, metadata=StepMetadata(**s[\"metadata\"]))\n for s in tape_dict[\"steps\"]\n ]\n tapes.append(tape)\n except Exception as e:\n logger.warning(f\"Failed to load {json_file}: {e}\")\n logger.info(f\"Loaded {len(tapes)} tapes from {fpath}\")","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.load_tapes","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.load_tapes#L209-L232","kind":"function","name":"load_tapes","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":209,"end_line":232,"context_start_line":189,"context_end_line":241,"code":" solved = [tape.metadata.reward for tape in tapes]\n accuracy = 100 * (sum(solved) / len(solved) if solved else 0.0)\n return accuracy, int(sum(solved))\n\n def aggregate_timer_times(self, tapes: list[Tape]):\n timer_sums = defaultdict(float)\n timer_counts = defaultdict(int)\n for tape in tapes:\n timers = tape.metadata.other.get(\"timers\", {})\n for timer_name, exec_time in timers.items():\n timer_sums[timer_name] += exec_time\n timer_counts[timer_name] += 1\n for step in tape.steps:\n action_kind = step.metadata.other.get(\"action_kind\")\n action_execution_time = step.metadata.other.get(\"action_execution_time\")\n if action_kind and action_execution_time:\n timer_sums[action_kind] += action_execution_time\n timer_counts[action_kind] += 1\n return dict(timer_sums), dict(timer_counts)\n\n def load_tapes(self, exp_dir: str) -> list[Tape]:\n tapes: list[Tape] = []\n fpath = Path(self.tapes_folder) / exp_dir\n for json_file in fpath.rglob(\"tape.json\"):\n if json_file.stat().st_size == 0:\n logger.warning(f\"Empty tape file: {json_file}\")\n continue\n try:\n with open(json_file) as f:\n tape_dict = json.load(f)\n tape = Tape(steps=[], metadata=ExtendedMetadata(**tape_dict[\"metadata\"]))\n tape.steps = [\n WrapperStep(content=s, metadata=StepMetadata(**s[\"metadata\"]))\n for s in tape_dict[\"steps\"]\n ]\n tapes.append(tape)\n except Exception as e:\n 
logger.warning(f\"Failed to load {json_file}: {e}\")\n logger.info(f\"Loaded {len(tapes)} tapes from {fpath}\")\n self.exp_path = fpath\n return sorted(\n tapes,\n key=lambda x: f\"{x.metadata.task.get('Level', '')}{x.metadata.task.get('number', 0):03d}\",\n )\n\n def save_annotation(self, step: int, annotation: str, tape_id: int):\n pass\n\n\nif __name__ == \"__main__\":\n results_dir = sys.argv[1] if len(sys.argv) > 1 else \"~/agentlab_results/\"\n tapes_browser = TapesBrowser(Path(results_dir).expanduser())\n tapes_browser.launch()","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.tapes.save_annotation","uri":"program://AgentLab/function/src.agentlab.analyze.tapes.save_annotation#L234-L235","kind":"function","name":"save_annotation","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":234,"end_line":235,"context_start_line":214,"context_end_line":241,"code":" logger.warning(f\"Empty tape file: {json_file}\")\n continue\n try:\n with open(json_file) as f:\n tape_dict = json.load(f)\n tape = Tape(steps=[], metadata=ExtendedMetadata(**tape_dict[\"metadata\"]))\n tape.steps = [\n WrapperStep(content=s, metadata=StepMetadata(**s[\"metadata\"]))\n for s in tape_dict[\"steps\"]\n ]\n tapes.append(tape)\n except Exception as e:\n logger.warning(f\"Failed to load {json_file}: {e}\")\n logger.info(f\"Loaded {len(tapes)} tapes from {fpath}\")\n self.exp_path = fpath\n return sorted(\n tapes,\n key=lambda x: f\"{x.metadata.task.get('Level', '')}{x.metadata.task.get('number', 0):03d}\",\n )\n\n def save_annotation(self, step: int, annotation: str, tape_id: int):\n pass\n\n\nif __name__ == \"__main__\":\n results_dir = sys.argv[1] if len(sys.argv) > 1 else \"~/agentlab_results/\"\n tapes_browser = TapesBrowser(Path(results_dir).expanduser())\n tapes_browser.launch()","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray","uri":"program://AgentLab/module/src.agentlab.analyze.agent_xray#L1-L1457","kind":"module","name":"src.agentlab.analyze.agent_xray","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1,"end_line":1457,"context_start_line":1,"context_end_line":1457,"code":"import base64\nimport html\nimport os\nimport traceback\nfrom copy import deepcopy\nfrom io import BytesIO\nfrom logging import warning\nfrom pathlib import Path\n\nimport gradio as gr\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom attr import dataclass\nfrom browsergym.experiments.loop import StepInfo as BGymStepInfo\nfrom langchain.schema import BaseMessage, HumanMessage\nfrom openai import OpenAI\nfrom openai.types.responses import ResponseFunctionToolCall\nfrom PIL import Image\n\nfrom agentlab.analyze import inspect_results\nfrom agentlab.analyze.episode_to_html import exp_result_to_html\nfrom agentlab.analyze.overlay_utils import annotate_action\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.loop import ExpResult, StepInfo\nfrom agentlab.experiments.study import get_most_recent_study\nfrom agentlab.llm.chat_api import make_system_message, make_user_message\nfrom agentlab.llm.llm_utils import BaseMessage as AgentLabBaseMessage\nfrom agentlab.llm.llm_utils import Discussion\nfrom agentlab.llm.response_api import MessageBuilder, 
ToolCalls\n\nselect_dir_instructions = \"Select Experiment Directory\"\nAGENT_NAME_KEY = \"agent.agent_name\"\nTASK_NAME_KEY = \"env.task_name\"\nTASK_SEED_KEY = \"env.task_seed\"\n\n\ndef display_table(df: pd.DataFrame):\n df = df.copy()\n df.columns = clean_column_names(df.columns)\n df.index.names = clean_column_names(df.index.names)\n return df\n\n\ndef remove_args_from_col(df: pd.DataFrame):\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n df.index.names = [col.replace(\"_args\", \"\") for col in df.index.names]\n return df\n\n\ndef clean_column_names(col_list):\n # col_list = [col.replace(\"_args\", \"\") for col in col_list]\n col_list = [col.replace(\".\", \".\\n\") for col in col_list] # adding space for word wrap\n # col_list = [col.replace(\"_\", \" \") for col in col_list]\n return col_list\n\n\nclass ClickMapper:\n def __init__(self, ax: plt.Axes, step_times: list[float]):\n self.ax = ax\n self.step_times = step_times\n\n def to_time(self, x_pix_coord):\n x_time_coord, _ = self.ax.transData.inverted().transform((x_pix_coord, 0))\n return x_time_coord\n\n def to_step(self, x_pix_coord):\n x_time_coord = self.to_time(x_pix_coord)\n return np.searchsorted(self.step_times, x_time_coord)\n\n\n@dataclass\nclass EpisodeId:\n agent_id: str = None\n task_name: str = None\n seed: int = None\n row_index: int = None # unique row index to disambiguate selections\n\n\n@dataclass\nclass StepId:\n episode_id: EpisodeId = None\n step: int = None\n\n\n@dataclass\nclass Info:\n results_dir: Path = None # to root directory of all experiments\n study_dirs: Path = None # the path of the currently selected experiment\n result_df: pd.DataFrame = None # the raw loaded df\n agent_df: pd.DataFrame = None # the df filtered for selected agent\n tasks_df: pd.DataFrame = None # the unique tasks for selected agent\n exp_result: ExpResult = None # the selected episode\n click_mapper: ClickMapper = None # mapping from profiler click to step\n step: int = None # currently selected step\n active_tab: str = \"Screenshot\" # currently selected observation tab\n agent_id_keys: list[str] = None # the list of columns identifying an agent\n\n def update_exp_result(self, episode_id: EpisodeId):\n if self.result_df is None or episode_id.task_name is None or episode_id.seed is None:\n self.exp_result = None\n\n # find unique row using idx\n result_df = self.agent_df.reset_index(inplace=False)\n sub_df = result_df[result_df[\"_row_index\"] == episode_id.row_index]\n if len(sub_df) == 0:\n self.exp_result = None\n raise ValueError(f\"Could not find _row_index: {episode_id.row_index}\")\n\n if len(sub_df) > 1:\n warning(\n f\"Found multiple rows with same row_index {episode_id.row_index} Using the first one.\"\n )\n exp_dir = sub_df.iloc[0][\"exp_dir\"]\n print(exp_dir)\n self.exp_result = ExpResult(exp_dir)\n self.step = 0\n\n def get_agent_id(self, row: pd.Series):\n agent_id = []\n for key in self.agent_id_keys:\n agent_id.append((key, row[key]))\n return agent_id\n\n def filter_agent_id(self, agent_id: list[tuple]):\n # Preserve a stable row index to disambiguate selections later\n tmp_df = self.result_df.reset_index(inplace=False)\n tmp_df[\"_row_index\"] = tmp_df.index\n tmp_df.set_index(TASK_NAME_KEY, inplace=True)\n\n for col, val in agent_id:\n col = col.replace(\".\\n\", \".\")\n tmp_df = tmp_df[tmp_df[col] == val]\n self.agent_df = tmp_df\n\n\ninfo = Info()\n\n\ncss = \"\"\"\n.my-markdown {\n max-height: 400px;\n overflow-y: auto;\n}\n.error-report {\n max-height: 700px;\n overflow-y: 
auto;\n}\n.my-code-view {\n max-height: 300px;\n overflow-y: auto;\n}\ncode {\n white-space: pre-wrap;\n}\nth {\n white-space: normal !important;\n word-wrap: break-word !important;\n}\n\"\"\"\n\n\ndef run_gradio(results_dir: Path):\n \"\"\"\n Run Gradio on the selected experiments saved at savedir_base.\n\n \"\"\"\n global info\n info.results_dir = results_dir\n\n with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:\n agent_id = gr.State(value=None)\n episode_id = gr.State(value=EpisodeId())\n agent_task_id = gr.State(value=None)\n step_id = gr.State(value=None)\n\n hidden_key_input = gr.Textbox(visible=False, elem_id=\"key_capture\")\n\n with gr.Accordion(\"Help\", open=False):\n gr.Markdown(\n \"\"\"\\\n# Agent X-Ray\n\n1. **Select your experiment directory**. You may refresh the list of directories by\nclicking the refresh button.\n\n2. **Select your episode**: Chose a triplet (agent, task, seed).\n\n 1. **Select Agent**: Click on a row of the table to select your agent\n\n 2. **Select Task**: Select the task you want to analyze, this will trigger\n an update of the available seeds.\n\n 3. **Select the Seed**: You might have multiple repetition for a given task,\n you will be able to select the seed you want to analyze.\n\n3. **Select the step**: Once your episode is selected, you can select the step\n by clicking on the profiling image. This will trigger the update of the the\n information on the corresponding step.\n\n4. **Select a tab**: You can select different visualization by clicking on the tabs.\n\"\"\"\n )\n with gr.Row():\n exp_dir_choice = gr.Dropdown(\n choices=get_directory_contents(results_dir),\n value=select_dir_instructions,\n multiselect=True,\n label=\"Experiment Directory\",\n show_label=False,\n scale=6,\n container=False,\n )\n refresh_button = gr.Button(\"↺\", scale=0, size=\"sm\")\n\n with gr.Tabs():\n with gr.Tab(\"Select Agent\"):\n with gr.Accordion(\"Agent Selector (click for help)\", open=False):\n gr.Markdown(\n \"\"\"\\\n Click on a row to select an agent. It will trigger the update of other\n fields.\n \n The update mechanism is somewhat flacky, please help figure out why (or is it just gradio?).\n \"\"\"\n )\n agent_table = gr.DataFrame(max_height=500, show_label=False, interactive=False)\n with gr.Tab(\"Select Task and Seed\", id=\"Select Task\"):\n with gr.Row():\n with gr.Column(scale=4):\n with gr.Row(): # combining the title (help) and the refresh button\n with gr.Accordion(\"Task Selector (click for help)\", open=False):\n gr.Markdown(\n \"\"\"\\\n Click on a row to select a task. It will trigger the update of other fields.\n\n The update mechanism is somewhat flacky, please help figure out why (or is it just gradio?).\n \"\"\"\n )\n refresh_results_button = gr.Button(\"↺\", scale=0, size=\"sm\")\n\n task_table = gr.DataFrame(\n max_height=500,\n show_label=False,\n interactive=False,\n elem_id=\"task_table\",\n )\n\n with gr.Column(scale=2):\n with gr.Accordion(\"Seed Selector (click for help)\", open=False):\n gr.Markdown(\n \"\"\"\\\n Click on a row to select a seed. 
It will trigger the update of other fields.\n\n The update mechanism is somewhat flacky, please help figure out why (or is it just gradio?).\n \"\"\"\n )\n\n seed_table = gr.DataFrame(\n max_height=500,\n show_label=False,\n interactive=False,\n elem_id=\"seed_table\",\n )\n\n with gr.Tab(\"Constants and Variables\"):\n with gr.Row():\n with gr.Column(scale=2):\n with gr.Accordion(\"Constants\", open=False):\n gr.Markdown(\n \"\"\"\\\n Constants are the parameters that are the same for **all** episodes of\n **all** agents. They are displayed as a table with the name and value of the\n constant.\"\"\"\n )\n constants = gr.DataFrame(\n max_height=500, show_label=False, interactive=False\n )\n with gr.Column(scale=2):\n with gr.Accordion(\"Variables\", open=False):\n gr.Markdown(\n \"\"\"\\\n Variables are the parameters that can change between episodes of an agent.\n They are displayed as a table with the name, value and count of unique\n values. A maximum of 3 different values are displayed.\"\"\"\n )\n variables = gr.DataFrame(\n max_height=500, show_label=False, interactive=False\n )\n with gr.Tab(\"Global Stats\"):\n global_stats = gr.DataFrame(max_height=500, show_label=False, interactive=False)\n\n with gr.Tab(\"Error Report\"):\n error_report = gr.Markdown(elem_classes=\"error-report\", show_copy_button=True)\n with gr.Row():\n episode_info = gr.Markdown(label=\"Episode Info\", elem_classes=\"my-markdown\")\n action_info = gr.Markdown(label=\"Action Info\", elem_classes=\"my-markdown\")\n state_error = gr.Markdown(label=\"Next Step Error\", elem_classes=\"my-markdown\")\n\n profiling_gr = gr.Image(\n label=\"Profiling\", show_label=False, interactive=False, show_download_button=False\n )\n\n gr.HTML(\n \"\"\"\n\n\"\"\"\n )\n with gr.Tabs() as tabs:\n code_args = dict(interactive=False, elem_classes=[\"code-container\"], show_label=False)\n with gr.Tab(\"Screenshot\") as tab_screenshot:\n som_or_not = gr.Dropdown(\n choices=[\"Raw Screenshots\", \"SOM Screenshots\"],\n label=\"Screenshot Type\",\n value=\"Raw Screenshots\",\n show_label=False,\n container=False,\n interactive=True,\n scale=0,\n )\n screenshot = gr.Image(\n show_label=False, interactive=False, show_download_button=False\n )\n\n with gr.Tab(\"Screenshot Pair\") as tab_screenshot_pair:\n with gr.Row():\n screenshot1 = gr.Image(\n show_label=False, interactive=False, show_download_button=False\n )\n screenshot2 = gr.Image(\n show_label=False, interactive=False, show_download_button=False\n )\n with gr.Tab(\"Screenshot Gallery\") as tab_screenshot_gallery:\n screenshot_gallery = gr.Gallery(\n columns=2,\n show_download_button=False,\n show_label=False,\n object_fit=\"contain\",\n preview=True,\n )\n\n with gr.Tab(\"Episode\") as tab_episode:\n episode = gr.HTML()\n\n with gr.Tab(\"DOM HTML\") as tab_html:\n html_code = gr.Code(language=\"html\", **code_args)\n\n with gr.Tab(\"Pruned DOM HTML\") as tab_pruned_html:\n pruned_html_code = gr.Code(language=\"html\", **code_args)\n\n with gr.Tab(\"AXTree\") as tab_axtree:\n axtree_code = gr.Markdown()\n\n with gr.Tab(\"Chat Messages\") as tab_chat:\n chat_messages = gr.Markdown()\n\n with gr.Tab(\"Task Error\") as tab_error:\n task_error = gr.Markdown()\n\n with gr.Tab(\"Logs\") as tab_logs:\n logs = gr.Code(language=None, **code_args)\n\n with gr.Tab(\"Stats\") as tab_stats:\n stats = gr.DataFrame(max_height=500, show_label=False, interactive=False)\n\n with gr.Tab(\"Agent Info HTML\") as tab_agent_info_html:\n with gr.Row():\n screenshot1_agent = gr.Image(\n show_label=False, 
interactive=False, show_download_button=False\n )\n screenshot2_agent = gr.Image(\n show_label=False, interactive=False, show_download_button=False\n )\n agent_info_html = gr.HTML()\n\n with gr.Tab(\"Agent Info MD\") as tab_agent_info_md:\n agent_info_md = gr.Markdown()\n\n with gr.Tab(\"Prompt tests\") as tab_prompt_tests:\n with gr.Row():\n prompt_markdown = gr.Textbox(\n value=\"\",\n label=\"\",\n show_label=False,\n interactive=False,\n elem_id=\"prompt_markdown\",\n )\n with gr.Column():\n prompt_tests_textbox = gr.Textbox(\n value=\"\",\n label=\"\",\n show_label=False,\n interactive=True,\n elem_id=\"prompt_tests_textbox\",\n )\n submit_button = gr.Button(value=\"Submit\")\n result_box = gr.Textbox(\n value=\"\", label=\"Result\", show_label=True, interactive=False\n )\n\n # Define the interaction\n submit_button.click(\n fn=submit_action, inputs=prompt_tests_textbox, outputs=result_box\n )\n\n # Handle Events #\n # ===============#\n\n refresh_button.click(\n fn=refresh_exp_dir_choices, inputs=exp_dir_choice, outputs=exp_dir_choice\n )\n\n refresh_results_button.click(\n fn=refresh_exp_dir_choices, inputs=exp_dir_choice, outputs=exp_dir_choice\n )\n\n exp_dir_choice.change(\n fn=new_exp_dir,\n inputs=exp_dir_choice,\n outputs=[agent_table, agent_id, constants, variables, global_stats, error_report],\n )\n\n agent_table.select(fn=on_select_agent, inputs=agent_table, outputs=[agent_id])\n task_table.select(fn=on_select_task, inputs=[task_table, agent_id], outputs=agent_task_id)\n\n agent_id.change(fn=new_agent_id, inputs=agent_id, outputs=[task_table, agent_task_id])\n agent_task_id.change(\n fn=update_seeds, inputs=agent_task_id, outputs=[seed_table, episode_id]\n )\n # seed_gr.change(fn=on_select_seed, inputs=[seed_gr, task_name], outputs=[episode_id])\n seed_table.select(on_select_seed, inputs=[seed_table, agent_task_id], outputs=episode_id)\n step_id.change(fn=update_step_info, outputs=[episode_info, action_info, state_error])\n episode_id.change(fn=new_episode, inputs=[episode_id], outputs=[profiling_gr, step_id])\n profiling_gr.select(select_step, inputs=[episode_id], outputs=step_id)\n\n # Update all tabs on step change, but only actually update the active\n # tab. 
This helps keeping the UI responsive when selecting a new step.\n step_id.change(\n fn=if_active(\"Screenshot\")(update_screenshot),\n inputs=som_or_not,\n outputs=screenshot,\n )\n step_id.change(\n fn=if_active(\"Screenshot Pair\", 2)(update_screenshot_pair),\n inputs=som_or_not,\n outputs=[screenshot1, screenshot2],\n )\n step_id.change(\n fn=if_active(\"Screenshot Gallery\")(update_screenshot_gallery),\n inputs=som_or_not,\n outputs=[screenshot_gallery],\n )\n screenshot_gallery.select(fn=gallery_step_change, inputs=episode_id, outputs=step_id)\n episode_id.change(fn=if_active(\"Episode\")(update_episode), outputs=episode)\n step_id.change(fn=if_active(\"DOM HTML\")(update_html), outputs=html_code)\n step_id.change(\n fn=if_active(\"Pruned DOM HTML\")(update_pruned_html), outputs=pruned_html_code\n )\n step_id.change(fn=if_active(\"AXTree\")(update_axtree), outputs=axtree_code)\n step_id.change(fn=if_active(\"Chat Messages\")(update_chat_messages), outputs=chat_messages)\n step_id.change(fn=if_active(\"Task Error\")(update_task_error), outputs=task_error)\n step_id.change(fn=if_active(\"Logs\")(update_logs), outputs=logs)\n step_id.change(fn=if_active(\"Stats\")(update_stats), outputs=stats)\n step_id.change(\n fn=if_active(\"Agent Info HTML\", 3)(update_agent_info_html),\n outputs=[agent_info_html, screenshot1_agent, screenshot2_agent],\n )\n step_id.change(fn=if_active(\"Agent Info MD\")(update_agent_info_md), outputs=agent_info_md)\n step_id.change(\n fn=if_active(\"Prompt tests\", 2)(update_prompt_tests),\n outputs=[prompt_markdown, prompt_tests_textbox],\n )\n\n # In order to handel tabs that were not visible when step was changed,\n # we need to update them individually when the tab is selected\n tab_screenshot.select(fn=update_screenshot, inputs=som_or_not, outputs=screenshot)\n tab_screenshot_pair.select(\n fn=update_screenshot_pair, inputs=som_or_not, outputs=[screenshot1, screenshot2]\n )\n tab_screenshot_gallery.select(\n fn=update_screenshot_gallery, inputs=som_or_not, outputs=[screenshot_gallery]\n )\n tab_episode.select(fn=update_episode, outputs=episode)\n tab_html.select(fn=update_html, outputs=html_code)\n tab_pruned_html.select(fn=update_pruned_html, outputs=pruned_html_code)\n tab_axtree.select(fn=update_axtree, outputs=axtree_code)\n tab_chat.select(fn=update_chat_messages, outputs=chat_messages)\n tab_error.select(fn=update_task_error, outputs=task_error)\n tab_logs.select(fn=update_logs, outputs=logs)\n tab_stats.select(fn=update_stats, outputs=stats)\n tab_agent_info_html.select(fn=update_agent_info_html, outputs=agent_info_html)\n tab_agent_info_md.select(fn=update_agent_info_md, outputs=agent_info_md)\n tab_prompt_tests.select(\n fn=update_prompt_tests, outputs=[prompt_markdown, prompt_tests_textbox]\n )\n\n som_or_not.change(fn=update_screenshot, inputs=som_or_not, output\n# ... 
truncated ...","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":true} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.display_table","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.display_table#L39-L43","kind":"function","name":"display_table","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":39,"end_line":43,"context_start_line":19,"context_end_line":63,"code":"from openai.types.responses import ResponseFunctionToolCall\nfrom PIL import Image\n\nfrom agentlab.analyze import inspect_results\nfrom agentlab.analyze.episode_to_html import exp_result_to_html\nfrom agentlab.analyze.overlay_utils import annotate_action\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.loop import ExpResult, StepInfo\nfrom agentlab.experiments.study import get_most_recent_study\nfrom agentlab.llm.chat_api import make_system_message, make_user_message\nfrom agentlab.llm.llm_utils import BaseMessage as AgentLabBaseMessage\nfrom agentlab.llm.llm_utils import Discussion\nfrom agentlab.llm.response_api import MessageBuilder, ToolCalls\n\nselect_dir_instructions = \"Select Experiment Directory\"\nAGENT_NAME_KEY = \"agent.agent_name\"\nTASK_NAME_KEY = \"env.task_name\"\nTASK_SEED_KEY = \"env.task_seed\"\n\n\ndef display_table(df: pd.DataFrame):\n df = df.copy()\n df.columns = clean_column_names(df.columns)\n df.index.names = clean_column_names(df.index.names)\n return df\n\n\ndef remove_args_from_col(df: pd.DataFrame):\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n df.index.names = [col.replace(\"_args\", \"\") for col in df.index.names]\n return df\n\n\ndef clean_column_names(col_list):\n # col_list = [col.replace(\"_args\", \"\") for col in col_list]\n col_list = [col.replace(\".\", \".\\n\") for col in col_list] # adding space for word wrap\n # col_list = [col.replace(\"_\", \" \") for col in col_list]\n return col_list\n\n\nclass ClickMapper:\n def __init__(self, ax: plt.Axes, step_times: list[float]):\n self.ax = ax\n self.step_times = step_times\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.remove_args_from_col","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.remove_args_from_col#L46-L49","kind":"function","name":"remove_args_from_col","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":46,"end_line":49,"context_start_line":26,"context_end_line":69,"code":"from agentlab.experiments.loop import ExpResult, StepInfo\nfrom agentlab.experiments.study import get_most_recent_study\nfrom agentlab.llm.chat_api import make_system_message, make_user_message\nfrom agentlab.llm.llm_utils import BaseMessage as AgentLabBaseMessage\nfrom agentlab.llm.llm_utils import Discussion\nfrom agentlab.llm.response_api import MessageBuilder, ToolCalls\n\nselect_dir_instructions = \"Select Experiment Directory\"\nAGENT_NAME_KEY = \"agent.agent_name\"\nTASK_NAME_KEY = \"env.task_name\"\nTASK_SEED_KEY = \"env.task_seed\"\n\n\ndef display_table(df: pd.DataFrame):\n df = df.copy()\n df.columns = clean_column_names(df.columns)\n df.index.names = clean_column_names(df.index.names)\n return df\n\n\ndef remove_args_from_col(df: pd.DataFrame):\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n df.index.names = [col.replace(\"_args\", \"\") for col in df.index.names]\n return df\n\n\ndef 
clean_column_names(col_list):\n # col_list = [col.replace(\"_args\", \"\") for col in col_list]\n col_list = [col.replace(\".\", \".\\n\") for col in col_list] # adding space for word wrap\n # col_list = [col.replace(\"_\", \" \") for col in col_list]\n return col_list\n\n\nclass ClickMapper:\n def __init__(self, ax: plt.Axes, step_times: list[float]):\n self.ax = ax\n self.step_times = step_times\n\n def to_time(self, x_pix_coord):\n x_time_coord, _ = self.ax.transData.inverted().transform((x_pix_coord, 0))\n return x_time_coord\n\n def to_step(self, x_pix_coord):\n x_time_coord = self.to_time(x_pix_coord)","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.clean_column_names","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.clean_column_names#L52-L56","kind":"function","name":"clean_column_names","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":52,"end_line":56,"context_start_line":32,"context_end_line":76,"code":"\nselect_dir_instructions = \"Select Experiment Directory\"\nAGENT_NAME_KEY = \"agent.agent_name\"\nTASK_NAME_KEY = \"env.task_name\"\nTASK_SEED_KEY = \"env.task_seed\"\n\n\ndef display_table(df: pd.DataFrame):\n df = df.copy()\n df.columns = clean_column_names(df.columns)\n df.index.names = clean_column_names(df.index.names)\n return df\n\n\ndef remove_args_from_col(df: pd.DataFrame):\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n df.index.names = [col.replace(\"_args\", \"\") for col in df.index.names]\n return df\n\n\ndef clean_column_names(col_list):\n # col_list = [col.replace(\"_args\", \"\") for col in col_list]\n col_list = [col.replace(\".\", \".\\n\") for col in col_list] # adding space for word wrap\n # col_list = [col.replace(\"_\", \" \") for col in col_list]\n return col_list\n\n\nclass ClickMapper:\n def __init__(self, ax: plt.Axes, step_times: list[float]):\n self.ax = ax\n self.step_times = step_times\n\n def to_time(self, x_pix_coord):\n x_time_coord, _ = self.ax.transData.inverted().transform((x_pix_coord, 0))\n return x_time_coord\n\n def to_step(self, x_pix_coord):\n x_time_coord = self.to_time(x_pix_coord)\n return np.searchsorted(self.step_times, x_time_coord)\n\n\n@dataclass\nclass EpisodeId:\n agent_id: str = None\n task_name: str = None","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.ClickMapper","uri":"program://AgentLab/class/src.agentlab.analyze.agent_xray.ClickMapper#L59-L70","kind":"class","name":"ClickMapper","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":59,"end_line":70,"context_start_line":39,"context_end_line":90,"code":"def display_table(df: pd.DataFrame):\n df = df.copy()\n df.columns = clean_column_names(df.columns)\n df.index.names = clean_column_names(df.index.names)\n return df\n\n\ndef remove_args_from_col(df: pd.DataFrame):\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n df.index.names = [col.replace(\"_args\", \"\") for col in df.index.names]\n return df\n\n\ndef clean_column_names(col_list):\n # col_list = [col.replace(\"_args\", \"\") for col in col_list]\n col_list = [col.replace(\".\", \".\\n\") for col in col_list] # adding space for word wrap\n # col_list = [col.replace(\"_\", \" \") for col in col_list]\n return col_list\n\n\nclass ClickMapper:\n def __init__(self, ax: 
plt.Axes, step_times: list[float]):\n self.ax = ax\n self.step_times = step_times\n\n def to_time(self, x_pix_coord):\n x_time_coord, _ = self.ax.transData.inverted().transform((x_pix_coord, 0))\n return x_time_coord\n\n def to_step(self, x_pix_coord):\n x_time_coord = self.to_time(x_pix_coord)\n return np.searchsorted(self.step_times, x_time_coord)\n\n\n@dataclass\nclass EpisodeId:\n agent_id: str = None\n task_name: str = None\n seed: int = None\n row_index: int = None # unique row index to disambiguate selections\n\n\n@dataclass\nclass StepId:\n episode_id: EpisodeId = None\n step: int = None\n\n\n@dataclass\nclass Info:\n results_dir: Path = None # to root directory of all experiments\n study_dirs: Path = None # the path of the currently selected experiment","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.EpisodeId","uri":"program://AgentLab/class/src.agentlab.analyze.agent_xray.EpisodeId#L74-L78","kind":"class","name":"EpisodeId","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":74,"end_line":78,"context_start_line":54,"context_end_line":98,"code":" col_list = [col.replace(\".\", \".\\n\") for col in col_list] # adding space for word wrap\n # col_list = [col.replace(\"_\", \" \") for col in col_list]\n return col_list\n\n\nclass ClickMapper:\n def __init__(self, ax: plt.Axes, step_times: list[float]):\n self.ax = ax\n self.step_times = step_times\n\n def to_time(self, x_pix_coord):\n x_time_coord, _ = self.ax.transData.inverted().transform((x_pix_coord, 0))\n return x_time_coord\n\n def to_step(self, x_pix_coord):\n x_time_coord = self.to_time(x_pix_coord)\n return np.searchsorted(self.step_times, x_time_coord)\n\n\n@dataclass\nclass EpisodeId:\n agent_id: str = None\n task_name: str = None\n seed: int = None\n row_index: int = None # unique row index to disambiguate selections\n\n\n@dataclass\nclass StepId:\n episode_id: EpisodeId = None\n step: int = None\n\n\n@dataclass\nclass Info:\n results_dir: Path = None # to root directory of all experiments\n study_dirs: Path = None # the path of the currently selected experiment\n result_df: pd.DataFrame = None # the raw loaded df\n agent_df: pd.DataFrame = None # the df filtered for selected agent\n tasks_df: pd.DataFrame = None # the unique tasks for selected agent\n exp_result: ExpResult = None # the selected episode\n click_mapper: ClickMapper = None # mapping from profiler click to step\n step: int = None # currently selected step\n active_tab: str = \"Screenshot\" # currently selected observation tab\n agent_id_keys: list[str] = None # the list of columns identifying an agent","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.StepId","uri":"program://AgentLab/class/src.agentlab.analyze.agent_xray.StepId#L82-L84","kind":"class","name":"StepId","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":82,"end_line":84,"context_start_line":62,"context_end_line":104,"code":" self.step_times = step_times\n\n def to_time(self, x_pix_coord):\n x_time_coord, _ = self.ax.transData.inverted().transform((x_pix_coord, 0))\n return x_time_coord\n\n def to_step(self, x_pix_coord):\n x_time_coord = self.to_time(x_pix_coord)\n return np.searchsorted(self.step_times, x_time_coord)\n\n\n@dataclass\nclass EpisodeId:\n agent_id: str = None\n task_name: str = None\n seed: 
int = None\n row_index: int = None # unique row index to disambiguate selections\n\n\n@dataclass\nclass StepId:\n episode_id: EpisodeId = None\n step: int = None\n\n\n@dataclass\nclass Info:\n results_dir: Path = None # to root directory of all experiments\n study_dirs: Path = None # the path of the currently selected experiment\n result_df: pd.DataFrame = None # the raw loaded df\n agent_df: pd.DataFrame = None # the df filtered for selected agent\n tasks_df: pd.DataFrame = None # the unique tasks for selected agent\n exp_result: ExpResult = None # the selected episode\n click_mapper: ClickMapper = None # mapping from profiler click to step\n step: int = None # currently selected step\n active_tab: str = \"Screenshot\" # currently selected observation tab\n agent_id_keys: list[str] = None # the list of columns identifying an agent\n\n def update_exp_result(self, episode_id: EpisodeId):\n if self.result_df is None or episode_id.task_name is None or episode_id.seed is None:\n self.exp_result = None\n\n # find unique row using idx","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.Info","uri":"program://AgentLab/class/src.agentlab.analyze.agent_xray.Info#L88-L135","kind":"class","name":"Info","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":88,"end_line":135,"context_start_line":68,"context_end_line":155,"code":" def to_step(self, x_pix_coord):\n x_time_coord = self.to_time(x_pix_coord)\n return np.searchsorted(self.step_times, x_time_coord)\n\n\n@dataclass\nclass EpisodeId:\n agent_id: str = None\n task_name: str = None\n seed: int = None\n row_index: int = None # unique row index to disambiguate selections\n\n\n@dataclass\nclass StepId:\n episode_id: EpisodeId = None\n step: int = None\n\n\n@dataclass\nclass Info:\n results_dir: Path = None # to root directory of all experiments\n study_dirs: Path = None # the path of the currently selected experiment\n result_df: pd.DataFrame = None # the raw loaded df\n agent_df: pd.DataFrame = None # the df filtered for selected agent\n tasks_df: pd.DataFrame = None # the unique tasks for selected agent\n exp_result: ExpResult = None # the selected episode\n click_mapper: ClickMapper = None # mapping from profiler click to step\n step: int = None # currently selected step\n active_tab: str = \"Screenshot\" # currently selected observation tab\n agent_id_keys: list[str] = None # the list of columns identifying an agent\n\n def update_exp_result(self, episode_id: EpisodeId):\n if self.result_df is None or episode_id.task_name is None or episode_id.seed is None:\n self.exp_result = None\n\n # find unique row using idx\n result_df = self.agent_df.reset_index(inplace=False)\n sub_df = result_df[result_df[\"_row_index\"] == episode_id.row_index]\n if len(sub_df) == 0:\n self.exp_result = None\n raise ValueError(f\"Could not find _row_index: {episode_id.row_index}\")\n\n if len(sub_df) > 1:\n warning(\n f\"Found multiple rows with same row_index {episode_id.row_index} Using the first one.\"\n )\n exp_dir = sub_df.iloc[0][\"exp_dir\"]\n print(exp_dir)\n self.exp_result = ExpResult(exp_dir)\n self.step = 0\n\n def get_agent_id(self, row: pd.Series):\n agent_id = []\n for key in self.agent_id_keys:\n agent_id.append((key, row[key]))\n return agent_id\n\n def filter_agent_id(self, agent_id: list[tuple]):\n # Preserve a stable row index to disambiguate selections later\n tmp_df = 
self.result_df.reset_index(inplace=False)\n tmp_df[\"_row_index\"] = tmp_df.index\n tmp_df.set_index(TASK_NAME_KEY, inplace=True)\n\n for col, val in agent_id:\n col = col.replace(\".\\n\", \".\")\n tmp_df = tmp_df[tmp_df[col] == val]\n self.agent_df = tmp_df\n\n\ninfo = Info()\n\n\ncss = \"\"\"\n.my-markdown {\n max-height: 400px;\n overflow-y: auto;\n}\n.error-report {\n max-height: 700px;\n overflow-y: auto;\n}\n.my-code-view {\n max-height: 300px;\n overflow-y: auto;\n}\ncode {\n white-space: pre-wrap;","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.run_gradio","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.run_gradio#L164-L542","kind":"function","name":"run_gradio","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":164,"end_line":542,"context_start_line":144,"context_end_line":562,"code":" overflow-y: auto;\n}\n.error-report {\n max-height: 700px;\n overflow-y: auto;\n}\n.my-code-view {\n max-height: 300px;\n overflow-y: auto;\n}\ncode {\n white-space: pre-wrap;\n}\nth {\n white-space: normal !important;\n word-wrap: break-word !important;\n}\n\"\"\"\n\n\ndef run_gradio(results_dir: Path):\n \"\"\"\n Run Gradio on the selected experiments saved at savedir_base.\n\n \"\"\"\n global info\n info.results_dir = results_dir\n\n with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:\n agent_id = gr.State(value=None)\n episode_id = gr.State(value=EpisodeId())\n agent_task_id = gr.State(value=None)\n step_id = gr.State(value=None)\n\n hidden_key_input = gr.Textbox(visible=False, elem_id=\"key_capture\")\n\n with gr.Accordion(\"Help\", open=False):\n gr.Markdown(\n \"\"\"\\\n# Agent X-Ray\n\n1. **Select your experiment directory**. You may refresh the list of directories by\nclicking the refresh button.\n\n2. **Select your episode**: Chose a triplet (agent, task, seed).\n\n 1. **Select Agent**: Click on a row of the table to select your agent\n\n 2. **Select Task**: Select the task you want to analyze, this will trigger\n an update of the available seeds.\n\n 3. **Select the Seed**: You might have multiple repetition for a given task,\n you will be able to select the seed you want to analyze.\n\n3. **Select the step**: Once your episode is selected, you can select the step\n by clicking on the profiling image. This will trigger the update of the the\n information on the corresponding step.\n\n4. **Select a tab**: You can select different visualization by clicking on the tabs.\n\"\"\"\n )\n with gr.Row():\n exp_dir_choice = gr.Dropdown(\n choices=get_directory_contents(results_dir),\n value=select_dir_instructions,\n multiselect=True,\n label=\"Experiment Directory\",\n show_label=False,\n scale=6,\n container=False,\n )\n refresh_button = gr.Button(\"↺\", scale=0, size=\"sm\")\n\n with gr.Tabs():\n with gr.Tab(\"Select Agent\"):\n with gr.Accordion(\"Agent Selector (click for help)\", open=False):\n gr.Markdown(\n \"\"\"\\\n Click on a row to select an agent. 
It will trigger the update of other\n fields.\n \n The update mechanism is somewhat flacky, please help figure out why (or is it just gradio?).\n \"\"\"\n )\n agent_table = gr.DataFrame(max_height=500, show_label=False, interactive=False)\n with gr.Tab(\"Select Task and Seed\", id=\"Select Task\"):\n with gr.Row():\n with gr.Column(scale=4):\n with gr.Row(): # combining the title (help) and the refresh button\n with gr.Accordion(\"Task Selector (click for help)\", open=False):\n gr.Markdown(\n \"\"\"\\\n Click on a row to select a task. It will trigger the update of other fields.\n\n The update mechanism is somewhat flacky, please help figure out why (or is it just gradio?).\n \"\"\"\n )\n refresh_results_button = gr.Button(\"↺\", scale=0, size=\"sm\")\n\n task_table = gr.DataFrame(\n max_height=500,\n show_label=False,\n interactive=False,\n elem_id=\"task_table\",\n )\n\n with gr.Column(scale=2):\n with gr.Accordion(\"Seed Selector (click for help)\", open=False):\n gr.Markdown(\n \"\"\"\\\n Click on a row to select a seed. It will trigger the update of other fields.\n\n The update mechanism is somewhat flacky, please help figure out why (or is it just gradio?).\n \"\"\"\n )\n\n seed_table = gr.DataFrame(\n max_height=500,\n show_label=False,\n interactive=False,\n elem_id=\"seed_table\",\n )\n\n with gr.Tab(\"Constants and Variables\"):\n with gr.Row():\n with gr.Column(scale=2):\n with gr.Accordion(\"Constants\", open=False):\n gr.Markdown(\n \"\"\"\\\n Constants are the parameters that are the same for **all** episodes of\n **all** agents. They are displayed as a table with the name and value of the\n constant.\"\"\"\n )\n constants = gr.DataFrame(\n max_height=500, show_label=False, interactive=False\n )\n with gr.Column(scale=2):\n with gr.Accordion(\"Variables\", open=False):\n gr.Markdown(\n \"\"\"\\\n Variables are the parameters that can change between episodes of an agent.\n They are displayed as a table with the name, value and count of unique\n values. 
A maximum of 3 different values are displayed.\"\"\"\n )\n variables = gr.DataFrame(\n max_height=500, show_label=False, interactive=False\n )\n with gr.Tab(\"Global Stats\"):\n global_stats = gr.DataFrame(max_height=500, show_label=False, interactive=False)\n\n with gr.Tab(\"Error Report\"):\n error_report = gr.Markdown(elem_classes=\"error-report\", show_copy_button=True)\n with gr.Row():\n episode_info = gr.Markdown(label=\"Episode Info\", elem_classes=\"my-markdown\")\n action_info = gr.Markdown(label=\"Action Info\", elem_classes=\"my-markdown\")\n state_error = gr.Markdown(label=\"Next Step Error\", elem_classes=\"my-markdown\")\n\n profiling_gr = gr.Image(\n label=\"Profiling\", show_label=False, interactive=False, show_download_button=False\n )\n\n gr.HTML(\n \"\"\"\n\n\"\"\"\n )\n with gr.Tabs() as tabs:\n code_args = dict(interactive=False, elem_classes=[\"code-container\"], show_label=False)\n with gr.Tab(\"Screenshot\") as tab_screenshot:\n som_or_not = gr.Dropdown(\n choices=[\"Raw Screenshots\", \"SOM Screenshots\"],\n label=\"Screenshot Type\",\n value=\"Raw Screenshots\",\n show_label=False,\n container=False,\n interactive=True,\n scale=0,\n )\n screenshot = gr.Image(\n show_label=False, interactive=False, show_download_button=False\n )\n\n with gr.Tab(\"Screenshot Pair\") as tab_screenshot_pair:\n with gr.Row():\n screenshot1 = gr.Image(\n show_label=False, interactive=False, show_download_button=False\n )\n screenshot2 = gr.Image(\n show_label=False, interactive=False, show_download_button=False\n )\n with gr.Tab(\"Screenshot Gallery\") as tab_screenshot_gallery:\n screenshot_gallery = gr.Gallery(\n columns=2,\n show_download_button=False,\n show_label=False,\n object_fit=\"contain\",\n preview=True,\n )\n\n with gr.Tab(\"Episode\") as tab_episode:\n episode = gr.HTML()\n\n with gr.Tab(\"DOM HTML\") as tab_html:\n html_code = gr.Code(language=\"html\", **code_args)\n\n with gr.Tab(\"Pruned DOM HTML\") as tab_pruned_html:\n pruned_html_code = gr.Code(language=\"html\", **code_args)\n\n with gr.Tab(\"AXTree\") as tab_axtree:\n axtree_code = gr.Markdown()\n\n with gr.Tab(\"Chat Messages\") as tab_chat:\n chat_messages = gr.Markdown()\n\n with gr.Tab(\"Task Error\") as tab_error:\n task_error = gr.Markdown()\n\n with gr.Tab(\"Logs\") as tab_logs:\n logs = gr.Code(language=None, **code_args)\n\n with gr.Tab(\"Stats\") as tab_stats:\n stats = gr.DataFrame(max_height=500, show_label=False, interactive=False)\n\n with gr.Tab(\"Agent Info HTML\") as tab_agent_info_html:\n with gr.Row():\n screenshot1_agent = gr.Image(\n show_label=False, interactive=False, show_download_button=False\n )\n screenshot2_agent = gr.Image(\n show_label=False, interactive=False, show_download_button=False\n )\n agent_info_html = gr.HTML()\n\n with gr.Tab(\"Agent Info MD\") as tab_agent_info_md:\n agent_info_md = gr.Markdown()\n\n with gr.Tab(\"Prompt tests\") as tab_prompt_tests:\n with gr.Row():\n prompt_markdown = gr.Textbox(\n value=\"\",\n label=\"\",\n show_label=False,\n interactive=False,\n elem_id=\"prompt_markdown\",\n )\n with gr.Column():\n prompt_tests_textbox = gr.Textbox(\n value=\"\",\n label=\"\",\n show_label=False,\n interactive=True,\n elem_id=\"prompt_tests_textbox\",\n )\n submit_button = gr.Button(value=\"Submit\")\n result_box = gr.Textbox(\n value=\"\", label=\"Result\", show_label=True, interactive=False\n )\n\n # Define the interaction\n submit_button.click(\n fn=submit_action, inputs=prompt_tests_textbox, outputs=result_box\n )\n\n # Handle Events #\n # ===============#\n\n 
refresh_button.click(\n fn=refresh_exp_dir_choices, inputs=exp_dir_choice, outputs=exp_dir_choice\n )\n\n refresh_results_button.click(\n fn=refresh_exp_dir_choices, inputs=exp_dir_choice, outputs=exp_dir_choice\n )\n\n exp_dir_choice.change(\n fn=new_exp_dir,\n inputs=exp_dir_choice,\n outputs=[agent_table, agent_id, constants, variables, global_stats, error_report],\n )\n\n agent_table.select(fn=on_select_agent, inputs=agent_table, outputs=[agent_id])\n task_table.select(fn=on_select_task, inputs=[task_table, agent_id], outputs=agent_task_id)\n\n agent_id.change(fn=new_agent_id, inputs=agent_id, outputs=[task_table, agent_task_id])\n agent_task_id.change(\n fn=update_seeds, inputs=agent_task_id, outputs=[seed_table, episode_id]\n )\n # seed_gr.change(fn=on_select_seed, inputs=[seed_gr, task_name], outputs=[episode_id])\n seed_table.select(on_select_seed, inputs=[seed_table, agent_task_id], outputs=episode_id)\n step_id.change(fn=update_step_info, outputs=[episode_info, action_info, state_error])\n episode_id.change(fn=new_episode, inputs=[episode_id], outputs=[profiling_gr, step_id])\n profiling_gr.select(select_step, inputs=[episode_id], outputs=step_id)\n\n # Update all tabs on step change, but only actually update the active\n # tab. This helps keep the UI responsive when selecting a new step.\n step_id.change(\n fn=if_active(\"Screenshot\")(update_screenshot),\n inputs=som_or_not,\n outputs=screenshot,\n )\n step_id.change(\n fn=if_active(\"Screenshot Pair\", 2)(update_screenshot_pair),\n inputs=som_or_not,\n outputs=[screenshot1, screenshot2],\n )\n step_id.change(\n fn=if_active(\"Screenshot Gallery\")(update_screenshot_gallery),\n inputs=som_or_not,\n outputs=[screenshot_gallery],\n )\n screenshot_gallery.select(fn=gallery_step_change, inputs=episode_id, outputs=step_id)\n episode_id.change(fn=if_active(\"Episode\")(update_episode), outputs=episode)\n step_id.change(fn=if_active(\"DOM HTML\")(update_html), outputs=html_code)\n step_id.change(\n fn=if_active(\"Pruned DOM HTML\")(update_pruned_html), outputs=pruned_html_code\n )\n step_id.change(fn=if_active(\"AXTree\")(update_axtree), outputs=axtree_code)\n step_id.change(fn=if_active(\"Chat Messages\")(update_chat_messages), outputs=chat_messages)\n step_id.change(fn=if_active(\"Task Error\")(update_task_error), outputs=task_error)\n step_id.change(fn=if_active(\"Logs\")(update_logs), outputs=logs)\n step_id.change(fn=if_active(\"Stats\")(update_stats), outputs=stats)\n step_id.change(\n fn=if_active(\"Agent Info HTML\", 3)(update_agent_info_html),\n outputs=[agent_info_html, screenshot1_agent, screenshot2_agent],\n )\n step_id.change(fn=if_active(\"Agent Info MD\")(update_agent_info_md), outputs=agent_info_md)\n step_id.change(\n fn=if_active(\"Prompt tests\", 2)(update_prompt_tests),\n outputs=[prompt_markdown, prompt_tests_textbox],\n )\n\n # To handle tabs that were not visible when the step changed, we need to\n # update them individually when the tab is selected.\n tab_screenshot.select(fn=update_screenshot, inputs=som_or_not, outputs=screenshot)\n tab_screenshot_pair.select(\n fn=update_screenshot_pair, inputs=som_or_not, outputs=[screenshot1, screenshot2]\n )\n tab_screenshot_gallery.select(\n fn=update_screenshot_gallery, inputs=som_or_not, outputs=[screenshot_gallery]\n )\n tab_episode.select(fn=update_episode, outputs=episode)\n tab_html.select(fn=update_html, outputs=html_code)\n tab_pruned_html.select(fn=update_pruned_html, outputs=pruned_html_code)\n tab_axtree.select(fn=update_axtree, 
outputs=axtree_code)\n tab_chat.select(fn=update_chat_messages, outputs=chat_messages)\n tab_error.select(fn=update_task_error, outputs=task_error)\n tab_logs.select(fn=update_logs, outputs=logs)\n tab_stats.select(fn=update_stats, outputs=stats)\n tab_agent_info_html.select(fn=update_agent_info_html, outputs=agent_info_html)\n tab_agent_info_md.select(fn=update_agent_info_md, outputs=agent_info_md)\n tab_prompt_tests.select(\n fn=update_prompt_tests, outputs=[prompt_markdown, prompt_tests_textbox]\n )\n\n som_or_not.change(fn=update_screenshot, inputs=som_or_not, outputs=screenshot)\n\n # keep track of active tab\n tabs.select(tab_select)\n\n demo.load(fn=refresh_exp_dir_choices, inputs=exp_dir_choice, outputs=exp_dir_choice)\n\n demo.load(\n None,\n None,\n None,\n js=\"\"\"\n function() {\n document.addEventListener('keydown', function(e) {\n if ((e.key === 'ArrowLeft' || e.key === 'ArrowRight') && (e.metaKey || e.ctrlKey)) {\n e.preventDefault();\n const hiddenInput = document.querySelector('#key_capture input, #key_capture textarea');\n if (hiddenInput) {\n let event = e.key === 'ArrowLeft' ? 'Cmd+Left' : 'Cmd+Right';\n hiddenInput.value = event;\n hiddenInput.dispatchEvent(new Event('input', {bubbles: true}));\n }\n }\n });\n }\n \"\"\",\n )\n hidden_key_input.change(\n handle_key_event,\n inputs=[hidden_key_input, step_id],\n outputs=[hidden_key_input, step_id],\n )\n\n demo.queue()\n\n do_share = os.getenv(\"AGENTXRAY_SHARE_GRADIO\", \"false\").lower() == \"true\"\n port = os.getenv(\"AGENTXRAY_APP_PORT\", None)\n if isinstance(port, str):\n port = int(port)\n demo.launch(server_port=port, share=do_share)\n\n\ndef handle_key_event(key_event, step_id: StepId):\n\n if key_event:\n global info\n\n # print(f\"Key event: {key_event}\")\n step = step_id.step\n if key_event.startswith(\"Cmd+Left\"):\n step = max(0, step - 1)\n elif key_event.startswith(\"Cmd+Right\"):\n step = min(len(info.exp_result.steps_info) - 2, step + 1)\n else:\n return gr.update()\n # print(f\"Updating step to {step} from key event {key_event}\")\n info.step = step\n step_id = StepId(episode_id=step_id.episode_id, step=step)\n return (\"\", step_id)\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.handle_key_event","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.handle_key_event#L545-L561","kind":"function","name":"handle_key_event","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":545,"end_line":561,"context_start_line":525,"context_end_line":581,"code":" }\n });\n }\n \"\"\",\n )\n hidden_key_input.change(\n handle_key_event,\n inputs=[hidden_key_input, step_id],\n outputs=[hidden_key_input, step_id],\n )\n\n demo.queue()\n\n do_share = os.getenv(\"AGENTXRAY_SHARE_GRADIO\", \"false\").lower() == \"true\"\n port = os.getenv(\"AGENTXRAY_APP_PORT\", None)\n if isinstance(port, str):\n port = int(port)\n demo.launch(server_port=port, share=do_share)\n\n\ndef handle_key_event(key_event, step_id: StepId):\n\n if key_event:\n global info\n\n # print(f\"Key event: {key_event}\")\n step = step_id.step\n if key_event.startswith(\"Cmd+Left\"):\n step = max(0, step - 1)\n elif key_event.startswith(\"Cmd+Right\"):\n step = min(len(info.exp_result.steps_info) - 2, step + 1)\n else:\n return gr.update()\n # print(f\"Updating step to {step} from key event {key_event}\")\n info.step = step\n step_id = StepId(episode_id=step_id.episode_id, step=step)\n return 
(\"\", step_id)\n\n\ndef tab_select(evt: gr.SelectData):\n global info\n info.active_tab = evt.value\n\n\ndef if_active(tab_name, n_out=1):\n def decorator(fn):\n def wrapper(*args, **kwargs):\n global info\n if info.active_tab == tab_name:\n # print(\"updating: \", fn.__name__)\n return fn(*args, **kwargs)\n else:\n # print(\"skipping: \", fn.__name__)\n if n_out == 1:\n return gr.update()\n elif n_out > 1:\n return (gr.update(),) * n_out","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.tab_select","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.tab_select#L564-L566","kind":"function","name":"tab_select","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":564,"end_line":566,"context_start_line":544,"context_end_line":586,"code":"\ndef handle_key_event(key_event, step_id: StepId):\n\n if key_event:\n global info\n\n # print(f\"Key event: {key_event}\")\n step = step_id.step\n if key_event.startswith(\"Cmd+Left\"):\n step = max(0, step - 1)\n elif key_event.startswith(\"Cmd+Right\"):\n step = min(len(info.exp_result.steps_info) - 2, step + 1)\n else:\n return gr.update()\n # print(f\"Updating step to {step} from key event {key_event}\")\n info.step = step\n step_id = StepId(episode_id=step_id.episode_id, step=step)\n return (\"\", step_id)\n\n\ndef tab_select(evt: gr.SelectData):\n global info\n info.active_tab = evt.value\n\n\ndef if_active(tab_name, n_out=1):\n def decorator(fn):\n def wrapper(*args, **kwargs):\n global info\n if info.active_tab == tab_name:\n # print(\"updating: \", fn.__name__)\n return fn(*args, **kwargs)\n else:\n # print(\"skipping: \", fn.__name__)\n if n_out == 1:\n return gr.update()\n elif n_out > 1:\n return (gr.update(),) * n_out\n\n return wrapper\n\n return decorator\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.if_active","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.if_active#L569-L585","kind":"function","name":"if_active","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":569,"end_line":585,"context_start_line":549,"context_end_line":605,"code":"\n # print(f\"Key event: {key_event}\")\n step = step_id.step\n if key_event.startswith(\"Cmd+Left\"):\n step = max(0, step - 1)\n elif key_event.startswith(\"Cmd+Right\"):\n step = min(len(info.exp_result.steps_info) - 2, step + 1)\n else:\n return gr.update()\n # print(f\"Updating step to {step} from key event {key_event}\")\n info.step = step\n step_id = StepId(episode_id=step_id.episode_id, step=step)\n return (\"\", step_id)\n\n\ndef tab_select(evt: gr.SelectData):\n global info\n info.active_tab = evt.value\n\n\ndef if_active(tab_name, n_out=1):\n def decorator(fn):\n def wrapper(*args, **kwargs):\n global info\n if info.active_tab == tab_name:\n # print(\"updating: \", fn.__name__)\n return fn(*args, **kwargs)\n else:\n # print(\"skipping: \", fn.__name__)\n if n_out == 1:\n return gr.update()\n elif n_out > 1:\n return (gr.update(),) * n_out\n\n return wrapper\n\n return decorator\n\n\ndef update_screenshot(som_or_not: str):\n global info\n img, action_str = get_screenshot(info, som_or_not=som_or_not, annotate=True)\n return img\n\n\ndef get_screenshot(\n info: Info, step: int = None, som_or_not: str = \"Raw Screenshots\", annotate: bool = False\n):\n if step is None:\n 
step = info.step\n try:\n step_info = info.exp_result.steps_info[step]\n is_som = som_or_not == \"SOM Screenshots\"\n img = info.exp_result.get_screenshot(step, som=is_som)\n if annotate:\n action_str = step_info.action\n properties = step_info.obs.get(\"extra_element_properties\", None)","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_screenshot","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_screenshot#L588-L591","kind":"function","name":"update_screenshot","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":588,"end_line":591,"context_start_line":568,"context_end_line":611,"code":"\ndef if_active(tab_name, n_out=1):\n def decorator(fn):\n def wrapper(*args, **kwargs):\n global info\n if info.active_tab == tab_name:\n # print(\"updating: \", fn.__name__)\n return fn(*args, **kwargs)\n else:\n # print(\"skipping: \", fn.__name__)\n if n_out == 1:\n return gr.update()\n elif n_out > 1:\n return (gr.update(),) * n_out\n\n return wrapper\n\n return decorator\n\n\ndef update_screenshot(som_or_not: str):\n global info\n img, action_str = get_screenshot(info, som_or_not=som_or_not, annotate=True)\n return img\n\n\ndef get_screenshot(\n info: Info, step: int = None, som_or_not: str = \"Raw Screenshots\", annotate: bool = False\n):\n if step is None:\n step = info.step\n try:\n step_info = info.exp_result.steps_info[step]\n is_som = som_or_not == \"SOM Screenshots\"\n img = info.exp_result.get_screenshot(step, som=is_som)\n if annotate:\n action_str = step_info.action\n properties = step_info.obs.get(\"extra_element_properties\", None)\n try:\n action_colored = annotate_action(\n img, action_string=action_str, properties=properties\n )\n except Exception as e:\n warning(f\"Failed to annotate action: {e}\")","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.get_screenshot","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.get_screenshot#L594-L617","kind":"function","name":"get_screenshot","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":594,"end_line":617,"context_start_line":574,"context_end_line":637,"code":" # print(\"updating: \", fn.__name__)\n return fn(*args, **kwargs)\n else:\n # print(\"skipping: \", fn.__name__)\n if n_out == 1:\n return gr.update()\n elif n_out > 1:\n return (gr.update(),) * n_out\n\n return wrapper\n\n return decorator\n\n\ndef update_screenshot(som_or_not: str):\n global info\n img, action_str = get_screenshot(info, som_or_not=som_or_not, annotate=True)\n return img\n\n\ndef get_screenshot(\n info: Info, step: int = None, som_or_not: str = \"Raw Screenshots\", annotate: bool = False\n):\n if step is None:\n step = info.step\n try:\n step_info = info.exp_result.steps_info[step]\n is_som = som_or_not == \"SOM Screenshots\"\n img = info.exp_result.get_screenshot(step, som=is_som)\n if annotate:\n action_str = step_info.action\n properties = step_info.obs.get(\"extra_element_properties\", None)\n try:\n action_colored = annotate_action(\n img, action_string=action_str, properties=properties\n )\n except Exception as e:\n warning(f\"Failed to annotate action: {e}\")\n action_colored = action_str\n else:\n action_colored = None\n return img, action_colored\n except (FileNotFoundError, IndexError):\n return None, None\n\n\ndef 
update_screenshot_pair(som_or_not: str):\n global info\n s1, action_str = get_screenshot(info, info.step, som_or_not, annotate=True)\n s2, action_str = get_screenshot(info, info.step + 1, som_or_not)\n return s1, s2\n\n\ndef update_screenshot_gallery(som_or_not: str):\n global info\n max_steps = len(info.exp_result.steps_info)\n\n screenshots = [get_screenshot(info, step=i, som_or_not=som_or_not)[0] for i in range(max_steps)]\n\n screenshots_and_label = [(s, f\"Step {i}\") for i, s in enumerate(screenshots)]\n\n gallery = gr.Gallery(\n value=screenshots_and_label,\n columns=2,","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_screenshot_pair","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_screenshot_pair#L620-L624","kind":"function","name":"update_screenshot_pair","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":620,"end_line":624,"context_start_line":600,"context_end_line":644,"code":" step_info = info.exp_result.steps_info[step]\n is_som = som_or_not == \"SOM Screenshots\"\n img = info.exp_result.get_screenshot(step, som=is_som)\n if annotate:\n action_str = step_info.action\n properties = step_info.obs.get(\"extra_element_properties\", None)\n try:\n action_colored = annotate_action(\n img, action_string=action_str, properties=properties\n )\n except Exception as e:\n warning(f\"Failed to annotate action: {e}\")\n action_colored = action_str\n else:\n action_colored = None\n return img, action_colored\n except (FileNotFoundError, IndexError):\n return None, None\n\n\ndef update_screenshot_pair(som_or_not: str):\n global info\n s1, action_str = get_screenshot(info, info.step, som_or_not, annotate=True)\n s2, action_str = get_screenshot(info, info.step + 1, som_or_not)\n return s1, s2\n\n\ndef update_screenshot_gallery(som_or_not: str):\n global info\n max_steps = len(info.exp_result.steps_info)\n\n screenshots = [get_screenshot(info, step=i, som_or_not=som_or_not)[0] for i in range(max_steps)]\n\n screenshots_and_label = [(s, f\"Step {i}\") for i, s in enumerate(screenshots)]\n\n gallery = gr.Gallery(\n value=screenshots_and_label,\n columns=2,\n show_download_button=False,\n show_label=False,\n object_fit=\"contain\",\n preview=True,\n selected_index=info.step,\n )\n return gallery","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_screenshot_gallery","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_screenshot_gallery#L627-L644","kind":"function","name":"update_screenshot_gallery","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":627,"end_line":644,"context_start_line":607,"context_end_line":664,"code":" action_colored = annotate_action(\n img, action_string=action_str, properties=properties\n )\n except Exception as e:\n warning(f\"Failed to annotate action: {e}\")\n action_colored = action_str\n else:\n action_colored = None\n return img, action_colored\n except (FileNotFoundError, IndexError):\n return None, None\n\n\ndef update_screenshot_pair(som_or_not: str):\n global info\n s1, action_str = get_screenshot(info, info.step, som_or_not, annotate=True)\n s2, action_str = get_screenshot(info, info.step + 1, som_or_not)\n return s1, s2\n\n\ndef update_screenshot_gallery(som_or_not: str):\n global info\n max_steps = 
len(info.exp_result.steps_info)\n\n screenshots = [get_screenshot(info, step=i, som_or_not=som_or_not)[0] for i in range(max_steps)]\n\n screenshots_and_label = [(s, f\"Step {i}\") for i, s in enumerate(screenshots)]\n\n gallery = gr.Gallery(\n value=screenshots_and_label,\n columns=2,\n show_download_button=False,\n show_label=False,\n object_fit=\"contain\",\n preview=True,\n selected_index=info.step,\n )\n return gallery\n\n\ndef gallery_step_change(evt: gr.SelectData, episode_id: EpisodeId):\n global info\n info.step = evt.index\n return StepId(episode_id=episode_id, step=evt.index)\n\n\n# def update_episode():\n# # get exp_results for the given episode_id\n# return exp_result_to_html(info.exp_result)\ndef update_episode():\n html_content = exp_result_to_html(info.exp_result)\n\n # Use srcdoc instead of data URL\n return f\"\"\"\"\"\"\n\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.gallery_step_change","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.gallery_step_change#L647-L650","kind":"function","name":"gallery_step_change","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":647,"end_line":650,"context_start_line":627,"context_end_line":670,"code":"def update_screenshot_gallery(som_or_not: str):\n global info\n max_steps = len(info.exp_result.steps_info)\n\n screenshots = [get_screenshot(info, step=i, som_or_not=som_or_not)[0] for i in range(max_steps)]\n\n screenshots_and_label = [(s, f\"Step {i}\") for i, s in enumerate(screenshots)]\n\n gallery = gr.Gallery(\n value=screenshots_and_label,\n columns=2,\n show_download_button=False,\n show_label=False,\n object_fit=\"contain\",\n preview=True,\n selected_index=info.step,\n )\n return gallery\n\n\ndef gallery_step_change(evt: gr.SelectData, episode_id: EpisodeId):\n global info\n info.step = evt.index\n return StepId(episode_id=episode_id, step=evt.index)\n\n\n# def update_episode():\n# # get exp_results for the given episode_id\n# return exp_result_to_html(info.exp_result)\ndef update_episode():\n html_content = exp_result_to_html(info.exp_result)\n\n # Use srcdoc instead of data URL\n return f\"\"\"\"\"\"\n\n\ndef update_html():\n return get_obs(key=\"dom_txt\", default=\"No DOM HTML\")\n\n\ndef update_pruned_html():\n return get_obs(key=\"pruned_html\", default=\"No Pruned HTML\")","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_episode","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_episode#L656-L662","kind":"function","name":"update_episode","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":656,"end_line":662,"context_start_line":636,"context_end_line":682,"code":" value=screenshots_and_label,\n columns=2,\n show_download_button=False,\n show_label=False,\n object_fit=\"contain\",\n preview=True,\n selected_index=info.step,\n )\n return gallery\n\n\ndef gallery_step_change(evt: gr.SelectData, episode_id: EpisodeId):\n global info\n info.step = evt.index\n return StepId(episode_id=episode_id, step=evt.index)\n\n\n# def update_episode():\n# # get exp_results for the given episode_id\n# return exp_result_to_html(info.exp_result)\ndef update_episode():\n html_content = exp_result_to_html(info.exp_result)\n\n # Use srcdoc instead of data URL\n return f\"\"\"\"\"\"\n\n\ndef 
update_html():\n return get_obs(key=\"dom_txt\", default=\"No DOM HTML\")\n\n\ndef update_pruned_html():\n return get_obs(key=\"pruned_html\", default=\"No Pruned HTML\")\n\n\ndef update_axtree():\n obs = get_obs(key=\"axtree_txt\", default=\"No AXTree\")\n return f\"```\\n{obs}\\n```\"\n\n\ndef dict_to_markdown(d: dict):\n \"\"\"\n Convert a dictionary to a clean markdown representation, recursively.\n\n Args:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_html","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_html#L665-L666","kind":"function","name":"update_html","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":665,"end_line":666,"context_start_line":645,"context_end_line":686,"code":"\n\ndef gallery_step_change(evt: gr.SelectData, episode_id: EpisodeId):\n global info\n info.step = evt.index\n return StepId(episode_id=episode_id, step=evt.index)\n\n\n# def update_episode():\n# # get exp_results for the given episode_id\n# return exp_result_to_html(info.exp_result)\ndef update_episode():\n html_content = exp_result_to_html(info.exp_result)\n\n # Use srcdoc instead of data URL\n return f\"\"\"\"\"\"\n\n\ndef update_html():\n return get_obs(key=\"dom_txt\", default=\"No DOM HTML\")\n\n\ndef update_pruned_html():\n return get_obs(key=\"pruned_html\", default=\"No Pruned HTML\")\n\n\ndef update_axtree():\n obs = get_obs(key=\"axtree_txt\", default=\"No AXTree\")\n return f\"```\\n{obs}\\n```\"\n\n\ndef dict_to_markdown(d: dict):\n \"\"\"\n Convert a dictionary to a clean markdown representation, recursively.\n\n Args:\n d (dict): A dictionary where keys are strings and values can be strings,\n lists of dictionaries, or nested dictionaries.\n\n Returns:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_pruned_html","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_pruned_html#L669-L670","kind":"function","name":"update_pruned_html","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":669,"end_line":670,"context_start_line":649,"context_end_line":690,"code":" info.step = evt.index\n return StepId(episode_id=episode_id, step=evt.index)\n\n\n# def update_episode():\n# # get exp_results for the given episode_id\n# return exp_result_to_html(info.exp_result)\ndef update_episode():\n html_content = exp_result_to_html(info.exp_result)\n\n # Use srcdoc instead of data URL\n return f\"\"\"\"\"\"\n\n\ndef update_html():\n return get_obs(key=\"dom_txt\", default=\"No DOM HTML\")\n\n\ndef update_pruned_html():\n return get_obs(key=\"pruned_html\", default=\"No Pruned HTML\")\n\n\ndef update_axtree():\n obs = get_obs(key=\"axtree_txt\", default=\"No AXTree\")\n return f\"```\\n{obs}\\n```\"\n\n\ndef dict_to_markdown(d: dict):\n \"\"\"\n Convert a dictionary to a clean markdown representation, recursively.\n\n Args:\n d (dict): A dictionary where keys are strings and values can be strings,\n lists of dictionaries, or nested dictionaries.\n\n Returns:\n str: A markdown-formatted string representation of the dictionary.\n \"\"\"\n if not isinstance(d, dict):\n if isinstance(d, ToolCalls):","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} 
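`update_episode` above embeds the rendered episode HTML in an iframe via the `srcdoc` attribute rather than a base64 data URL (per the comment in its body). A minimal sketch of that approach; `episode_html_to_iframe` and the styling values are hypothetical, chosen here for illustration:

```python
import html


def episode_html_to_iframe(html_content: str) -> str:
    # srcdoc takes the whole document inline as an attribute value, so the
    # HTML must be attribute-escaped; html.escape also escapes the quotes.
    escaped = html.escape(html_content, quote=True)
    # Width/height are illustrative, not the component's actual styling.
    return (
        f'<iframe srcdoc="{escaped}" '
        f'style="width:100%; height:800px; border:none;"></iframe>'
    )
```

Unlike the `data:text/html;base64,...` URL used by `_page_to_iframe` further below, `srcdoc` keeps the markup readable in the DOM and avoids URL-length limits.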
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_axtree","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_axtree#L673-L675","kind":"function","name":"update_axtree","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":673,"end_line":675,"context_start_line":653,"context_end_line":695,"code":"# def update_episode():\n# # get exp_results for the given episode_id\n# return exp_result_to_html(info.exp_result)\ndef update_episode():\n html_content = exp_result_to_html(info.exp_result)\n\n # Use srcdoc instead of data URL\n return f\"\"\"\"\"\"\n\n\ndef update_html():\n return get_obs(key=\"dom_txt\", default=\"No DOM HTML\")\n\n\ndef update_pruned_html():\n return get_obs(key=\"pruned_html\", default=\"No Pruned HTML\")\n\n\ndef update_axtree():\n obs = get_obs(key=\"axtree_txt\", default=\"No AXTree\")\n return f\"```\\n{obs}\\n```\"\n\n\ndef dict_to_markdown(d: dict):\n \"\"\"\n Convert a dictionary to a clean markdown representation, recursively.\n\n Args:\n d (dict): A dictionary where keys are strings and values can be strings,\n lists of dictionaries, or nested dictionaries.\n\n Returns:\n str: A markdown-formatted string representation of the dictionary.\n \"\"\"\n if not isinstance(d, dict):\n if isinstance(d, ToolCalls):\n # ToolCalls rendered by to_markdown method.\n return \"\"\n warning(f\"Expected dict, got {type(d)}\")\n return repr(d)\n if not d:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.dict_to_markdown","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.dict_to_markdown#L678-L710","kind":"function","name":"dict_to_markdown","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":678,"end_line":710,"context_start_line":658,"context_end_line":730,"code":"\n # Use srcdoc instead of data URL\n return f\"\"\"\"\"\"\n\n\ndef update_html():\n return get_obs(key=\"dom_txt\", default=\"No DOM HTML\")\n\n\ndef update_pruned_html():\n return get_obs(key=\"pruned_html\", default=\"No Pruned HTML\")\n\n\ndef update_axtree():\n obs = get_obs(key=\"axtree_txt\", default=\"No AXTree\")\n return f\"```\\n{obs}\\n```\"\n\n\ndef dict_to_markdown(d: dict):\n \"\"\"\n Convert a dictionary to a clean markdown representation, recursively.\n\n Args:\n d (dict): A dictionary where keys are strings and values can be strings,\n lists of dictionaries, or nested dictionaries.\n\n Returns:\n str: A markdown-formatted string representation of the dictionary.\n \"\"\"\n if not isinstance(d, dict):\n if isinstance(d, ToolCalls):\n # ToolCalls rendered by to_markdown method.\n return \"\"\n warning(f\"Expected dict, got {type(d)}\")\n return repr(d)\n if not d:\n return \"No Data\"\n res = \"\"\n for k, v in d.items():\n if isinstance(v, dict):\n res += f\"### {k}\\n{dict_to_markdown(v)}\\n\"\n elif isinstance(v, list):\n res += f\"### {k}\\n\"\n for i, item in enumerate(v):\n if isinstance(item, dict):\n res += f\"#### Item {i}\\n{dict_to_markdown(item)}\\n\"\n else:\n res += f\"- {item}\\n\"\n else:\n res += f\"- **{k}**: {v}\\n\"\n return res\n\n\ndef dict_msg_to_markdown(d: dict):\n if \"role\" not in d:\n return dict_to_markdown(d)\n parts = []\n for item in d[\"content\"]:\n\n if hasattr(item, \"dict\"):\n item = item.dict()\n\n match item[\"type\"]:\n case \"image\":\n parts.append(f\"![Image]({item['image']})\")\n case \"text\":\n 
parts.append(f\"\\n```\\n{item['text']}\\n```\\n\")\n case \"tool_use\":\n tool_use = _format_tool_call(item[\"name\"], item[\"input\"], item[\"id\"])\n parts.append(f\"\\n```\\n{tool_use}\\n```\\n\")\n case _:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.dict_msg_to_markdown","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.dict_msg_to_markdown#L713-L735","kind":"function","name":"dict_msg_to_markdown","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":713,"end_line":735,"context_start_line":693,"context_end_line":755,"code":" warning(f\"Expected dict, got {type(d)}\")\n return repr(d)\n if not d:\n return \"No Data\"\n res = \"\"\n for k, v in d.items():\n if isinstance(v, dict):\n res += f\"### {k}\\n{dict_to_markdown(v)}\\n\"\n elif isinstance(v, list):\n res += f\"### {k}\\n\"\n for i, item in enumerate(v):\n if isinstance(item, dict):\n res += f\"#### Item {i}\\n{dict_to_markdown(item)}\\n\"\n else:\n res += f\"- {item}\\n\"\n else:\n res += f\"- **{k}**: {v}\\n\"\n return res\n\n\ndef dict_msg_to_markdown(d: dict):\n if \"role\" not in d:\n return dict_to_markdown(d)\n parts = []\n for item in d[\"content\"]:\n\n if hasattr(item, \"dict\"):\n item = item.dict()\n\n match item[\"type\"]:\n case \"image\":\n parts.append(f\"![Image]({item['image']})\")\n case \"text\":\n parts.append(f\"\\n```\\n{item['text']}\\n```\\n\")\n case \"tool_use\":\n tool_use = _format_tool_call(item[\"name\"], item[\"input\"], item[\"id\"])\n parts.append(f\"\\n```\\n{tool_use}\\n```\\n\")\n case _:\n parts.append(f\"\\n```\\n{str(item)}\\n```\\n\")\n\n markdown = f\"### {d['role'].capitalize()}\\n\"\n markdown += \"\\n\".join(parts)\n return markdown\n\n\ndef _format_tool_call(name: str, input: str, call_id: str):\n \"\"\"\n Format a tool call to markdown.\n \"\"\"\n return f\"Tool Call: {name} `{input}` (call_id: {call_id})\"\n\n\ndef format_chat_message(message: BaseMessage | MessageBuilder | dict):\n \"\"\"\n Format a message to markdown.\n \"\"\"\n if isinstance(message, BaseMessage):\n return message.content\n elif isinstance(message, MessageBuilder):\n return message.to_markdown()\n elif isinstance(message, dict):\n return dict_msg_to_markdown(message)\n elif isinstance(message, ResponseFunctionToolCall): # type: ignore[return]","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray._format_tool_call","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray._format_tool_call#L738-L742","kind":"function","name":"_format_tool_call","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":738,"end_line":742,"context_start_line":718,"context_end_line":762,"code":"\n if hasattr(item, \"dict\"):\n item = item.dict()\n\n match item[\"type\"]:\n case \"image\":\n parts.append(f\"![Image]({item['image']})\")\n case \"text\":\n parts.append(f\"\\n```\\n{item['text']}\\n```\\n\")\n case \"tool_use\":\n tool_use = _format_tool_call(item[\"name\"], item[\"input\"], item[\"id\"])\n parts.append(f\"\\n```\\n{tool_use}\\n```\\n\")\n case _:\n parts.append(f\"\\n```\\n{str(item)}\\n```\\n\")\n\n markdown = f\"### {d['role'].capitalize()}\\n\"\n markdown += \"\\n\".join(parts)\n return markdown\n\n\ndef _format_tool_call(name: str, input: str, call_id: str):\n \"\"\"\n Format a tool call to markdown.\n 
\"\"\"\n return f\"Tool Call: {name} `{input}` (call_id: {call_id})\"\n\n\ndef format_chat_message(message: BaseMessage | MessageBuilder | dict):\n \"\"\"\n Format a message to markdown.\n \"\"\"\n if isinstance(message, BaseMessage):\n return message.content\n elif isinstance(message, MessageBuilder):\n return message.to_markdown()\n elif isinstance(message, dict):\n return dict_msg_to_markdown(message)\n elif isinstance(message, ResponseFunctionToolCall): # type: ignore[return]\n too_use_str = _format_tool_call(message.name, message.arguments, message.call_id)\n return f\"### Tool Use\\n```\\n{too_use_str}\\n```\\n\"\n else:\n return str(message)\n\n\ndef update_chat_messages():","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.format_chat_message","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.format_chat_message#L745-L759","kind":"function","name":"format_chat_message","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":745,"end_line":759,"context_start_line":725,"context_end_line":779,"code":" case \"text\":\n parts.append(f\"\\n```\\n{item['text']}\\n```\\n\")\n case \"tool_use\":\n tool_use = _format_tool_call(item[\"name\"], item[\"input\"], item[\"id\"])\n parts.append(f\"\\n```\\n{tool_use}\\n```\\n\")\n case _:\n parts.append(f\"\\n```\\n{str(item)}\\n```\\n\")\n\n markdown = f\"### {d['role'].capitalize()}\\n\"\n markdown += \"\\n\".join(parts)\n return markdown\n\n\ndef _format_tool_call(name: str, input: str, call_id: str):\n \"\"\"\n Format a tool call to markdown.\n \"\"\"\n return f\"Tool Call: {name} `{input}` (call_id: {call_id})\"\n\n\ndef format_chat_message(message: BaseMessage | MessageBuilder | dict):\n \"\"\"\n Format a message to markdown.\n \"\"\"\n if isinstance(message, BaseMessage):\n return message.content\n elif isinstance(message, MessageBuilder):\n return message.to_markdown()\n elif isinstance(message, dict):\n return dict_msg_to_markdown(message)\n elif isinstance(message, ResponseFunctionToolCall): # type: ignore[return]\n too_use_str = _format_tool_call(message.name, message.arguments, message.call_id)\n return f\"### Tool Use\\n```\\n{too_use_str}\\n```\\n\"\n else:\n return str(message)\n\n\ndef update_chat_messages():\n global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = agent_info.get(\"chat_messages\", [\"No Chat Messages\"])\n if isinstance(chat_messages, Discussion):\n return chat_messages.to_markdown()\n\n if isinstance(chat_messages, list):\n chat_messages = [format_chat_message(m) for m in chat_messages]\n return \"\\n\\n\".join(chat_messages)\n\n\ndef update_task_error():\n global info\n try:\n stack_trace = info.exp_result.summary_info.get(\"stack_trace\", None)\n return f\"\"\"{code(stack_trace)}\"\"\"\n except FileNotFoundError:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_chat_messages","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_chat_messages#L762-L771","kind":"function","name":"update_chat_messages","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":762,"end_line":771,"context_start_line":742,"context_end_line":791,"code":" return f\"Tool Call: {name} `{input}` (call_id: {call_id})\"\n\n\ndef format_chat_message(message: BaseMessage | 
MessageBuilder | dict):\n \"\"\"\n Format a message to markdown.\n \"\"\"\n if isinstance(message, BaseMessage):\n return message.content\n elif isinstance(message, MessageBuilder):\n return message.to_markdown()\n elif isinstance(message, dict):\n return dict_msg_to_markdown(message)\n elif isinstance(message, ResponseFunctionToolCall): # type: ignore[return]\n too_use_str = _format_tool_call(message.name, message.arguments, message.call_id)\n return f\"### Tool Use\\n```\\n{too_use_str}\\n```\\n\"\n else:\n return str(message)\n\n\ndef update_chat_messages():\n global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = agent_info.get(\"chat_messages\", [\"No Chat Messages\"])\n if isinstance(chat_messages, Discussion):\n return chat_messages.to_markdown()\n\n if isinstance(chat_messages, list):\n chat_messages = [format_chat_message(m) for m in chat_messages]\n return \"\\n\\n\".join(chat_messages)\n\n\ndef update_task_error():\n global info\n try:\n stack_trace = info.exp_result.summary_info.get(\"stack_trace\", None)\n return f\"\"\"{code(stack_trace)}\"\"\"\n except FileNotFoundError:\n return \"No Task Error\"\n\n\ndef update_logs():\n global info\n try:\n return f\"\"\"{info.exp_result.logs}\"\"\"\n except FileNotFoundError:\n return \"\"\"No Logs\"\"\"\n\n\ndef update_stats():","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_task_error","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_task_error#L774-L780","kind":"function","name":"update_task_error","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":774,"end_line":780,"context_start_line":754,"context_end_line":800,"code":" return dict_msg_to_markdown(message)\n elif isinstance(message, ResponseFunctionToolCall): # type: ignore[return]\n too_use_str = _format_tool_call(message.name, message.arguments, message.call_id)\n return f\"### Tool Use\\n```\\n{too_use_str}\\n```\\n\"\n else:\n return str(message)\n\n\ndef update_chat_messages():\n global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = agent_info.get(\"chat_messages\", [\"No Chat Messages\"])\n if isinstance(chat_messages, Discussion):\n return chat_messages.to_markdown()\n\n if isinstance(chat_messages, list):\n chat_messages = [format_chat_message(m) for m in chat_messages]\n return \"\\n\\n\".join(chat_messages)\n\n\ndef update_task_error():\n global info\n try:\n stack_trace = info.exp_result.summary_info.get(\"stack_trace\", None)\n return f\"\"\"{code(stack_trace)}\"\"\"\n except FileNotFoundError:\n return \"No Task Error\"\n\n\ndef update_logs():\n global info\n try:\n return f\"\"\"{info.exp_result.logs}\"\"\"\n except FileNotFoundError:\n return \"\"\"No Logs\"\"\"\n\n\ndef update_stats():\n global info\n try:\n stats = info.exp_result.steps_info[info.step].stats\n return pd.DataFrame(stats.items(), columns=[\"name\", \"value\"])\n except (FileNotFoundError, IndexError):\n return None\n\n\ndef update_agent_info_md():","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} 
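`format_chat_message` and `dict_msg_to_markdown` above dispatch first on the message's container type, then on each content item's `"type"` field. A self-contained sketch of the per-item dispatch; `message_to_markdown` and the sample message are made up for illustration:

```python
def message_to_markdown(message: dict) -> str:
    # Mirrors dict_msg_to_markdown: images become inline markdown images,
    # everything else is wrapped in a fenced block under a role heading.
    parts = []
    for item in message["content"]:
        match item["type"]:
            case "image":
                parts.append(f"![Image]({item['image']})")
            case "text":
                parts.append(f"\n```\n{item['text']}\n```\n")
            case _:
                parts.append(f"\n```\n{item}\n```\n")
    return f"### {message['role'].capitalize()}\n" + "\n".join(parts)


print(
    message_to_markdown(
        {"role": "user", "content": [{"type": "text", "text": "click(bid='42')"}]}
    )
)
```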
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_logs","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_logs#L783-L788","kind":"function","name":"update_logs","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":783,"end_line":788,"context_start_line":763,"context_end_line":808,"code":" global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = agent_info.get(\"chat_messages\", [\"No Chat Messages\"])\n if isinstance(chat_messages, Discussion):\n return chat_messages.to_markdown()\n\n if isinstance(chat_messages, list):\n chat_messages = [format_chat_message(m) for m in chat_messages]\n return \"\\n\\n\".join(chat_messages)\n\n\ndef update_task_error():\n global info\n try:\n stack_trace = info.exp_result.summary_info.get(\"stack_trace\", None)\n return f\"\"\"{code(stack_trace)}\"\"\"\n except FileNotFoundError:\n return \"No Task Error\"\n\n\ndef update_logs():\n global info\n try:\n return f\"\"\"{info.exp_result.logs}\"\"\"\n except FileNotFoundError:\n return \"\"\"No Logs\"\"\"\n\n\ndef update_stats():\n global info\n try:\n stats = info.exp_result.steps_info[info.step].stats\n return pd.DataFrame(stats.items(), columns=[\"name\", \"value\"])\n except (FileNotFoundError, IndexError):\n return None\n\n\ndef update_agent_info_md():\n global info\n try:\n agent_info = info.exp_result.steps_info[info.step].agent_info\n page = agent_info.get(\"markdown_page\", None)\n if page is None:\n page = agent_info.get(\"markup_page\", None) # TODO: remove in a while\n if page is None:\n page = \"\"\"Fill up markdown_page attribute in AgentInfo to display here.\"\"\"","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_stats","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_stats#L791-L797","kind":"function","name":"update_stats","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":791,"end_line":797,"context_start_line":771,"context_end_line":817,"code":" return \"\\n\\n\".join(chat_messages)\n\n\ndef update_task_error():\n global info\n try:\n stack_trace = info.exp_result.summary_info.get(\"stack_trace\", None)\n return f\"\"\"{code(stack_trace)}\"\"\"\n except FileNotFoundError:\n return \"No Task Error\"\n\n\ndef update_logs():\n global info\n try:\n return f\"\"\"{info.exp_result.logs}\"\"\"\n except FileNotFoundError:\n return \"\"\"No Logs\"\"\"\n\n\ndef update_stats():\n global info\n try:\n stats = info.exp_result.steps_info[info.step].stats\n return pd.DataFrame(stats.items(), columns=[\"name\", \"value\"])\n except (FileNotFoundError, IndexError):\n return None\n\n\ndef update_agent_info_md():\n global info\n try:\n agent_info = info.exp_result.steps_info[info.step].agent_info\n page = agent_info.get(\"markdown_page\", None)\n if page is None:\n page = agent_info.get(\"markup_page\", None) # TODO: remove in a while\n if page is None:\n page = \"\"\"Fill up markdown_page attribute in AgentInfo to display here.\"\"\"\n return page\n except (FileNotFoundError, IndexError):\n return None\n\n\ndef update_agent_info_html():\n global info\n # screenshots from current and next step\n try:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_agent_info_md","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_agent_info_md#L800-L811","kind":"function","name":"update_agent_info_md","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":800,"end_line":811,"context_start_line":780,"context_end_line":831,"code":" return \"No Task Error\"\n\n\ndef update_logs():\n global info\n try:\n return f\"\"\"{info.exp_result.logs}\"\"\"\n except FileNotFoundError:\n return \"\"\"No Logs\"\"\"\n\n\ndef update_stats():\n global info\n try:\n stats = info.exp_result.steps_info[info.step].stats\n return pd.DataFrame(stats.items(), columns=[\"name\", \"value\"])\n except (FileNotFoundError, IndexError):\n return None\n\n\ndef update_agent_info_md():\n global info\n try:\n agent_info = info.exp_result.steps_info[info.step].agent_info\n page = agent_info.get(\"markdown_page\", None)\n if page is None:\n page = agent_info.get(\"markup_page\", None) # TODO: remove in a while\n if page is None:\n page = \"\"\"Fill up markdown_page attribute in AgentInfo to display here.\"\"\"\n return page\n except (FileNotFoundError, IndexError):\n return None\n\n\ndef update_agent_info_html():\n global info\n # screenshots from current and next step\n try:\n s1, action_str = get_screenshot(info, info.step, False)\n s2, action_str = get_screenshot(info, info.step + 1, False)\n agent_info = info.exp_result.steps_info[info.step].agent_info\n # Minimal: show step_hints if present\n hints = (\n agent_info.get(\"step_hints\")\n or agent_info.get(\"hints\")\n or agent_info.get(\"extra_info\", {}).get(\"step_hints\")\n )\n if hints:\n if not isinstance(hints, (list, tuple)):\n hints = [hints]\n items = \"\".join(f\"
<li>{html.escape(str(h))}</li>\" for h in hints)\n hints_html = f\"<h3>Step Hints</h3><ul>{items}</ul>
    \"","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_agent_info_html","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_agent_info_html#L814-L841","kind":"function","name":"update_agent_info_html","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":814,"end_line":841,"context_start_line":794,"context_end_line":861,"code":" stats = info.exp_result.steps_info[info.step].stats\n return pd.DataFrame(stats.items(), columns=[\"name\", \"value\"])\n except (FileNotFoundError, IndexError):\n return None\n\n\ndef update_agent_info_md():\n global info\n try:\n agent_info = info.exp_result.steps_info[info.step].agent_info\n page = agent_info.get(\"markdown_page\", None)\n if page is None:\n page = agent_info.get(\"markup_page\", None) # TODO: remove in a while\n if page is None:\n page = \"\"\"Fill up markdown_page attribute in AgentInfo to display here.\"\"\"\n return page\n except (FileNotFoundError, IndexError):\n return None\n\n\ndef update_agent_info_html():\n global info\n # screenshots from current and next step\n try:\n s1, action_str = get_screenshot(info, info.step, False)\n s2, action_str = get_screenshot(info, info.step + 1, False)\n agent_info = info.exp_result.steps_info[info.step].agent_info\n # Minimal: show step_hints if present\n hints = (\n agent_info.get(\"step_hints\")\n or agent_info.get(\"hints\")\n or agent_info.get(\"extra_info\", {}).get(\"step_hints\")\n )\n if hints:\n if not isinstance(hints, (list, tuple)):\n hints = [hints]\n items = \"\".join(f\"
<li>{html.escape(str(h))}</li>\" for h in hints)\n hints_html = f\"<h3>Step Hints</h3><ul>{items}</ul>
    \"\n return _page_to_iframe(hints_html), s1, s2\n page = agent_info.get(\"html_page\", [\"No Agent Info\"])\n if page is None:\n page = \"\"\"Fill up html_page attribute in AgentInfo to display here.\"\"\"\n else:\n page = _page_to_iframe(page)\n return page, s1, s2\n\n except (FileNotFoundError, IndexError):\n return None, None, None\n\n\ndef _page_to_iframe(page: str):\n html_bytes = page.encode(\"utf-8\")\n encoded_html = base64.b64encode(html_bytes).decode(\"ascii\")\n data_url = f\"data:text/html;base64,{encoded_html}\"\n\n # Create iframe with the data URL\n page = f\"\"\"\n\n\"\"\"\n return page\n\n\ndef submit_action(input_text):\n global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = deepcopy(agent_info.get(\"chat_messages\", [\"No Chat Messages\"])[:2])","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray._page_to_iframe","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray._page_to_iframe#L844-L855","kind":"function","name":"_page_to_iframe","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":844,"end_line":855,"context_start_line":824,"context_end_line":875,"code":" or agent_info.get(\"hints\")\n or agent_info.get(\"extra_info\", {}).get(\"step_hints\")\n )\n if hints:\n if not isinstance(hints, (list, tuple)):\n hints = [hints]\n items = \"\".join(f\"
<li>{html.escape(str(h))}</li>\" for h in hints)\n hints_html = f\"<h3>Step Hints</h3><ul>{items}</ul>
    \"\n return _page_to_iframe(hints_html), s1, s2\n page = agent_info.get(\"html_page\", [\"No Agent Info\"])\n if page is None:\n page = \"\"\"Fill up html_page attribute in AgentInfo to display here.\"\"\"\n else:\n page = _page_to_iframe(page)\n return page, s1, s2\n\n except (FileNotFoundError, IndexError):\n return None, None, None\n\n\ndef _page_to_iframe(page: str):\n html_bytes = page.encode(\"utf-8\")\n encoded_html = base64.b64encode(html_bytes).decode(\"ascii\")\n data_url = f\"data:text/html;base64,{encoded_html}\"\n\n # Create iframe with the data URL\n page = f\"\"\"\n\n\"\"\"\n return page\n\n\ndef submit_action(input_text):\n global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = deepcopy(agent_info.get(\"chat_messages\", [\"No Chat Messages\"])[:2])\n if isinstance(chat_messages[1], BaseMessage): # TODO remove once langchain is deprecated\n assert isinstance(chat_messages[1], HumanMessage), \"Second message should be user\"\n chat_messages = [\n make_system_message(chat_messages[0].content),\n make_user_message(chat_messages[1].content),\n ]\n elif isinstance(chat_messages[1], dict):\n assert chat_messages[1].get(\"role\", None) == \"user\", \"Second message should be user\"\n else:\n raise ValueError(\"Chat messages should be a list of BaseMessage or dict\")\n\n client = OpenAI()\n chat_messages[1][\"content\"] = input_text\n completion = client.chat.completions.create(","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.submit_action","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.submit_action#L858-L880","kind":"function","name":"submit_action","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":858,"end_line":880,"context_start_line":838,"context_end_line":900,"code":" return page, s1, s2\n\n except (FileNotFoundError, IndexError):\n return None, None, None\n\n\ndef _page_to_iframe(page: str):\n html_bytes = page.encode(\"utf-8\")\n encoded_html = base64.b64encode(html_bytes).decode(\"ascii\")\n data_url = f\"data:text/html;base64,{encoded_html}\"\n\n # Create iframe with the data URL\n page = f\"\"\"\n\n\"\"\"\n return page\n\n\ndef submit_action(input_text):\n global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = deepcopy(agent_info.get(\"chat_messages\", [\"No Chat Messages\"])[:2])\n if isinstance(chat_messages[1], BaseMessage): # TODO remove once langchain is deprecated\n assert isinstance(chat_messages[1], HumanMessage), \"Second message should be user\"\n chat_messages = [\n make_system_message(chat_messages[0].content),\n make_user_message(chat_messages[1].content),\n ]\n elif isinstance(chat_messages[1], dict):\n assert chat_messages[1].get(\"role\", None) == \"user\", \"Second message should be user\"\n else:\n raise ValueError(\"Chat messages should be a list of BaseMessage or dict\")\n\n client = OpenAI()\n chat_messages[1][\"content\"] = input_text\n completion = client.chat.completions.create(\n model=\"gpt-4o-mini\",\n messages=chat_messages,\n )\n result_text = completion.choices[0].message.content\n return result_text\n\n\ndef update_prompt_tests():\n global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = agent_info.get(\"chat_messages\", [\"No Chat Messages\"])\n prompt = chat_messages[1]\n if isinstance(prompt, dict):\n prompt = prompt.get(\"content\", \"No Content\")\n 
return prompt, prompt\n\n\ndef select_step(episode_id: EpisodeId, evt: gr.SelectData):\n global info\n step = info.click_mapper.to_step(evt.index[0])\n info.step = step\n return StepId(episode_id, step)\n\n\ndef update_step_info():","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_prompt_tests","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_prompt_tests#L883-L890","kind":"function","name":"update_prompt_tests","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":883,"end_line":890,"context_start_line":863,"context_end_line":910,"code":" assert isinstance(chat_messages[1], HumanMessage), \"Second message should be user\"\n chat_messages = [\n make_system_message(chat_messages[0].content),\n make_user_message(chat_messages[1].content),\n ]\n elif isinstance(chat_messages[1], dict):\n assert chat_messages[1].get(\"role\", None) == \"user\", \"Second message should be user\"\n else:\n raise ValueError(\"Chat messages should be a list of BaseMessage or dict\")\n\n client = OpenAI()\n chat_messages[1][\"content\"] = input_text\n completion = client.chat.completions.create(\n model=\"gpt-4o-mini\",\n messages=chat_messages,\n )\n result_text = completion.choices[0].message.content\n return result_text\n\n\ndef update_prompt_tests():\n global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = agent_info.get(\"chat_messages\", [\"No Chat Messages\"])\n prompt = chat_messages[1]\n if isinstance(prompt, dict):\n prompt = prompt.get(\"content\", \"No Content\")\n return prompt, prompt\n\n\ndef select_step(episode_id: EpisodeId, evt: gr.SelectData):\n global info\n step = info.click_mapper.to_step(evt.index[0])\n info.step = step\n return StepId(episode_id, step)\n\n\ndef update_step_info():\n global info\n return [\n get_episode_info(info),\n get_action_info(info),\n get_state_error(info),\n ]\n\n\ndef get_obs(key: str, default=None):\n global info","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.select_step","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.select_step#L893-L897","kind":"function","name":"select_step","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":893,"end_line":897,"context_start_line":873,"context_end_line":917,"code":" client = OpenAI()\n chat_messages[1][\"content\"] = input_text\n completion = client.chat.completions.create(\n model=\"gpt-4o-mini\",\n messages=chat_messages,\n )\n result_text = completion.choices[0].message.content\n return result_text\n\n\ndef update_prompt_tests():\n global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = agent_info.get(\"chat_messages\", [\"No Chat Messages\"])\n prompt = chat_messages[1]\n if isinstance(prompt, dict):\n prompt = prompt.get(\"content\", \"No Content\")\n return prompt, prompt\n\n\ndef select_step(episode_id: EpisodeId, evt: gr.SelectData):\n global info\n step = info.click_mapper.to_step(evt.index[0])\n info.step = step\n return StepId(episode_id, step)\n\n\ndef update_step_info():\n global info\n return [\n get_episode_info(info),\n get_action_info(info),\n get_state_error(info),\n ]\n\n\ndef get_obs(key: str, default=None):\n global info\n obs = info.exp_result.steps_info[info.step].obs\n return obs.get(key, 
default)\n\n\ndef code(txt):\n # return f\"\"\"
<pre>{txt}</pre>
    \"\"\"","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_step_info","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_step_info#L900-L906","kind":"function","name":"update_step_info","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":900,"end_line":906,"context_start_line":880,"context_end_line":926,"code":" return result_text\n\n\ndef update_prompt_tests():\n global info\n agent_info = info.exp_result.steps_info[info.step].agent_info\n chat_messages = agent_info.get(\"chat_messages\", [\"No Chat Messages\"])\n prompt = chat_messages[1]\n if isinstance(prompt, dict):\n prompt = prompt.get(\"content\", \"No Content\")\n return prompt, prompt\n\n\ndef select_step(episode_id: EpisodeId, evt: gr.SelectData):\n global info\n step = info.click_mapper.to_step(evt.index[0])\n info.step = step\n return StepId(episode_id, step)\n\n\ndef update_step_info():\n global info\n return [\n get_episode_info(info),\n get_action_info(info),\n get_state_error(info),\n ]\n\n\ndef get_obs(key: str, default=None):\n global info\n obs = info.exp_result.steps_info[info.step].obs\n return obs.get(key, default)\n\n\ndef code(txt):\n # return f\"\"\"
<pre>{txt}</pre>
    \"\"\"\n return f\"\"\"```\\n{txt}\\n```\"\"\"\n\n\ndef get_episode_info(info: Info):\n try:\n env_args = info.exp_result.exp_args.env_args\n steps_info = info.exp_result.steps_info\n if info.step >= len(steps_info):\n info.step = len(steps_info) - 1","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.get_obs","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.get_obs#L909-L912","kind":"function","name":"get_obs","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":909,"end_line":912,"context_start_line":889,"context_end_line":932,"code":" prompt = prompt.get(\"content\", \"No Content\")\n return prompt, prompt\n\n\ndef select_step(episode_id: EpisodeId, evt: gr.SelectData):\n global info\n step = info.click_mapper.to_step(evt.index[0])\n info.step = step\n return StepId(episode_id, step)\n\n\ndef update_step_info():\n global info\n return [\n get_episode_info(info),\n get_action_info(info),\n get_state_error(info),\n ]\n\n\ndef get_obs(key: str, default=None):\n global info\n obs = info.exp_result.steps_info[info.step].obs\n return obs.get(key, default)\n\n\ndef code(txt):\n # return f\"\"\"
<pre style=\"white-space: pre-wrap; word-wrap:break-word;\">{txt}</pre>
    \"\"\"\n return f\"\"\"```\\n{txt}\\n```\"\"\"\n\n\ndef get_episode_info(info: Info):\n try:\n env_args = info.exp_result.exp_args.env_args\n steps_info = info.exp_result.steps_info\n if info.step >= len(steps_info):\n info.step = len(steps_info) - 1\n if len(steps_info) == 0:\n return \"No steps were taken in this episode.\"\n step_info = steps_info[info.step]\n try:\n goal = step_info.obs[\"goal_object\"]\n except KeyError:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.code","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.code#L915-L918","kind":"function","name":"code","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":915,"end_line":918,"context_start_line":895,"context_end_line":938,"code":" step = info.click_mapper.to_step(evt.index[0])\n info.step = step\n return StepId(episode_id, step)\n\n\ndef update_step_info():\n global info\n return [\n get_episode_info(info),\n get_action_info(info),\n get_state_error(info),\n ]\n\n\ndef get_obs(key: str, default=None):\n global info\n obs = info.exp_result.steps_info[info.step].obs\n return obs.get(key, default)\n\n\ndef code(txt):\n # return f\"\"\"
<pre style=\"white-space: pre-wrap; word-wrap:break-word;\">{txt}</pre>
    \"\"\"\n return f\"\"\"```\\n{txt}\\n```\"\"\"\n\n\ndef get_episode_info(info: Info):\n try:\n env_args = info.exp_result.exp_args.env_args\n steps_info = info.exp_result.steps_info\n if info.step >= len(steps_info):\n info.step = len(steps_info) - 1\n if len(steps_info) == 0:\n return \"No steps were taken in this episode.\"\n step_info = steps_info[info.step]\n try:\n goal = step_info.obs[\"goal_object\"]\n except KeyError:\n goal = None\n try:\n cum_reward = info.exp_result.summary_info[\"cum_reward\"]\n except FileNotFoundError:\n cum_reward = np.nan\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.get_episode_info","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.get_episode_info#L921-L964","kind":"function","name":"get_episode_info","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":921,"end_line":964,"context_start_line":901,"context_end_line":984,"code":" global info\n return [\n get_episode_info(info),\n get_action_info(info),\n get_state_error(info),\n ]\n\n\ndef get_obs(key: str, default=None):\n global info\n obs = info.exp_result.steps_info[info.step].obs\n return obs.get(key, default)\n\n\ndef code(txt):\n # return f\"\"\"
<pre style=\"white-space: pre-wrap; word-wrap:break-word;\">{txt}</pre>
    \"\"\"\n return f\"\"\"```\\n{txt}\\n```\"\"\"\n\n\ndef get_episode_info(info: Info):\n try:\n env_args = info.exp_result.exp_args.env_args\n steps_info = info.exp_result.steps_info\n if info.step >= len(steps_info):\n info.step = len(steps_info) - 1\n if len(steps_info) == 0:\n return \"No steps were taken in this episode.\"\n step_info = steps_info[info.step]\n try:\n goal = step_info.obs[\"goal_object\"]\n except KeyError:\n goal = None\n try:\n cum_reward = info.exp_result.summary_info[\"cum_reward\"]\n except FileNotFoundError:\n cum_reward = np.nan\n\n exp_dir = info.exp_result.exp_dir\n exp_dir_str = f\"{exp_dir.parent.name}/{exp_dir.name}\"\n\n info = f\"\"\"\\\n### {env_args.task_name} (seed: {env_args.task_seed})\n### Step {info.step} / {len(steps_info) - 1} (Reward: {cum_reward:.1f})\n\n**Goal:**\n\n{code(str(AgentLabBaseMessage(\"\", goal)))}\n\n**Task info:**\n\n{code(step_info.task_info)}\n\n**Terminated or Truncated:**\n{code(f\"Terminated: {step_info.terminated}, Truncated: {step_info.truncated}\")}\n\n**exp_dir:**\n\n{code(exp_dir_str)}\"\"\"\n except Exception:\n info = f\"\"\"\\\n**Error while getting episode info**\n{code(traceback.format_exc())}\"\"\"\n return info\n\n\ndef get_action_info(info: Info):\n steps_info = info.exp_result.steps_info\n img, action_str = get_screenshot(info, step=info.step, annotate=True) # to update click_mapper\n\n if len(steps_info) == 0:\n return \"No steps were taken\"\n if len(steps_info) <= info.step:\n return f\"Step {info.step} is out of bounds. The episode has {len(steps_info)} steps.\"\n\n step_info = steps_info[info.step]\n action_info = f\"\"\"\\\n**Action:**\n\n{action_str}\n\"\"\"\n think = step_info.agent_info.get(\"think\", None)\n if think is not None:\n action_info += f\"\"\"","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.get_action_info","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.get_action_info#L967-L988","kind":"function","name":"get_action_info","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":967,"end_line":988,"context_start_line":947,"context_end_line":1008,"code":"\n{code(str(AgentLabBaseMessage(\"\", goal)))}\n\n**Task info:**\n\n{code(step_info.task_info)}\n\n**Terminated or Truncated:**\n{code(f\"Terminated: {step_info.terminated}, Truncated: {step_info.truncated}\")}\n\n**exp_dir:**\n\n{code(exp_dir_str)}\"\"\"\n except Exception:\n info = f\"\"\"\\\n**Error while getting episode info**\n{code(traceback.format_exc())}\"\"\"\n return info\n\n\ndef get_action_info(info: Info):\n steps_info = info.exp_result.steps_info\n img, action_str = get_screenshot(info, step=info.step, annotate=True) # to update click_mapper\n\n if len(steps_info) == 0:\n return \"No steps were taken\"\n if len(steps_info) <= info.step:\n return f\"Step {info.step} is out of bounds. 
The episode has {len(steps_info)} steps.\"\n\n step_info = steps_info[info.step]\n action_info = f\"\"\"\\\n**Action:**\n\n{action_str}\n\"\"\"\n think = step_info.agent_info.get(\"think\", None)\n if think is not None:\n action_info += f\"\"\"\n**Think:**\n\n{code(think)}\"\"\"\n return action_info\n\n\ndef get_state_error(state: Info):\n try:\n step_info = state.exp_result.steps_info[state.step + 1]\n err_msg = step_info.obs.get(\"last_action_error\", None)\n except (IndexError, AttributeError):\n err_msg = None\n\n if err_msg is None or len(err_msg) == 0:\n err_msg = \"No Error\"\n return f\"\"\"\\\n**Step error after action:**\n\n{code(err_msg)}\"\"\"\n\n\ndef get_seeds_df(result_df: pd.DataFrame, task_name: str):\n result_df = result_df.reset_index(inplace=False)\n result_df = result_df[result_df[TASK_NAME_KEY] == task_name]","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.get_state_error","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.get_state_error#L991-L1003","kind":"function","name":"get_state_error","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":991,"end_line":1003,"context_start_line":971,"context_end_line":1023,"code":" if len(steps_info) == 0:\n return \"No steps were taken\"\n if len(steps_info) <= info.step:\n return f\"Step {info.step} is out of bounds. The episode has {len(steps_info)} steps.\"\n\n step_info = steps_info[info.step]\n action_info = f\"\"\"\\\n**Action:**\n\n{action_str}\n\"\"\"\n think = step_info.agent_info.get(\"think\", None)\n if think is not None:\n action_info += f\"\"\"\n**Think:**\n\n{code(think)}\"\"\"\n return action_info\n\n\ndef get_state_error(state: Info):\n try:\n step_info = state.exp_result.steps_info[state.step + 1]\n err_msg = step_info.obs.get(\"last_action_error\", None)\n except (IndexError, AttributeError):\n err_msg = None\n\n if err_msg is None or len(err_msg) == 0:\n err_msg = \"No Error\"\n return f\"\"\"\\\n**Step error after action:**\n\n{code(err_msg)}\"\"\"\n\n\ndef get_seeds_df(result_df: pd.DataFrame, task_name: str):\n result_df = result_df.reset_index(inplace=False)\n result_df = result_df[result_df[TASK_NAME_KEY] == task_name]\n\n def extract_columns(row: pd.Series):\n return pd.Series(\n {\n \"idx\": row.get(\"_row_index\", None),\n \"seed\": row.get(TASK_SEED_KEY, None),\n \"reward\": row.get(\"cum_reward\", None),\n \"err\": bool(row.get(\"err_msg\", None)),\n \"n_steps\": row.get(\"n_steps\", None),\n }\n )\n\n seed_df = result_df.apply(extract_columns, axis=1)\n # Ensure column order and readability\n seed_df = seed_df[[\"seed\", \"reward\", \"err\", \"n_steps\", \"idx\"]]","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.get_seeds_df","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.get_seeds_df#L1006-L1024","kind":"function","name":"get_seeds_df","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1006,"end_line":1024,"context_start_line":986,"context_end_line":1044,"code":"\n{code(think)}\"\"\"\n return action_info\n\n\ndef get_state_error(state: Info):\n try:\n step_info = state.exp_result.steps_info[state.step + 1]\n err_msg = step_info.obs.get(\"last_action_error\", None)\n except (IndexError, AttributeError):\n err_msg = None\n\n if err_msg is None or len(err_msg) == 0:\n err_msg 
= \"No Error\"\n return f\"\"\"\\\n**Step error after action:**\n\n{code(err_msg)}\"\"\"\n\n\ndef get_seeds_df(result_df: pd.DataFrame, task_name: str):\n result_df = result_df.reset_index(inplace=False)\n result_df = result_df[result_df[TASK_NAME_KEY] == task_name]\n\n def extract_columns(row: pd.Series):\n return pd.Series(\n {\n \"idx\": row.get(\"_row_index\", None),\n \"seed\": row.get(TASK_SEED_KEY, None),\n \"reward\": row.get(\"cum_reward\", None),\n \"err\": bool(row.get(\"err_msg\", None)),\n \"n_steps\": row.get(\"n_steps\", None),\n }\n )\n\n seed_df = result_df.apply(extract_columns, axis=1)\n # Ensure column order and readability\n seed_df = seed_df[[\"seed\", \"reward\", \"err\", \"n_steps\", \"idx\"]]\n return seed_df\n\n\ndef on_select_agent(evt: gr.SelectData, df: pd.DataFrame):\n # TODO try to find a clever way to solve the sort bug here\n return info.get_agent_id(df.iloc[evt.index[0]])\n\n\ndef on_select_task(evt: gr.SelectData, df: pd.DataFrame, agent_id: list[tuple]):\n # get col index\n col_idx = df.columns.get_loc(TASK_NAME_KEY)\n return (agent_id, evt.row_value[col_idx])\n\n\ndef update_seeds(agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n seed_df = get_seeds_df(info.agent_df, task_name)\n first_seed = int(seed_df.iloc[0][\"seed\"])\n first_index = int(seed_df.iloc[0][\"idx\"])\n return seed_df, EpisodeId(\n agent_id=agent_id, task_name=task_name, seed=first_seed, row_index=first_index","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.on_select_agent","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.on_select_agent#L1027-L1029","kind":"function","name":"on_select_agent","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1027,"end_line":1029,"context_start_line":1007,"context_end_line":1049,"code":" result_df = result_df.reset_index(inplace=False)\n result_df = result_df[result_df[TASK_NAME_KEY] == task_name]\n\n def extract_columns(row: pd.Series):\n return pd.Series(\n {\n \"idx\": row.get(\"_row_index\", None),\n \"seed\": row.get(TASK_SEED_KEY, None),\n \"reward\": row.get(\"cum_reward\", None),\n \"err\": bool(row.get(\"err_msg\", None)),\n \"n_steps\": row.get(\"n_steps\", None),\n }\n )\n\n seed_df = result_df.apply(extract_columns, axis=1)\n # Ensure column order and readability\n seed_df = seed_df[[\"seed\", \"reward\", \"err\", \"n_steps\", \"idx\"]]\n return seed_df\n\n\ndef on_select_agent(evt: gr.SelectData, df: pd.DataFrame):\n # TODO try to find a clever way to solve the sort bug here\n return info.get_agent_id(df.iloc[evt.index[0]])\n\n\ndef on_select_task(evt: gr.SelectData, df: pd.DataFrame, agent_id: list[tuple]):\n # get col index\n col_idx = df.columns.get_loc(TASK_NAME_KEY)\n return (agent_id, evt.row_value[col_idx])\n\n\ndef update_seeds(agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n seed_df = get_seeds_df(info.agent_df, task_name)\n first_seed = int(seed_df.iloc[0][\"seed\"])\n first_index = int(seed_df.iloc[0][\"idx\"])\n return seed_df, EpisodeId(\n agent_id=agent_id, task_name=task_name, seed=first_seed, row_index=first_index\n )\n\n\ndef on_select_seed(evt: gr.SelectData, df: pd.DataFrame, agent_task_id: tuple):\n agent_id, task_name = agent_task_id","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.on_select_task","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.on_select_task#L1032-L1035","kind":"function","name":"on_select_task","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1032,"end_line":1035,"context_start_line":1012,"context_end_line":1055,"code":" {\n \"idx\": row.get(\"_row_index\", None),\n \"seed\": row.get(TASK_SEED_KEY, None),\n \"reward\": row.get(\"cum_reward\", None),\n \"err\": bool(row.get(\"err_msg\", None)),\n \"n_steps\": row.get(\"n_steps\", None),\n }\n )\n\n seed_df = result_df.apply(extract_columns, axis=1)\n # Ensure column order and readability\n seed_df = seed_df[[\"seed\", \"reward\", \"err\", \"n_steps\", \"idx\"]]\n return seed_df\n\n\ndef on_select_agent(evt: gr.SelectData, df: pd.DataFrame):\n # TODO try to find a clever way to solve the sort bug here\n return info.get_agent_id(df.iloc[evt.index[0]])\n\n\ndef on_select_task(evt: gr.SelectData, df: pd.DataFrame, agent_id: list[tuple]):\n # get col index\n col_idx = df.columns.get_loc(TASK_NAME_KEY)\n return (agent_id, evt.row_value[col_idx])\n\n\ndef update_seeds(agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n seed_df = get_seeds_df(info.agent_df, task_name)\n first_seed = int(seed_df.iloc[0][\"seed\"])\n first_index = int(seed_df.iloc[0][\"idx\"])\n return seed_df, EpisodeId(\n agent_id=agent_id, task_name=task_name, seed=first_seed, row_index=first_index\n )\n\n\ndef on_select_seed(evt: gr.SelectData, df: pd.DataFrame, agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n col_idx = df.columns.get_loc(\"seed\")\n idx_col = df.columns.get_loc(\"idx\")\n seed = evt.row_value[col_idx]\n row_index = evt.row_value[idx_col]\n return EpisodeId(agent_id=agent_id, task_name=task_name, seed=seed, row_index=row_index)\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_seeds","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_seeds#L1038-L1045","kind":"function","name":"update_seeds","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1038,"end_line":1045,"context_start_line":1018,"context_end_line":1065,"code":" }\n )\n\n seed_df = result_df.apply(extract_columns, axis=1)\n # Ensure column order and readability\n seed_df = seed_df[[\"seed\", \"reward\", \"err\", \"n_steps\", \"idx\"]]\n return seed_df\n\n\ndef on_select_agent(evt: gr.SelectData, df: pd.DataFrame):\n # TODO try to find a clever way to solve the sort bug here\n return info.get_agent_id(df.iloc[evt.index[0]])\n\n\ndef on_select_task(evt: gr.SelectData, df: pd.DataFrame, agent_id: list[tuple]):\n # get col index\n col_idx = df.columns.get_loc(TASK_NAME_KEY)\n return (agent_id, evt.row_value[col_idx])\n\n\ndef update_seeds(agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n seed_df = get_seeds_df(info.agent_df, task_name)\n first_seed = int(seed_df.iloc[0][\"seed\"])\n first_index = int(seed_df.iloc[0][\"idx\"])\n return seed_df, EpisodeId(\n agent_id=agent_id, task_name=task_name, seed=first_seed, row_index=first_index\n )\n\n\ndef on_select_seed(evt: gr.SelectData, df: pd.DataFrame, agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n col_idx = df.columns.get_loc(\"seed\")\n idx_col = df.columns.get_loc(\"idx\")\n seed = evt.row_value[col_idx]\n row_index = evt.row_value[idx_col]\n return 
EpisodeId(agent_id=agent_id, task_name=task_name, seed=seed, row_index=row_index)\n\n\ndef new_episode(episode_id: EpisodeId, progress=gr.Progress()):\n print(\"new_episode\", episode_id)\n global info\n info.update_exp_result(episode_id=episode_id)\n return generate_profiling(progress.tqdm), StepId(episode_id, info.step)\n\n\ndef fig_to_pil(fig):\n buf = BytesIO()","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.on_select_seed","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.on_select_seed#L1048-L1054","kind":"function","name":"on_select_seed","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1048,"end_line":1054,"context_start_line":1028,"context_end_line":1074,"code":" # TODO try to find a clever way to solve the sort bug here\n return info.get_agent_id(df.iloc[evt.index[0]])\n\n\ndef on_select_task(evt: gr.SelectData, df: pd.DataFrame, agent_id: list[tuple]):\n # get col index\n col_idx = df.columns.get_loc(TASK_NAME_KEY)\n return (agent_id, evt.row_value[col_idx])\n\n\ndef update_seeds(agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n seed_df = get_seeds_df(info.agent_df, task_name)\n first_seed = int(seed_df.iloc[0][\"seed\"])\n first_index = int(seed_df.iloc[0][\"idx\"])\n return seed_df, EpisodeId(\n agent_id=agent_id, task_name=task_name, seed=first_seed, row_index=first_index\n )\n\n\ndef on_select_seed(evt: gr.SelectData, df: pd.DataFrame, agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n col_idx = df.columns.get_loc(\"seed\")\n idx_col = df.columns.get_loc(\"idx\")\n seed = evt.row_value[col_idx]\n row_index = evt.row_value[idx_col]\n return EpisodeId(agent_id=agent_id, task_name=task_name, seed=seed, row_index=row_index)\n\n\ndef new_episode(episode_id: EpisodeId, progress=gr.Progress()):\n print(\"new_episode\", episode_id)\n global info\n info.update_exp_result(episode_id=episode_id)\n return generate_profiling(progress.tqdm), StepId(episode_id, info.step)\n\n\ndef fig_to_pil(fig):\n buf = BytesIO()\n fig.savefig(buf, format=\"png\")\n buf.seek(0)\n img_pil = Image.open(buf)\n plt.close(fig)\n return img_pil\n\n\ndef format_constant_and_variables():\n global info","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.new_episode","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.new_episode#L1057-L1061","kind":"function","name":"new_episode","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1057,"end_line":1061,"context_start_line":1037,"context_end_line":1081,"code":"\ndef update_seeds(agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n seed_df = get_seeds_df(info.agent_df, task_name)\n first_seed = int(seed_df.iloc[0][\"seed\"])\n first_index = int(seed_df.iloc[0][\"idx\"])\n return seed_df, EpisodeId(\n agent_id=agent_id, task_name=task_name, seed=first_seed, row_index=first_index\n )\n\n\ndef on_select_seed(evt: gr.SelectData, df: pd.DataFrame, agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n col_idx = df.columns.get_loc(\"seed\")\n idx_col = df.columns.get_loc(\"idx\")\n seed = evt.row_value[col_idx]\n row_index = evt.row_value[idx_col]\n return EpisodeId(agent_id=agent_id, task_name=task_name, seed=seed, row_index=row_index)\n\n\ndef new_episode(episode_id: EpisodeId, progress=gr.Progress()):\n 
print(\"new_episode\", episode_id)\n global info\n info.update_exp_result(episode_id=episode_id)\n return generate_profiling(progress.tqdm), StepId(episode_id, info.step)\n\n\ndef fig_to_pil(fig):\n buf = BytesIO()\n fig.savefig(buf, format=\"png\")\n buf.seek(0)\n img_pil = Image.open(buf)\n plt.close(fig)\n return img_pil\n\n\ndef format_constant_and_variables():\n global info\n df = info.result_df\n constants, variables, _ = inspect_results.get_constants_and_variables(df)\n\n # map constants, a dict to a 2 column data frame with name and value\n constants = pd.DataFrame(constants.items(), columns=[\"name\", \"value\"])\n records = []\n for var in variables:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.fig_to_pil","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.fig_to_pil#L1064-L1070","kind":"function","name":"fig_to_pil","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1064,"end_line":1070,"context_start_line":1044,"context_end_line":1090,"code":" agent_id=agent_id, task_name=task_name, seed=first_seed, row_index=first_index\n )\n\n\ndef on_select_seed(evt: gr.SelectData, df: pd.DataFrame, agent_task_id: tuple):\n agent_id, task_name = agent_task_id\n col_idx = df.columns.get_loc(\"seed\")\n idx_col = df.columns.get_loc(\"idx\")\n seed = evt.row_value[col_idx]\n row_index = evt.row_value[idx_col]\n return EpisodeId(agent_id=agent_id, task_name=task_name, seed=seed, row_index=row_index)\n\n\ndef new_episode(episode_id: EpisodeId, progress=gr.Progress()):\n print(\"new_episode\", episode_id)\n global info\n info.update_exp_result(episode_id=episode_id)\n return generate_profiling(progress.tqdm), StepId(episode_id, info.step)\n\n\ndef fig_to_pil(fig):\n buf = BytesIO()\n fig.savefig(buf, format=\"png\")\n buf.seek(0)\n img_pil = Image.open(buf)\n plt.close(fig)\n return img_pil\n\n\ndef format_constant_and_variables():\n global info\n df = info.result_df\n constants, variables, _ = inspect_results.get_constants_and_variables(df)\n\n # map constants, a dict to a 2 column data frame with name and value\n constants = pd.DataFrame(constants.items(), columns=[\"name\", \"value\"])\n records = []\n for var in variables:\n if var == \"stack_trace\":\n continue\n\n # get unique with count and sort by count descending\n unique_counts = df[var].value_counts().sort_values(ascending=False)\n\n for i, (val, count) in enumerate(unique_counts.items()):\n record = {\n \"Name\": var,","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.format_constant_and_variables","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.format_constant_and_variables#L1073-L1112","kind":"function","name":"format_constant_and_variables","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1073,"end_line":1112,"context_start_line":1053,"context_end_line":1132,"code":" row_index = evt.row_value[idx_col]\n return EpisodeId(agent_id=agent_id, task_name=task_name, seed=seed, row_index=row_index)\n\n\ndef new_episode(episode_id: EpisodeId, progress=gr.Progress()):\n print(\"new_episode\", episode_id)\n global info\n info.update_exp_result(episode_id=episode_id)\n return generate_profiling(progress.tqdm), StepId(episode_id, info.step)\n\n\ndef fig_to_pil(fig):\n buf = BytesIO()\n fig.savefig(buf, 
format=\"png\")\n buf.seek(0)\n img_pil = Image.open(buf)\n plt.close(fig)\n return img_pil\n\n\ndef format_constant_and_variables():\n global info\n df = info.result_df\n constants, variables, _ = inspect_results.get_constants_and_variables(df)\n\n # map constants, a dict to a 2 column data frame with name and value\n constants = pd.DataFrame(constants.items(), columns=[\"name\", \"value\"])\n records = []\n for var in variables:\n if var == \"stack_trace\":\n continue\n\n # get unique with count and sort by count descending\n unique_counts = df[var].value_counts().sort_values(ascending=False)\n\n for i, (val, count) in enumerate(unique_counts.items()):\n record = {\n \"Name\": var,\n \"n unique\": len(unique_counts),\n \"i\": i,\n \"count\": f\"{count}/{len(df)}\",\n \"value\": val,\n }\n\n records.append(record)\n if i >= 2:\n break\n\n if len(unique_counts) > 3:\n records.append(\n {\n \"Name\": var,\n \"n unique\": len(unique_counts),\n \"i\": \"...\",\n \"count\": \"...\",\n \"value\": \"...\",\n }\n )\n records.append({\"Name\": \"\"})\n return constants, pd.DataFrame(records)\n\n\ndef get_agent_report(result_df: pd.DataFrame):\n levels = list(range(result_df.index.nlevels))\n\n if len(levels) == 1:\n result_df = result_df.set_index(AGENT_NAME_KEY, append=True)\n levels = list(range(result_df.index.nlevels))\n\n report = result_df.groupby(level=levels[1:]).apply(inspect_results.summarize)\n\n return report\n\n\ndef update_global_stats():\n try:\n stats = inspect_results.global_report(\n info.result_df, reduce_fn=inspect_results.summarize_stats\n )\n stats.reset_index(inplace=True)","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.get_agent_report","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.get_agent_report#L1115-L1124","kind":"function","name":"get_agent_report","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1115,"end_line":1124,"context_start_line":1095,"context_end_line":1144,"code":" }\n\n records.append(record)\n if i >= 2:\n break\n\n if len(unique_counts) > 3:\n records.append(\n {\n \"Name\": var,\n \"n unique\": len(unique_counts),\n \"i\": \"...\",\n \"count\": \"...\",\n \"value\": \"...\",\n }\n )\n records.append({\"Name\": \"\"})\n return constants, pd.DataFrame(records)\n\n\ndef get_agent_report(result_df: pd.DataFrame):\n levels = list(range(result_df.index.nlevels))\n\n if len(levels) == 1:\n result_df = result_df.set_index(AGENT_NAME_KEY, append=True)\n levels = list(range(result_df.index.nlevels))\n\n report = result_df.groupby(level=levels[1:]).apply(inspect_results.summarize)\n\n return report\n\n\ndef update_global_stats():\n try:\n stats = inspect_results.global_report(\n info.result_df, reduce_fn=inspect_results.summarize_stats\n )\n stats.reset_index(inplace=True)\n return stats\n\n except Exception as e:\n warning(f\"Error while updating global stats: {e}\")\n return None\n\n\ndef update_error_report():\n return inspect_results.error_report(info.result_df, max_stack_trace=3, use_log=True)\n\n\ndef new_exp_dir(study_names: list, progress=gr.Progress(), just_refresh=False):","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_global_stats","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_global_stats#L1127-L1137","kind":"function","name":"update_global_stats","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1127,"end_line":1137,"context_start_line":1107,"context_end_line":1157,"code":" \"count\": \"...\",\n \"value\": \"...\",\n }\n )\n records.append({\"Name\": \"\"})\n return constants, pd.DataFrame(records)\n\n\ndef get_agent_report(result_df: pd.DataFrame):\n levels = list(range(result_df.index.nlevels))\n\n if len(levels) == 1:\n result_df = result_df.set_index(AGENT_NAME_KEY, append=True)\n levels = list(range(result_df.index.nlevels))\n\n report = result_df.groupby(level=levels[1:]).apply(inspect_results.summarize)\n\n return report\n\n\ndef update_global_stats():\n try:\n stats = inspect_results.global_report(\n info.result_df, reduce_fn=inspect_results.summarize_stats\n )\n stats.reset_index(inplace=True)\n return stats\n\n except Exception as e:\n warning(f\"Error while updating global stats: {e}\")\n return None\n\n\ndef update_error_report():\n return inspect_results.error_report(info.result_df, max_stack_trace=3, use_log=True)\n\n\ndef new_exp_dir(study_names: list, progress=gr.Progress(), just_refresh=False):\n global info\n\n # remove select_dir_instructions from study_names\n if select_dir_instructions in study_names:\n study_names.remove(select_dir_instructions)\n\n if len(study_names) == 0:\n return None, None, None, None, None, None\n\n info.study_dirs = [info.results_dir / study_name.split(\" - \")[0] for study_name in study_names]\n info.result_df = inspect_results.load_result_df(info.study_dirs, progress_fn=progress.tqdm)\n info.result_df = remove_args_from_col(info.result_df)\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_error_report","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_error_report#L1140-L1141","kind":"function","name":"update_error_report","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1140,"end_line":1141,"context_start_line":1120,"context_end_line":1161,"code":" levels = list(range(result_df.index.nlevels))\n\n report = result_df.groupby(level=levels[1:]).apply(inspect_results.summarize)\n\n return report\n\n\ndef update_global_stats():\n try:\n stats = inspect_results.global_report(\n info.result_df, reduce_fn=inspect_results.summarize_stats\n )\n stats.reset_index(inplace=True)\n return stats\n\n except Exception as e:\n warning(f\"Error while updating global stats: {e}\")\n return None\n\n\ndef update_error_report():\n return inspect_results.error_report(info.result_df, max_stack_trace=3, use_log=True)\n\n\ndef new_exp_dir(study_names: list, progress=gr.Progress(), just_refresh=False):\n global info\n\n # remove select_dir_instructions from study_names\n if select_dir_instructions in study_names:\n study_names.remove(select_dir_instructions)\n\n if len(study_names) == 0:\n return None, None, None, None, None, None\n\n info.study_dirs = [info.results_dir / study_name.split(\" - \")[0] for study_name in study_names]\n info.result_df = inspect_results.load_result_df(info.study_dirs, progress_fn=progress.tqdm)\n info.result_df = remove_args_from_col(info.result_df)\n\n study_summary = inspect_results.summarize_study(info.result_df)\n # save study_summary\n\n 
for study_dir in info.study_dirs:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.new_exp_dir","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.new_exp_dir#L1144-L1178","kind":"function","name":"new_exp_dir","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1144,"end_line":1178,"context_start_line":1124,"context_end_line":1198,"code":" return report\n\n\ndef update_global_stats():\n try:\n stats = inspect_results.global_report(\n info.result_df, reduce_fn=inspect_results.summarize_stats\n )\n stats.reset_index(inplace=True)\n return stats\n\n except Exception as e:\n warning(f\"Error while updating global stats: {e}\")\n return None\n\n\ndef update_error_report():\n return inspect_results.error_report(info.result_df, max_stack_trace=3, use_log=True)\n\n\ndef new_exp_dir(study_names: list, progress=gr.Progress(), just_refresh=False):\n global info\n\n # remove select_dir_instructions from study_names\n if select_dir_instructions in study_names:\n study_names.remove(select_dir_instructions)\n\n if len(study_names) == 0:\n return None, None, None, None, None, None\n\n info.study_dirs = [info.results_dir / study_name.split(\" - \")[0] for study_name in study_names]\n info.result_df = inspect_results.load_result_df(info.study_dirs, progress_fn=progress.tqdm)\n info.result_df = remove_args_from_col(info.result_df)\n\n study_summary = inspect_results.summarize_study(info.result_df)\n # save study_summary\n\n for study_dir in info.study_dirs:\n study_summary.to_csv(study_dir / \"summary_df.csv\", index=False)\n agent_report = display_table(study_summary)\n\n info.agent_id_keys = agent_report.index.names\n agent_report.reset_index(inplace=True)\n\n agent_id = info.get_agent_id(agent_report.iloc[0])\n\n constants, variables = format_constant_and_variables()\n return (\n agent_report,\n agent_id,\n constants,\n variables,\n update_global_stats(),\n update_error_report(),\n )\n\n\ndef new_agent_id(agent_id: list[tuple]):\n global info\n info.filter_agent_id(agent_id=agent_id)\n\n info.tasks_df = inspect_results.reduce_episodes(info.agent_df).reset_index()\n info.tasks_df = info.tasks_df.drop(columns=[\"std_err\"])\n\n # task name of first element\n task_name = info.tasks_df.iloc[0][TASK_NAME_KEY]\n return info.tasks_df, (agent_id, task_name)\n\n\ndef get_directory_contents(results_dir: Path):\n exp_descriptions = []\n for dir in results_dir.iterdir():\n if not dir.is_dir():\n continue\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.new_agent_id","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.new_agent_id#L1181-L1190","kind":"function","name":"new_agent_id","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1181,"end_line":1190,"context_start_line":1161,"context_end_line":1210,"code":" for study_dir in info.study_dirs:\n study_summary.to_csv(study_dir / \"summary_df.csv\", index=False)\n agent_report = display_table(study_summary)\n\n info.agent_id_keys = agent_report.index.names\n agent_report.reset_index(inplace=True)\n\n agent_id = info.get_agent_id(agent_report.iloc[0])\n\n constants, variables = format_constant_and_variables()\n return (\n agent_report,\n agent_id,\n constants,\n variables,\n update_global_stats(),\n update_error_report(),\n )\n\n\ndef 
new_agent_id(agent_id: list[tuple]):\n global info\n info.filter_agent_id(agent_id=agent_id)\n\n info.tasks_df = inspect_results.reduce_episodes(info.agent_df).reset_index()\n info.tasks_df = info.tasks_df.drop(columns=[\"std_err\"])\n\n # task name of first element\n task_name = info.tasks_df.iloc[0][TASK_NAME_KEY]\n return info.tasks_df, (agent_id, task_name)\n\n\ndef get_directory_contents(results_dir: Path):\n exp_descriptions = []\n for dir in results_dir.iterdir():\n if not dir.is_dir():\n continue\n\n exp_description = dir.name\n try:\n # get summary*.csv files and find the most recent\n summary_files = list(dir.glob(\"summary*.csv\"))\n if len(summary_files) != 0:\n most_recent_summary = max(summary_files, key=os.path.getctime)\n summary_df = pd.read_csv(most_recent_summary)\n\n if len(summary_df) == 0:\n continue # skip if all avg_reward are NaN\n\n # get row with max avg_reward","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.get_directory_contents","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.get_directory_contents#L1193-L1223","kind":"function","name":"get_directory_contents","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1193,"end_line":1223,"context_start_line":1173,"context_end_line":1243,"code":" agent_id,\n constants,\n variables,\n update_global_stats(),\n update_error_report(),\n )\n\n\ndef new_agent_id(agent_id: list[tuple]):\n global info\n info.filter_agent_id(agent_id=agent_id)\n\n info.tasks_df = inspect_results.reduce_episodes(info.agent_df).reset_index()\n info.tasks_df = info.tasks_df.drop(columns=[\"std_err\"])\n\n # task name of first element\n task_name = info.tasks_df.iloc[0][TASK_NAME_KEY]\n return info.tasks_df, (agent_id, task_name)\n\n\ndef get_directory_contents(results_dir: Path):\n exp_descriptions = []\n for dir in results_dir.iterdir():\n if not dir.is_dir():\n continue\n\n exp_description = dir.name\n try:\n # get summary*.csv files and find the most recent\n summary_files = list(dir.glob(\"summary*.csv\"))\n if len(summary_files) != 0:\n most_recent_summary = max(summary_files, key=os.path.getctime)\n summary_df = pd.read_csv(most_recent_summary)\n\n if len(summary_df) == 0:\n continue # skip if all avg_reward are NaN\n\n # get row with max avg_reward\n max_reward_row = summary_df.loc[summary_df[\"avg_reward\"].idxmax(skipna=True)]\n reward = max_reward_row[\"avg_reward\"] * 100\n completed = max_reward_row[\"n_completed\"]\n n_err = max_reward_row[\"n_err\"]\n exp_description += (\n f\" - avg-reward: {reward:.1f}% - completed: {completed} - errors: {n_err}\"\n )\n except Exception as e:\n print(f\"Error while reading summary file {most_recent_summary}: {e}\")\n\n exp_descriptions.append(exp_description)\n\n return [select_dir_instructions] + sorted(exp_descriptions, reverse=True)\n\n\ndef most_recent_folder(results_dir: Path):\n return get_most_recent_study(results_dir).name\n\n\ndef refresh_exp_dir_choices(exp_dir_choice):\n global info\n return gr.Dropdown(\n choices=get_directory_contents(info.results_dir), value=exp_dir_choice, scale=1\n )\n\n\ndef generate_profiling(progress_fn):\n global info\n\n if info.exp_result is None:\n return None\n\n fig, ax = plt.subplots(figsize=(20, 3))","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.most_recent_folder","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.most_recent_folder#L1226-L1227","kind":"function","name":"most_recent_folder","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1226,"end_line":1227,"context_start_line":1206,"context_end_line":1247,"code":"\n if len(summary_df) == 0:\n continue # skip if all avg_reward are NaN\n\n # get row with max avg_reward\n max_reward_row = summary_df.loc[summary_df[\"avg_reward\"].idxmax(skipna=True)]\n reward = max_reward_row[\"avg_reward\"] * 100\n completed = max_reward_row[\"n_completed\"]\n n_err = max_reward_row[\"n_err\"]\n exp_description += (\n f\" - avg-reward: {reward:.1f}% - completed: {completed} - errors: {n_err}\"\n )\n except Exception as e:\n print(f\"Error while reading summary file {most_recent_summary}: {e}\")\n\n exp_descriptions.append(exp_description)\n\n return [select_dir_instructions] + sorted(exp_descriptions, reverse=True)\n\n\ndef most_recent_folder(results_dir: Path):\n return get_most_recent_study(results_dir).name\n\n\ndef refresh_exp_dir_choices(exp_dir_choice):\n global info\n return gr.Dropdown(\n choices=get_directory_contents(info.results_dir), value=exp_dir_choice, scale=1\n )\n\n\ndef generate_profiling(progress_fn):\n global info\n\n if info.exp_result is None:\n return None\n\n fig, ax = plt.subplots(figsize=(20, 3))\n\n try:\n summary_info = info.exp_result.summary_info\n except FileNotFoundError:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.refresh_exp_dir_choices","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.refresh_exp_dir_choices#L1230-L1234","kind":"function","name":"refresh_exp_dir_choices","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1230,"end_line":1234,"context_start_line":1210,"context_end_line":1254,"code":" # get row with max avg_reward\n max_reward_row = summary_df.loc[summary_df[\"avg_reward\"].idxmax(skipna=True)]\n reward = max_reward_row[\"avg_reward\"] * 100\n completed = max_reward_row[\"n_completed\"]\n n_err = max_reward_row[\"n_err\"]\n exp_description += (\n f\" - avg-reward: {reward:.1f}% - completed: {completed} - errors: {n_err}\"\n )\n except Exception as e:\n print(f\"Error while reading summary file {most_recent_summary}: {e}\")\n\n exp_descriptions.append(exp_description)\n\n return [select_dir_instructions] + sorted(exp_descriptions, reverse=True)\n\n\ndef most_recent_folder(results_dir: Path):\n return get_most_recent_study(results_dir).name\n\n\ndef refresh_exp_dir_choices(exp_dir_choice):\n global info\n return gr.Dropdown(\n choices=get_directory_contents(info.results_dir), value=exp_dir_choice, scale=1\n )\n\n\ndef generate_profiling(progress_fn):\n global info\n\n if info.exp_result is None:\n return None\n\n fig, ax = plt.subplots(figsize=(20, 3))\n\n try:\n summary_info = info.exp_result.summary_info\n except FileNotFoundError:\n summary_info = {}\n\n info.exp_result.progress_fn = progress_fn\n steps_info = info.exp_result.steps_info\n info.exp_result.progress_fn = None\n\n step_times = plot_profiling(ax, steps_info, summary_info, progress_fn)","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.generate_profiling","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.generate_profiling#L1237-L1258","kind":"function","name":"generate_profiling","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1237,"end_line":1258,"context_start_line":1217,"context_end_line":1278,"code":" )\n except Exception as e:\n print(f\"Error while reading summary file {most_recent_summary}: {e}\")\n\n exp_descriptions.append(exp_description)\n\n return [select_dir_instructions] + sorted(exp_descriptions, reverse=True)\n\n\ndef most_recent_folder(results_dir: Path):\n return get_most_recent_study(results_dir).name\n\n\ndef refresh_exp_dir_choices(exp_dir_choice):\n global info\n return gr.Dropdown(\n choices=get_directory_contents(info.results_dir), value=exp_dir_choice, scale=1\n )\n\n\ndef generate_profiling(progress_fn):\n global info\n\n if info.exp_result is None:\n return None\n\n fig, ax = plt.subplots(figsize=(20, 3))\n\n try:\n summary_info = info.exp_result.summary_info\n except FileNotFoundError:\n summary_info = {}\n\n info.exp_result.progress_fn = progress_fn\n steps_info = info.exp_result.steps_info\n info.exp_result.progress_fn = None\n\n step_times = plot_profiling(ax, steps_info, summary_info, progress_fn)\n fig.tight_layout()\n info.click_mapper = ClickMapper(ax, step_times=step_times)\n\n return fig_to_pil(fig)\n\n\ndef add_patch(ax, start, stop, color, label, edge=False):\n if edge:\n ax.add_patch(\n patches.Rectangle(\n (start, 0),\n stop - start,\n 1,\n edgecolor=color,\n alpha=1,\n label=label,\n fill=False,\n linewidth=3,\n )\n )\n else:\n ax.add_patch(\n patches.Rectangle((start, 0), stop - start, 1, color=color, alpha=1, label=label)\n )","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.add_patch","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.add_patch#L1261-L1278","kind":"function","name":"add_patch","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1261,"end_line":1278,"context_start_line":1241,"context_end_line":1298,"code":" return None\n\n fig, ax = plt.subplots(figsize=(20, 3))\n\n try:\n summary_info = info.exp_result.summary_info\n except FileNotFoundError:\n summary_info = {}\n\n info.exp_result.progress_fn = progress_fn\n steps_info = info.exp_result.steps_info\n info.exp_result.progress_fn = None\n\n step_times = plot_profiling(ax, steps_info, summary_info, progress_fn)\n fig.tight_layout()\n info.click_mapper = ClickMapper(ax, step_times=step_times)\n\n return fig_to_pil(fig)\n\n\ndef add_patch(ax, start, stop, color, label, edge=False):\n if edge:\n ax.add_patch(\n patches.Rectangle(\n (start, 0),\n stop - start,\n 1,\n edgecolor=color,\n alpha=1,\n label=label,\n fill=False,\n linewidth=3,\n )\n )\n else:\n ax.add_patch(\n patches.Rectangle((start, 0), stop - start, 1, color=color, alpha=1, label=label)\n )\n\n\ndef plot_profiling(ax, step_info_list: list[StepInfo], summary_info: dict, progress_fn):\n if len(step_info_list) == 0:\n warning(\"No step info to plot\")\n return None\n\n # Updated labels to include new profiling stages\n labels = [\n \"reset\",\n \"env\",\n \"agent\",\n \"exec action\",\n \"action error\",\n \"wait for page\",\n \"validation\",\n \"get observation\",\n ]\n labels = {e: e for e in 
labels}\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.plot_profiling","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.plot_profiling#L1281-L1449","kind":"function","name":"plot_profiling","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1281,"end_line":1449,"context_start_line":1261,"context_end_line":1457,"code":"def add_patch(ax, start, stop, color, label, edge=False):\n if edge:\n ax.add_patch(\n patches.Rectangle(\n (start, 0),\n stop - start,\n 1,\n edgecolor=color,\n alpha=1,\n label=label,\n fill=False,\n linewidth=3,\n )\n )\n else:\n ax.add_patch(\n patches.Rectangle((start, 0), stop - start, 1, color=color, alpha=1, label=label)\n )\n\n\ndef plot_profiling(ax, step_info_list: list[StepInfo], summary_info: dict, progress_fn):\n if len(step_info_list) == 0:\n warning(\"No step info to plot\")\n return None\n\n # Updated labels to include new profiling stages\n labels = [\n \"reset\",\n \"env\",\n \"agent\",\n \"exec action\",\n \"action error\",\n \"wait for page\",\n \"validation\",\n \"get observation\",\n ]\n labels = {e: e for e in labels}\n\n colors = plt.get_cmap(\"tab20c\").colors\n\n t0 = step_info_list[0].profiling.env_start\n all_times = []\n step_times = []\n for i, step_info in progress_fn(list(enumerate(step_info_list)), desc=\"Building plot.\"):\n assert isinstance(\n step_info, (StepInfo, BGymStepInfo)\n ), f\"Expected StepInfo or BGymStepInfo, got {type(step_info)}\"\n step = step_info.step\n\n prof = deepcopy(step_info.profiling)\n # remove t0 from elements in profiling using for\n for key, value in prof.__dict__.items():\n if isinstance(value, float):\n setattr(prof, key, value - t0)\n all_times.append(value - t0)\n\n if i == 0:\n # reset\n add_patch(ax, prof.env_start, prof.env_stop, colors[14], labels.pop(\"reset\", None))\n\n else:\n # env\n add_patch(ax, prof.env_start, prof.env_stop, colors[1], labels.pop(\"env\", None))\n\n # action\n label = labels.pop(\"exec action\", None)\n add_patch(ax, prof.action_exec_start, prof.action_exec_stop, colors[3], label)\n\n # NEW: Add wait for page loading visualization\n if (\n hasattr(prof, \"wait_for_page_loading_start\")\n and prof.wait_for_page_loading_start is not None\n and prof.wait_for_page_loading_start > 0\n ):\n add_patch(\n ax,\n prof.wait_for_page_loading_start,\n prof.wait_for_page_loading_stop,\n colors[19],\n labels.pop(\"wait for page\", None),\n )\n\n # NEW: Add validation visualization\n if (\n hasattr(prof, \"validation_start\")\n and prof.validation_start is not None\n and prof.validation_start > 0\n ):\n add_patch(\n ax,\n prof.validation_start,\n prof.validation_stop,\n colors[8],\n labels.pop(\"validation\", None),\n )\n\n # NEW: Add get observation visualization\n if (\n hasattr(prof, \"get_observation_start\")\n and prof.get_observation_start is not None\n and prof.get_observation_start > 0\n ):\n add_patch(\n ax,\n prof.get_observation_start,\n prof.get_observation_stop,\n colors[12],\n labels.pop(\"get observation\", None),\n )\n\n try:\n next_step_error = step_info_list[i + 1].obs[\"last_action_error\"]\n except (IndexError, KeyError, TypeError):\n next_step_error = \"\"\n\n if next_step_error:\n # add a hollow rectangle for error\n label = labels.pop(\"action error\", None)\n add_patch(ax, prof.env_start, prof.env_stop, \"red\", label, edge=True)\n\n if step_info.action is not None:\n # Blue rectangle for agent_start to 
agent_stop\n add_patch(ax, prof.agent_start, prof.agent_stop, colors[10], labels.pop(\"agent\", None))\n\n # Black vertical bar at agent stop\n ax.axvline(prof.agent_stop, color=\"black\", linewidth=3)\n step_times.append(prof.agent_stop)\n\n ax.text(\n prof.agent_stop,\n 0,\n str(step + 1),\n color=\"white\",\n fontsize=12,\n verticalalignment=\"bottom\",\n horizontalalignment=\"left\",\n rotation=0,\n clip_on=True,\n fontweight=1000,\n backgroundcolor=colors[12],\n )\n\n if step_info.truncated or step_info.terminated:\n if step_info.truncated:\n color = \"black\"\n elif step_info.terminated:\n if summary_info.get(\"cum_reward\", 0) > 0:\n color = \"limegreen\"\n else:\n color = \"black\"\n\n ax.axvline(prof.env_stop, color=color, linewidth=4, linestyle=\":\")\n\n text = f\"R:{summary_info.get('cum_reward', np.nan):.1f}\"\n\n if summary_info[\"err_msg\"]:\n text = \"Err\"\n color = \"red\"\n\n ax.text(\n prof.env_stop,\n 0.98,\n text,\n color=\"white\",\n fontsize=12,\n verticalalignment=\"top\",\n horizontalalignment=\"right\",\n rotation=0,\n clip_on=True,\n # antialiased=True,\n fontweight=1000,\n backgroundcolor=color,\n )\n\n ax.set_ylim(0, 1)\n ax.set_xlim(0, max(all_times) + 1)\n\n ax.set_xlabel(\"Time\")\n ax.set_yticks([])\n\n # position legend above outside the fig in one row\n ax.legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, 1.2),\n ncol=8, # Updated to accommodate new labels\n frameon=True,\n )\n\n return step_times\n\n\ndef main():\n run_gradio(RESULTS_DIR)\n\n\nif __name__ == \"__main__\":\n main()","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.main","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.main#L1452-L1453","kind":"function","name":"main","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1452,"end_line":1453,"context_start_line":1432,"context_end_line":1457,"code":" backgroundcolor=color,\n )\n\n ax.set_ylim(0, 1)\n ax.set_xlim(0, max(all_times) + 1)\n\n ax.set_xlabel(\"Time\")\n ax.set_yticks([])\n\n # position legend above outside the fig in one row\n ax.legend(\n loc=\"upper center\",\n bbox_to_anchor=(0.5, 1.2),\n ncol=8, # Updated to accommodate new labels\n frameon=True,\n )\n\n return step_times\n\n\ndef main():\n run_gradio(RESULTS_DIR)\n\n\nif __name__ == \"__main__\":\n main()","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.__init__","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.__init__#L60-L62","kind":"function","name":"__init__","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":60,"end_line":62,"context_start_line":40,"context_end_line":82,"code":" df = df.copy()\n df.columns = clean_column_names(df.columns)\n df.index.names = clean_column_names(df.index.names)\n return df\n\n\ndef remove_args_from_col(df: pd.DataFrame):\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n df.index.names = [col.replace(\"_args\", \"\") for col in df.index.names]\n return df\n\n\ndef clean_column_names(col_list):\n # col_list = [col.replace(\"_args\", \"\") for col in col_list]\n col_list = [col.replace(\".\", \".\\n\") for col in col_list] # adding space for word wrap\n # col_list = [col.replace(\"_\", \" \") for col in col_list]\n return col_list\n\n\nclass ClickMapper:\n def __init__(self, ax: plt.Axes, 
step_times: list[float]):\n self.ax = ax\n self.step_times = step_times\n\n def to_time(self, x_pix_coord):\n x_time_coord, _ = self.ax.transData.inverted().transform((x_pix_coord, 0))\n return x_time_coord\n\n def to_step(self, x_pix_coord):\n x_time_coord = self.to_time(x_pix_coord)\n return np.searchsorted(self.step_times, x_time_coord)\n\n\n@dataclass\nclass EpisodeId:\n agent_id: str = None\n task_name: str = None\n seed: int = None\n row_index: int = None # unique row index to disambiguate selections\n\n\n@dataclass\nclass StepId:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.to_time","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.to_time#L64-L66","kind":"function","name":"to_time","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":64,"end_line":66,"context_start_line":44,"context_end_line":86,"code":"\n\ndef remove_args_from_col(df: pd.DataFrame):\n df.columns = [col.replace(\"_args\", \"\") for col in df.columns]\n df.index.names = [col.replace(\"_args\", \"\") for col in df.index.names]\n return df\n\n\ndef clean_column_names(col_list):\n # col_list = [col.replace(\"_args\", \"\") for col in col_list]\n col_list = [col.replace(\".\", \".\\n\") for col in col_list] # adding space for word wrap\n # col_list = [col.replace(\"_\", \" \") for col in col_list]\n return col_list\n\n\nclass ClickMapper:\n def __init__(self, ax: plt.Axes, step_times: list[float]):\n self.ax = ax\n self.step_times = step_times\n\n def to_time(self, x_pix_coord):\n x_time_coord, _ = self.ax.transData.inverted().transform((x_pix_coord, 0))\n return x_time_coord\n\n def to_step(self, x_pix_coord):\n x_time_coord = self.to_time(x_pix_coord)\n return np.searchsorted(self.step_times, x_time_coord)\n\n\n@dataclass\nclass EpisodeId:\n agent_id: str = None\n task_name: str = None\n seed: int = None\n row_index: int = None # unique row index to disambiguate selections\n\n\n@dataclass\nclass StepId:\n episode_id: EpisodeId = None\n step: int = None\n\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.to_step","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.to_step#L68-L70","kind":"function","name":"to_step","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":68,"end_line":70,"context_start_line":48,"context_end_line":90,"code":" df.index.names = [col.replace(\"_args\", \"\") for col in df.index.names]\n return df\n\n\ndef clean_column_names(col_list):\n # col_list = [col.replace(\"_args\", \"\") for col in col_list]\n col_list = [col.replace(\".\", \".\\n\") for col in col_list] # adding space for word wrap\n # col_list = [col.replace(\"_\", \" \") for col in col_list]\n return col_list\n\n\nclass ClickMapper:\n def __init__(self, ax: plt.Axes, step_times: list[float]):\n self.ax = ax\n self.step_times = step_times\n\n def to_time(self, x_pix_coord):\n x_time_coord, _ = self.ax.transData.inverted().transform((x_pix_coord, 0))\n return x_time_coord\n\n def to_step(self, x_pix_coord):\n x_time_coord = self.to_time(x_pix_coord)\n return np.searchsorted(self.step_times, x_time_coord)\n\n\n@dataclass\nclass EpisodeId:\n agent_id: str = None\n task_name: str = None\n seed: int = None\n row_index: int = None # unique row index to disambiguate selections\n\n\n@dataclass\nclass StepId:\n 
episode_id: EpisodeId = None\n step: int = None\n\n\n@dataclass\nclass Info:\n results_dir: Path = None # to root directory of all experiments\n study_dirs: Path = None # the path of the currently selected experiment","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.update_exp_result","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.update_exp_result#L100-L118","kind":"function","name":"update_exp_result","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":100,"end_line":118,"context_start_line":80,"context_end_line":138,"code":"\n@dataclass\nclass StepId:\n episode_id: EpisodeId = None\n step: int = None\n\n\n@dataclass\nclass Info:\n results_dir: Path = None # to root directory of all experiments\n study_dirs: Path = None # the path of the currently selected experiment\n result_df: pd.DataFrame = None # the raw loaded df\n agent_df: pd.DataFrame = None # the df filtered for selected agent\n tasks_df: pd.DataFrame = None # the unique tasks for selected agent\n exp_result: ExpResult = None # the selected episode\n click_mapper: ClickMapper = None # mapping from profiler click to step\n step: int = None # currently selected step\n active_tab: str = \"Screenshot\" # currently selected observation tab\n agent_id_keys: list[str] = None # the list of columns identifying an agent\n\n def update_exp_result(self, episode_id: EpisodeId):\n if self.result_df is None or episode_id.task_name is None or episode_id.seed is None:\n self.exp_result = None\n\n # find unique row using idx\n result_df = self.agent_df.reset_index(inplace=False)\n sub_df = result_df[result_df[\"_row_index\"] == episode_id.row_index]\n if len(sub_df) == 0:\n self.exp_result = None\n raise ValueError(f\"Could not find _row_index: {episode_id.row_index}\")\n\n if len(sub_df) > 1:\n warning(\n f\"Found multiple rows with same row_index {episode_id.row_index} Using the first one.\"\n )\n exp_dir = sub_df.iloc[0][\"exp_dir\"]\n print(exp_dir)\n self.exp_result = ExpResult(exp_dir)\n self.step = 0\n\n def get_agent_id(self, row: pd.Series):\n agent_id = []\n for key in self.agent_id_keys:\n agent_id.append((key, row[key]))\n return agent_id\n\n def filter_agent_id(self, agent_id: list[tuple]):\n # Preserve a stable row index to disambiguate selections later\n tmp_df = self.result_df.reset_index(inplace=False)\n tmp_df[\"_row_index\"] = tmp_df.index\n tmp_df.set_index(TASK_NAME_KEY, inplace=True)\n\n for col, val in agent_id:\n col = col.replace(\".\\n\", \".\")\n tmp_df = tmp_df[tmp_df[col] == val]\n self.agent_df = tmp_df\n\n\ninfo = Info()","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.get_agent_id","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.get_agent_id#L120-L124","kind":"function","name":"get_agent_id","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":120,"end_line":124,"context_start_line":100,"context_end_line":144,"code":" def update_exp_result(self, episode_id: EpisodeId):\n if self.result_df is None or episode_id.task_name is None or episode_id.seed is None:\n self.exp_result = None\n\n # find unique row using idx\n result_df = self.agent_df.reset_index(inplace=False)\n sub_df = result_df[result_df[\"_row_index\"] == episode_id.row_index]\n if len(sub_df) == 0:\n self.exp_result = None\n 
raise ValueError(f\"Could not find _row_index: {episode_id.row_index}\")\n\n        if len(sub_df) > 1:\n            warning(\n                f\"Found multiple rows with same row_index {episode_id.row_index}. Using the first one.\"\n            )\n        exp_dir = sub_df.iloc[0][\"exp_dir\"]\n        print(exp_dir)\n        self.exp_result = ExpResult(exp_dir)\n        self.step = 0\n\n    def get_agent_id(self, row: pd.Series):\n        agent_id = []\n        for key in self.agent_id_keys:\n            agent_id.append((key, row[key]))\n        return agent_id\n\n    def filter_agent_id(self, agent_id: list[tuple]):\n        # Preserve a stable row index to disambiguate selections later\n        tmp_df = self.result_df.reset_index(inplace=False)\n        tmp_df[\"_row_index\"] = tmp_df.index\n        tmp_df.set_index(TASK_NAME_KEY, inplace=True)\n\n        for col, val in agent_id:\n            col = col.replace(\".\\n\", \".\")\n            tmp_df = tmp_df[tmp_df[col] == val]\n        self.agent_df = tmp_df\n\n\ninfo = Info()\n\n\ncss = \"\"\"\n.my-markdown {\n    max-height: 400px;\n    overflow-y: auto;","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.filter_agent_id","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.filter_agent_id#L126-L135","kind":"function","name":"filter_agent_id","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":126,"end_line":135,"context_start_line":106,"context_end_line":155,"code":"        sub_df = result_df[result_df[\"_row_index\"] == episode_id.row_index]\n        if len(sub_df) == 0:\n            self.exp_result = None\n            raise ValueError(f\"Could not find _row_index: {episode_id.row_index}\")\n\n        if len(sub_df) > 1:\n            warning(\n                f\"Found multiple rows with same row_index {episode_id.row_index}. Using the first one.\"\n            )\n        exp_dir = sub_df.iloc[0][\"exp_dir\"]\n        print(exp_dir)\n        self.exp_result = ExpResult(exp_dir)\n        self.step = 0\n\n    def get_agent_id(self, row: pd.Series):\n        agent_id = []\n        for key in self.agent_id_keys:\n            agent_id.append((key, row[key]))\n        return agent_id\n\n    def filter_agent_id(self, agent_id: list[tuple]):\n        # Preserve a stable row index to disambiguate selections later\n        tmp_df = self.result_df.reset_index(inplace=False)\n        tmp_df[\"_row_index\"] = tmp_df.index\n        tmp_df.set_index(TASK_NAME_KEY, inplace=True)\n\n        for col, val in agent_id:\n            col = col.replace(\".\\n\", \".\")\n            tmp_df = tmp_df[tmp_df[col] == val]\n        self.agent_df = tmp_df\n\n\ninfo = Info()\n\n\ncss = \"\"\"\n.my-markdown {\n    max-height: 400px;\n    overflow-y: auto;\n}\n.error-report {\n    max-height: 700px;\n    overflow-y: auto;\n}\n.my-code-view {\n    max-height: 300px;\n    overflow-y: auto;\n}\ncode {\n    white-space: pre-wrap;","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.decorator","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.decorator#L570-L583","kind":"function","name":"decorator","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":570,"end_line":583,"context_start_line":550,"context_end_line":603,"code":"    # print(f\"Key event: {key_event}\")\n    step = step_id.step\n    if key_event.startswith(\"Cmd+Left\"):\n        step = max(0, step - 1)\n    elif key_event.startswith(\"Cmd+Right\"):\n        step = min(len(info.exp_result.steps_info) - 2, step + 1)\n    else:\n        return gr.update()\n    # print(f\"Updating step to {step} from key event {key_event}\")\n    info.step = step\n    step_id = StepId(episode_id=step_id.episode_id, step=step)\n    return (\"\", step_id)\n\n\ndef 
tab_select(evt: gr.SelectData):\n global info\n info.active_tab = evt.value\n\n\ndef if_active(tab_name, n_out=1):\n def decorator(fn):\n def wrapper(*args, **kwargs):\n global info\n if info.active_tab == tab_name:\n # print(\"updating: \", fn.__name__)\n return fn(*args, **kwargs)\n else:\n # print(\"skipping: \", fn.__name__)\n if n_out == 1:\n return gr.update()\n elif n_out > 1:\n return (gr.update(),) * n_out\n\n return wrapper\n\n return decorator\n\n\ndef update_screenshot(som_or_not: str):\n global info\n img, action_str = get_screenshot(info, som_or_not=som_or_not, annotate=True)\n return img\n\n\ndef get_screenshot(\n info: Info, step: int = None, som_or_not: str = \"Raw Screenshots\", annotate: bool = False\n):\n if step is None:\n step = info.step\n try:\n step_info = info.exp_result.steps_info[step]\n is_som = som_or_not == \"SOM Screenshots\"\n img = info.exp_result.get_screenshot(step, som=is_som)\n if annotate:","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.extract_columns","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.extract_columns#L1010-L1019","kind":"function","name":"extract_columns","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1010,"end_line":1019,"context_start_line":990,"context_end_line":1039,"code":"\ndef get_state_error(state: Info):\n try:\n step_info = state.exp_result.steps_info[state.step + 1]\n err_msg = step_info.obs.get(\"last_action_error\", None)\n except (IndexError, AttributeError):\n err_msg = None\n\n if err_msg is None or len(err_msg) == 0:\n err_msg = \"No Error\"\n return f\"\"\"\\\n**Step error after action:**\n\n{code(err_msg)}\"\"\"\n\n\ndef get_seeds_df(result_df: pd.DataFrame, task_name: str):\n result_df = result_df.reset_index(inplace=False)\n result_df = result_df[result_df[TASK_NAME_KEY] == task_name]\n\n def extract_columns(row: pd.Series):\n return pd.Series(\n {\n \"idx\": row.get(\"_row_index\", None),\n \"seed\": row.get(TASK_SEED_KEY, None),\n \"reward\": row.get(\"cum_reward\", None),\n \"err\": bool(row.get(\"err_msg\", None)),\n \"n_steps\": row.get(\"n_steps\", None),\n }\n )\n\n seed_df = result_df.apply(extract_columns, axis=1)\n # Ensure column order and readability\n seed_df = seed_df[[\"seed\", \"reward\", \"err\", \"n_steps\", \"idx\"]]\n return seed_df\n\n\ndef on_select_agent(evt: gr.SelectData, df: pd.DataFrame):\n # TODO try to find a clever way to solve the sort bug here\n return info.get_agent_id(df.iloc[evt.index[0]])\n\n\ndef on_select_task(evt: gr.SelectData, df: pd.DataFrame, agent_id: list[tuple]):\n # get col index\n col_idx = df.columns.get_loc(TASK_NAME_KEY)\n return (agent_id, evt.row_value[col_idx])\n\n\ndef update_seeds(agent_task_id: tuple):\n agent_id, task_name = agent_task_id","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.analyze.agent_xray.wrapper","uri":"program://AgentLab/function/src.agentlab.analyze.agent_xray.wrapper#L571-L581","kind":"function","name":"wrapper","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":571,"end_line":581,"context_start_line":551,"context_end_line":601,"code":" step = step_id.step\n if key_event.startswith(\"Cmd+Left\"):\n step = max(0, step - 1)\n elif key_event.startswith(\"Cmd+Right\"):\n step = min(len(info.exp_result.steps_info) - 2, step + 1)\n else:\n return 
gr.update()\n # print(f\"Updating step to {step} from key event {key_event}\")\n info.step = step\n step_id = StepId(episode_id=step_id.episode_id, step=step)\n return (\"\", step_id)\n\n\ndef tab_select(evt: gr.SelectData):\n global info\n info.active_tab = evt.value\n\n\ndef if_active(tab_name, n_out=1):\n def decorator(fn):\n def wrapper(*args, **kwargs):\n global info\n if info.active_tab == tab_name:\n # print(\"updating: \", fn.__name__)\n return fn(*args, **kwargs)\n else:\n # print(\"skipping: \", fn.__name__)\n if n_out == 1:\n return gr.update()\n elif n_out > 1:\n return (gr.update(),) * n_out\n\n return wrapper\n\n return decorator\n\n\ndef update_screenshot(som_or_not: str):\n global info\n img, action_str = get_screenshot(info, som_or_not=som_or_not, annotate=True)\n return img\n\n\ndef get_screenshot(\n info: Info, step: int = None, som_or_not: str = \"Raw Screenshots\", annotate: bool = False\n):\n if step is None:\n step = info.step\n try:\n step_info = info.exp_result.steps_info[step]\n is_som = som_or_not == \"SOM Screenshots\"","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.huggingface_utils","uri":"program://AgentLab/module/src.agentlab.llm.huggingface_utils#L1-L208","kind":"module","name":"src.agentlab.llm.huggingface_utils","path":"src/agentlab/llm/huggingface_utils.py","language":"python","start_line":1,"end_line":208,"context_start_line":1,"context_end_line":208,"code":"import logging\nimport os\nimport time\nfrom functools import partial\nfrom typing import Any, List, Optional, Union\n\nfrom pydantic import Field\n\nfrom agentlab.llm.base_api import AbstractChatModel\nfrom agentlab.llm.llm_utils import AIMessage, Discussion\nfrom agentlab.llm.prompt_templates import PromptTemplate, get_prompt_template\n\n\nclass HFBaseChatModel(AbstractChatModel):\n \"\"\"\n Custom LLM Chatbot that can interface with HuggingFace models with support for multiple samples.\n\n This class allows for the creation of a custom chatbot using models hosted\n on HuggingFace Hub or a local checkpoint. It provides flexibility in defining\n the temperature for response sampling and the maximum number of new tokens\n in the response.\n\n Attributes:\n llm (Any): The HuggingFaceHub model instance.\n prompt_template (Any): Template for the prompt to be used for the model's input sequence.\n tokenizer (Any): The tokenizer to use for the model.\n n_retry_server (int): Number of times to retry on server failure.\n \"\"\"\n\n llm: Any = Field(description=\"The HuggingFaceHub model instance\")\n tokenizer: Any = Field(\n default=None,\n description=\"The tokenizer to use for the model\",\n )\n prompt_template: Optional[PromptTemplate] = Field(\n default=None,\n description=\"Template for the prompt to be used for the model's input sequence\",\n )\n n_retry_server: int = Field(\n default=4,\n description=\"The number of times to retry the server if it fails to respond\",\n )\n\n def __init__(self, model_name, base_model_name, n_retry_server, log_probs):\n super().__init__()\n self.n_retry_server = n_retry_server\n self.log_probs = log_probs\n\n # Lazy import to avoid heavy transformers import when unused\n try:\n from transformers import AutoTokenizer, GPT2TokenizerFast # type: ignore\n except Exception as e: # pragma: no cover - surfaced only when transformers missing\n raise ImportError(\n \"The 'transformers' package is required for HuggingFace models. 
Install it to use HF backends.\"\n ) from e\n\n if base_model_name is None:\n self.tokenizer = AutoTokenizer.from_pretrained(model_name)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(base_model_name)\n if isinstance(self.tokenizer, GPT2TokenizerFast):\n logging.warning(\n f\"No chat template is defined for {base_model_name}. Resolving to the hard-coded templates.\"\n )\n self.tokenizer = None\n self.prompt_template = get_prompt_template(model_name)\n\n def __call__(\n self,\n messages: list[dict],\n n_samples: int = 1,\n temperature: Optional[float] = None,\n ) -> Union[AIMessage, List[AIMessage]]:\n \"\"\"\n Generate one or more responses for the given messages.\n\n Args:\n messages: List of message dictionaries containing the conversation history.\n n_samples: Number of independent responses to generate. Defaults to 1.\n temperature: The temperature for response sampling. Defaults to None.\n\n Returns:\n If n_samples=1, returns a single AIMessage.\n If n_samples>1, returns a list of AIMessages.\n\n Raises:\n Exception: If the server fails to respond after n_retry_server attempts or if the chat template fails.\n \"\"\"\n if self.tokenizer:\n try:\n if isinstance(messages, Discussion):\n messages.merge()\n prompt = self.tokenizer.apply_chat_template(messages, tokenize=False)\n except Exception as e:\n if \"Conversation roles must alternate\" in str(e):\n logging.warning(\n \"Failed to apply the chat template. Maybe because it doesn't support the 'system' role. \"\n \"Retrying with the 'system' role appended to the 'user' role.\"\n )\n messages = _prepend_system_to_first_user(messages)\n prompt = self.tokenizer.apply_chat_template(messages, tokenize=False)\n else:\n raise e\n elif self.prompt_template:\n prompt = self.prompt_template.construct_prompt(messages)\n\n responses = []\n for _ in range(n_samples):\n itr = 0\n while True:\n try:\n temperature = (\n temperature\n if temperature is not None\n else getattr(self, \"temperature\", 0.1)\n )\n answer = self.llm(prompt, temperature=temperature)\n response = AIMessage(answer)\n if self.log_probs:\n response[\"content\"] = answer.generated_text\n response[\"log_probs\"] = answer.details\n responses.append(response)\n break\n except Exception as e:\n if itr == self.n_retry_server - 1:\n raise e\n logging.warning(\n f\"Failed to get a response from the server: \\n{e}\\n\"\n f\"Retrying... 
({itr+1}/{self.n_retry_server})\"\n )\n time.sleep(5)\n itr += 1\n\n return responses[0] if n_samples == 1 else responses\n\n def _llm_type(self):\n return \"huggingface\"\n\n\ndef _prepend_system_to_first_user(messages, column_remap={}):\n # Initialize an index for the system message\n system_index = None\n\n human_key = column_remap.get(\"HumanMessage\", \"user\")\n role_key = column_remap.get(\"role\", \"role\")\n text_key = column_remap.get(\"text\", \"content\")\n\n # Find the system content and its index\n for i, msg in enumerate(messages):\n if msg[role_key] == \"system\":\n system_index = i\n system_content = msg[text_key]\n break # Stop after finding the first system message\n\n # If a system message was found, modify the first user message and remove the system message\n if system_index is not None:\n for msg in messages:\n if msg[role_key] == human_key:\n # Prepend system content to the first user content\n msg[text_key] = str(system_content) + \"\\n\" + str(msg[text_key])\n # Remove the original system message\n del messages[system_index]\n break # Ensures that only the first user message is modified\n\n return messages\n\n\nclass HuggingFaceURLChatModel(HFBaseChatModel):\n \"\"\"HF backend using a Text Generation Inference (TGI) HTTP endpoint.\n\n This class is placed here to keep all heavy HF imports optional and only\n loaded when a HF backend is explicitly requested.\n \"\"\"\n\n def __init__(\n self,\n model_name: str,\n model_url: str,\n base_model_name: Optional[str] = None,\n token: Optional[str] = None,\n temperature: Optional[float] = 1e-1,\n max_new_tokens: Optional[int] = 512,\n n_retry_server: Optional[int] = 4,\n log_probs: Optional[bool] = False,\n ):\n super().__init__(model_name, base_model_name, n_retry_server, log_probs)\n if temperature is not None and temperature < 1e-3:\n logging.warning(\"Models might behave weirdly when temperature is too low.\")\n self.temperature = temperature\n\n if token is None:\n # support both env var names used elsewhere\n token = os.environ.get(\"TGI_TOKEN\") or os.environ.get(\"AGENTLAB_MODEL_TOKEN\")\n\n # Lazy import huggingface_hub here to avoid import on non-HF paths\n try:\n from huggingface_hub import InferenceClient # type: ignore\n except Exception as e: # pragma: no cover - surfaced only when package missing\n raise ImportError(\n \"The 'huggingface_hub' package is required for HuggingFace URL backends.\"\n ) from e\n\n client = InferenceClient(model=model_url, token=token)\n self.llm = partial(\n client.text_generation,\n max_new_tokens=max_new_tokens,\n details=log_probs,\n )","source_hash":"9b2a8e6feb3b940567fd65f65fda874c9e0450ae9bf7c2721c0059438927ab0d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.huggingface_utils.HFBaseChatModel","uri":"program://AgentLab/class/src.agentlab.llm.huggingface_utils.HFBaseChatModel#L14-L137","kind":"class","name":"HFBaseChatModel","path":"src/agentlab/llm/huggingface_utils.py","language":"python","start_line":14,"end_line":137,"context_start_line":1,"context_end_line":157,"code":"import logging\nimport os\nimport time\nfrom functools import partial\nfrom typing import Any, List, Optional, Union\n\nfrom pydantic import Field\n\nfrom agentlab.llm.base_api import AbstractChatModel\nfrom agentlab.llm.llm_utils import AIMessage, Discussion\nfrom agentlab.llm.prompt_templates import PromptTemplate, get_prompt_template\n\n\nclass HFBaseChatModel(AbstractChatModel):\n \"\"\"\n Custom LLM Chatbot that can interface with HuggingFace models with support for 
multiple samples.\n\n This class allows for the creation of a custom chatbot using models hosted\n on HuggingFace Hub or a local checkpoint. It provides flexibility in defining\n the temperature for response sampling and the maximum number of new tokens\n in the response.\n\n Attributes:\n llm (Any): The HuggingFaceHub model instance.\n prompt_template (Any): Template for the prompt to be used for the model's input sequence.\n tokenizer (Any): The tokenizer to use for the model.\n n_retry_server (int): Number of times to retry on server failure.\n \"\"\"\n\n llm: Any = Field(description=\"The HuggingFaceHub model instance\")\n tokenizer: Any = Field(\n default=None,\n description=\"The tokenizer to use for the model\",\n )\n prompt_template: Optional[PromptTemplate] = Field(\n default=None,\n description=\"Template for the prompt to be used for the model's input sequence\",\n )\n n_retry_server: int = Field(\n default=4,\n description=\"The number of times to retry the server if it fails to respond\",\n )\n\n def __init__(self, model_name, base_model_name, n_retry_server, log_probs):\n super().__init__()\n self.n_retry_server = n_retry_server\n self.log_probs = log_probs\n\n # Lazy import to avoid heavy transformers import when unused\n try:\n from transformers import AutoTokenizer, GPT2TokenizerFast # type: ignore\n except Exception as e: # pragma: no cover - surfaced only when transformers missing\n raise ImportError(\n \"The 'transformers' package is required for HuggingFace models. Install it to use HF backends.\"\n ) from e\n\n if base_model_name is None:\n self.tokenizer = AutoTokenizer.from_pretrained(model_name)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(base_model_name)\n if isinstance(self.tokenizer, GPT2TokenizerFast):\n logging.warning(\n f\"No chat template is defined for {base_model_name}. Resolving to the hard-coded templates.\"\n )\n self.tokenizer = None\n self.prompt_template = get_prompt_template(model_name)\n\n def __call__(\n self,\n messages: list[dict],\n n_samples: int = 1,\n temperature: Optional[float] = None,\n ) -> Union[AIMessage, List[AIMessage]]:\n \"\"\"\n Generate one or more responses for the given messages.\n\n Args:\n messages: List of message dictionaries containing the conversation history.\n n_samples: Number of independent responses to generate. Defaults to 1.\n temperature: The temperature for response sampling. Defaults to None.\n\n Returns:\n If n_samples=1, returns a single AIMessage.\n If n_samples>1, returns a list of AIMessages.\n\n Raises:\n Exception: If the server fails to respond after n_retry_server attempts or if the chat template fails.\n \"\"\"\n if self.tokenizer:\n try:\n if isinstance(messages, Discussion):\n messages.merge()\n prompt = self.tokenizer.apply_chat_template(messages, tokenize=False)\n except Exception as e:\n if \"Conversation roles must alternate\" in str(e):\n logging.warning(\n \"Failed to apply the chat template. Maybe because it doesn't support the 'system' role. 
\"\n \"Retrying with the 'system' role appended to the 'user' role.\"\n )\n messages = _prepend_system_to_first_user(messages)\n prompt = self.tokenizer.apply_chat_template(messages, tokenize=False)\n else:\n raise e\n elif self.prompt_template:\n prompt = self.prompt_template.construct_prompt(messages)\n\n responses = []\n for _ in range(n_samples):\n itr = 0\n while True:\n try:\n temperature = (\n temperature\n if temperature is not None\n else getattr(self, \"temperature\", 0.1)\n )\n answer = self.llm(prompt, temperature=temperature)\n response = AIMessage(answer)\n if self.log_probs:\n response[\"content\"] = answer.generated_text\n response[\"log_probs\"] = answer.details\n responses.append(response)\n break\n except Exception as e:\n if itr == self.n_retry_server - 1:\n raise e\n logging.warning(\n f\"Failed to get a response from the server: \\n{e}\\n\"\n f\"Retrying... ({itr+1}/{self.n_retry_server})\"\n )\n time.sleep(5)\n itr += 1\n\n return responses[0] if n_samples == 1 else responses\n\n def _llm_type(self):\n return \"huggingface\"\n\n\ndef _prepend_system_to_first_user(messages, column_remap={}):\n # Initialize an index for the system message\n system_index = None\n\n human_key = column_remap.get(\"HumanMessage\", \"user\")\n role_key = column_remap.get(\"role\", \"role\")\n text_key = column_remap.get(\"text\", \"content\")\n\n # Find the system content and its index\n for i, msg in enumerate(messages):\n if msg[role_key] == \"system\":\n system_index = i\n system_content = msg[text_key]\n break # Stop after finding the first system message\n\n # If a system message was found, modify the first user message and remove the system message\n if system_index is not None:\n for msg in messages:","source_hash":"9b2a8e6feb3b940567fd65f65fda874c9e0450ae9bf7c2721c0059438927ab0d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.huggingface_utils._prepend_system_to_first_user","uri":"program://AgentLab/function/src.agentlab.llm.huggingface_utils._prepend_system_to_first_user#L140-L165","kind":"function","name":"_prepend_system_to_first_user","path":"src/agentlab/llm/huggingface_utils.py","language":"python","start_line":140,"end_line":165,"context_start_line":120,"context_end_line":185,"code":" response[\"content\"] = answer.generated_text\n response[\"log_probs\"] = answer.details\n responses.append(response)\n break\n except Exception as e:\n if itr == self.n_retry_server - 1:\n raise e\n logging.warning(\n f\"Failed to get a response from the server: \\n{e}\\n\"\n f\"Retrying... 
({itr+1}/{self.n_retry_server})\"\n )\n time.sleep(5)\n itr += 1\n\n return responses[0] if n_samples == 1 else responses\n\n def _llm_type(self):\n return \"huggingface\"\n\n\ndef _prepend_system_to_first_user(messages, column_remap={}):\n # Initialize an index for the system message\n system_index = None\n\n human_key = column_remap.get(\"HumanMessage\", \"user\")\n role_key = column_remap.get(\"role\", \"role\")\n text_key = column_remap.get(\"text\", \"content\")\n\n # Find the system content and its index\n for i, msg in enumerate(messages):\n if msg[role_key] == \"system\":\n system_index = i\n system_content = msg[text_key]\n break # Stop after finding the first system message\n\n # If a system message was found, modify the first user message and remove the system message\n if system_index is not None:\n for msg in messages:\n if msg[role_key] == human_key:\n # Prepend system content to the first user content\n msg[text_key] = str(system_content) + \"\\n\" + str(msg[text_key])\n # Remove the original system message\n del messages[system_index]\n break # Ensures that only the first user message is modified\n\n return messages\n\n\nclass HuggingFaceURLChatModel(HFBaseChatModel):\n \"\"\"HF backend using a Text Generation Inference (TGI) HTTP endpoint.\n\n This class is placed here to keep all heavy HF imports optional and only\n loaded when a HF backend is explicitly requested.\n \"\"\"\n\n def __init__(\n self,\n model_name: str,\n model_url: str,\n base_model_name: Optional[str] = None,\n token: Optional[str] = None,\n temperature: Optional[float] = 1e-1,\n max_new_tokens: Optional[int] = 512,\n n_retry_server: Optional[int] = 4,\n log_probs: Optional[bool] = False,\n ):","source_hash":"9b2a8e6feb3b940567fd65f65fda874c9e0450ae9bf7c2721c0059438927ab0d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.huggingface_utils.HuggingFaceURLChatModel","uri":"program://AgentLab/class/src.agentlab.llm.huggingface_utils.HuggingFaceURLChatModel#L168-L208","kind":"class","name":"HuggingFaceURLChatModel","path":"src/agentlab/llm/huggingface_utils.py","language":"python","start_line":168,"end_line":208,"context_start_line":148,"context_end_line":208,"code":" # Find the system content and its index\n for i, msg in enumerate(messages):\n if msg[role_key] == \"system\":\n system_index = i\n system_content = msg[text_key]\n break # Stop after finding the first system message\n\n # If a system message was found, modify the first user message and remove the system message\n if system_index is not None:\n for msg in messages:\n if msg[role_key] == human_key:\n # Prepend system content to the first user content\n msg[text_key] = str(system_content) + \"\\n\" + str(msg[text_key])\n # Remove the original system message\n del messages[system_index]\n break # Ensures that only the first user message is modified\n\n return messages\n\n\nclass HuggingFaceURLChatModel(HFBaseChatModel):\n \"\"\"HF backend using a Text Generation Inference (TGI) HTTP endpoint.\n\n This class is placed here to keep all heavy HF imports optional and only\n loaded when a HF backend is explicitly requested.\n \"\"\"\n\n def __init__(\n self,\n model_name: str,\n model_url: str,\n base_model_name: Optional[str] = None,\n token: Optional[str] = None,\n temperature: Optional[float] = 1e-1,\n max_new_tokens: Optional[int] = 512,\n n_retry_server: Optional[int] = 4,\n log_probs: Optional[bool] = False,\n ):\n super().__init__(model_name, base_model_name, n_retry_server, log_probs)\n if temperature is not None and 
temperature < 1e-3:\n logging.warning(\"Models might behave weirdly when temperature is too low.\")\n self.temperature = temperature\n\n if token is None:\n # support both env var names used elsewhere\n token = os.environ.get(\"TGI_TOKEN\") or os.environ.get(\"AGENTLAB_MODEL_TOKEN\")\n\n # Lazy import huggingface_hub here to avoid import on non-HF paths\n try:\n from huggingface_hub import InferenceClient # type: ignore\n except Exception as e: # pragma: no cover - surfaced only when package missing\n raise ImportError(\n \"The 'huggingface_hub' package is required for HuggingFace URL backends.\"\n ) from e\n\n client = InferenceClient(model=model_url, token=token)\n self.llm = partial(\n client.text_generation,\n max_new_tokens=max_new_tokens,\n details=log_probs,\n )","source_hash":"9b2a8e6feb3b940567fd65f65fda874c9e0450ae9bf7c2721c0059438927ab0d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.huggingface_utils.__init__","uri":"program://AgentLab/function/src.agentlab.llm.huggingface_utils.__init__#L175-L208","kind":"function","name":"__init__","path":"src/agentlab/llm/huggingface_utils.py","language":"python","start_line":175,"end_line":208,"context_start_line":155,"context_end_line":208,"code":" # If a system message was found, modify the first user message and remove the system message\n if system_index is not None:\n for msg in messages:\n if msg[role_key] == human_key:\n # Prepend system content to the first user content\n msg[text_key] = str(system_content) + \"\\n\" + str(msg[text_key])\n # Remove the original system message\n del messages[system_index]\n break # Ensures that only the first user message is modified\n\n return messages\n\n\nclass HuggingFaceURLChatModel(HFBaseChatModel):\n \"\"\"HF backend using a Text Generation Inference (TGI) HTTP endpoint.\n\n This class is placed here to keep all heavy HF imports optional and only\n loaded when a HF backend is explicitly requested.\n \"\"\"\n\n def __init__(\n self,\n model_name: str,\n model_url: str,\n base_model_name: Optional[str] = None,\n token: Optional[str] = None,\n temperature: Optional[float] = 1e-1,\n max_new_tokens: Optional[int] = 512,\n n_retry_server: Optional[int] = 4,\n log_probs: Optional[bool] = False,\n ):\n super().__init__(model_name, base_model_name, n_retry_server, log_probs)\n if temperature is not None and temperature < 1e-3:\n logging.warning(\"Models might behave weirdly when temperature is too low.\")\n self.temperature = temperature\n\n if token is None:\n # support both env var names used elsewhere\n token = os.environ.get(\"TGI_TOKEN\") or os.environ.get(\"AGENTLAB_MODEL_TOKEN\")\n\n # Lazy import huggingface_hub here to avoid import on non-HF paths\n try:\n from huggingface_hub import InferenceClient # type: ignore\n except Exception as e: # pragma: no cover - surfaced only when package missing\n raise ImportError(\n \"The 'huggingface_hub' package is required for HuggingFace URL backends.\"\n ) from e\n\n client = InferenceClient(model=model_url, token=token)\n self.llm = partial(\n client.text_generation,\n max_new_tokens=max_new_tokens,\n details=log_probs,\n )","source_hash":"9b2a8e6feb3b940567fd65f65fda874c9e0450ae9bf7c2721c0059438927ab0d","truncated":false} 
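# --- Editor's note (illustrative sketch, not one of the source records) -----
# HuggingFaceURLChatModel binds the generation options once with
# functools.partial, so each call (and each retry) only supplies the prompt
# and the sampling temperature. A minimal sketch under stated assumptions:
# it requires the `huggingface_hub` package, and the endpoint URL below is a
# placeholder for a running TGI server.

from functools import partial

from huggingface_hub import InferenceClient

client = InferenceClient(model="http://localhost:8080", token=None)  # hypothetical endpoint
llm = partial(client.text_generation, max_new_tokens=512, details=False)
# answer = llm("Once upon a time", temperature=0.1)  # -> generated text (str);
# with details=True the result also carries token-level details (used for log_probs above).
# ----------------------------------------------------------------------------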
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.huggingface_utils.__call__","uri":"program://AgentLab/function/src.agentlab.llm.huggingface_utils.__call__#L68-L134","kind":"function","name":"__call__","path":"src/agentlab/llm/huggingface_utils.py","language":"python","start_line":68,"end_line":134,"context_start_line":48,"context_end_line":154,"code":"\n # Lazy import to avoid heavy transformers import when unused\n try:\n from transformers import AutoTokenizer, GPT2TokenizerFast # type: ignore\n except Exception as e: # pragma: no cover - surfaced only when transformers missing\n raise ImportError(\n \"The 'transformers' package is required for HuggingFace models. Install it to use HF backends.\"\n ) from e\n\n if base_model_name is None:\n self.tokenizer = AutoTokenizer.from_pretrained(model_name)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(base_model_name)\n if isinstance(self.tokenizer, GPT2TokenizerFast):\n logging.warning(\n f\"No chat template is defined for {base_model_name}. Resolving to the hard-coded templates.\"\n )\n self.tokenizer = None\n self.prompt_template = get_prompt_template(model_name)\n\n def __call__(\n self,\n messages: list[dict],\n n_samples: int = 1,\n temperature: Optional[float] = None,\n ) -> Union[AIMessage, List[AIMessage]]:\n \"\"\"\n Generate one or more responses for the given messages.\n\n Args:\n messages: List of message dictionaries containing the conversation history.\n n_samples: Number of independent responses to generate. Defaults to 1.\n temperature: The temperature for response sampling. Defaults to None.\n\n Returns:\n If n_samples=1, returns a single AIMessage.\n If n_samples>1, returns a list of AIMessages.\n\n Raises:\n Exception: If the server fails to respond after n_retry_server attempts or if the chat template fails.\n \"\"\"\n if self.tokenizer:\n try:\n if isinstance(messages, Discussion):\n messages.merge()\n prompt = self.tokenizer.apply_chat_template(messages, tokenize=False)\n except Exception as e:\n if \"Conversation roles must alternate\" in str(e):\n logging.warning(\n \"Failed to apply the chat template. Maybe because it doesn't support the 'system' role. \"\n \"Retrying with the 'system' role appended to the 'user' role.\"\n )\n messages = _prepend_system_to_first_user(messages)\n prompt = self.tokenizer.apply_chat_template(messages, tokenize=False)\n else:\n raise e\n elif self.prompt_template:\n prompt = self.prompt_template.construct_prompt(messages)\n\n responses = []\n for _ in range(n_samples):\n itr = 0\n while True:\n try:\n temperature = (\n temperature\n if temperature is not None\n else getattr(self, \"temperature\", 0.1)\n )\n answer = self.llm(prompt, temperature=temperature)\n response = AIMessage(answer)\n if self.log_probs:\n response[\"content\"] = answer.generated_text\n response[\"log_probs\"] = answer.details\n responses.append(response)\n break\n except Exception as e:\n if itr == self.n_retry_server - 1:\n raise e\n logging.warning(\n f\"Failed to get a response from the server: \\n{e}\\n\"\n f\"Retrying... 
({itr+1}/{self.n_retry_server})\"\n )\n time.sleep(5)\n itr += 1\n\n return responses[0] if n_samples == 1 else responses\n\n def _llm_type(self):\n return \"huggingface\"\n\n\ndef _prepend_system_to_first_user(messages, column_remap={}):\n # Initialize an index for the system message\n system_index = None\n\n human_key = column_remap.get(\"HumanMessage\", \"user\")\n role_key = column_remap.get(\"role\", \"role\")\n text_key = column_remap.get(\"text\", \"content\")\n\n # Find the system content and its index\n for i, msg in enumerate(messages):\n if msg[role_key] == \"system\":\n system_index = i\n system_content = msg[text_key]\n break # Stop after finding the first system message\n","source_hash":"9b2a8e6feb3b940567fd65f65fda874c9e0450ae9bf7c2721c0059438927ab0d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.huggingface_utils._llm_type","uri":"program://AgentLab/function/src.agentlab.llm.huggingface_utils._llm_type#L136-L137","kind":"function","name":"_llm_type","path":"src/agentlab/llm/huggingface_utils.py","language":"python","start_line":136,"end_line":137,"context_start_line":116,"context_end_line":157,"code":" )\n answer = self.llm(prompt, temperature=temperature)\n response = AIMessage(answer)\n if self.log_probs:\n response[\"content\"] = answer.generated_text\n response[\"log_probs\"] = answer.details\n responses.append(response)\n break\n except Exception as e:\n if itr == self.n_retry_server - 1:\n raise e\n logging.warning(\n f\"Failed to get a response from the server: \\n{e}\\n\"\n f\"Retrying... ({itr+1}/{self.n_retry_server})\"\n )\n time.sleep(5)\n itr += 1\n\n return responses[0] if n_samples == 1 else responses\n\n def _llm_type(self):\n return \"huggingface\"\n\n\ndef _prepend_system_to_first_user(messages, column_remap={}):\n # Initialize an index for the system message\n system_index = None\n\n human_key = column_remap.get(\"HumanMessage\", \"user\")\n role_key = column_remap.get(\"role\", \"role\")\n text_key = column_remap.get(\"text\", \"content\")\n\n # Find the system content and its index\n for i, msg in enumerate(messages):\n if msg[role_key] == \"system\":\n system_index = i\n system_content = msg[text_key]\n break # Stop after finding the first system message\n\n # If a system message was found, modify the first user message and remove the system message\n if system_index is not None:\n for msg in messages:","source_hash":"9b2a8e6feb3b940567fd65f65fda874c9e0450ae9bf7c2721c0059438927ab0d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api","uri":"program://AgentLab/module/src.agentlab.llm.response_api#L1-L1020","kind":"module","name":"src.agentlab.llm.response_api","path":"src/agentlab/llm/response_api.py","language":"python","start_line":1,"end_line":1020,"context_start_line":1,"context_end_line":1020,"code":"import json\nimport logging\nimport os\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Literal, Optional, Union\n\nimport openai\nfrom anthropic import Anthropic\nfrom anthropic.types import Completion\nfrom anthropic.types import Message as AnthrophicMessage\nfrom openai import OpenAI\n\nfrom agentlab.llm.llm_utils import image_to_png_base64_url\n\nfrom .base_api import BaseModelArgs\nfrom .llm_utils import (\n call_anthropic_api_with_retries,\n call_openai_api_with_retries,\n)\nfrom .tracking import TrackAPIPricingMixin\n\n\"\"\"This module contains utlity classes for building input messages and interacting with LLM 
APIs. \nIt includes:\n 1. Message Builder for building input messages\n 2. Base Reponse class for different LLM APIs (OpenAI, Anthropic, etc.)\n 3. Factory classes (inherits from BaseModelArgs) for creating instances of LLM Response models.\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\nContentItem = Dict[str, Any]\nMessage = Dict[str, Union[str, List[ContentItem]]]\n\n\n@dataclass\nclass ToolCall:\n \"\"\"Represents a tool call made by the LLM.\n Attributes:\n name: Name of the tool called.\n arguments: Arguments passed to the tool.\n raw_call: The raw call object from the LLM API.\n tool_response: Output of the tool call goes here. It can be only one content item.\n \"\"\"\n\n name: str = field(default=None)\n arguments: Dict[str, Any] = field(default_factory=dict)\n raw_call: Any = field(default=None)\n tool_response: ContentItem = None\n\n @property\n def is_response_set(self) -> bool:\n \"\"\"Check if the tool response is set.\"\"\"\n return self.tool_response is not None\n\n def response_text(self, text: str) -> \"MessageBuilder\":\n self.tool_response = {\"text\": text}\n return self\n\n def response_image(self, image: str) -> \"MessageBuilder\":\n self.tool_response = {\"image\": image}\n return self\n\n def __repr__(self):\n return f\"ToolCall(name={self.name}, arguments={self.arguments})\"\n\n\n@dataclass\nclass ToolCalls:\n \"\"\"A collection of tool calls made by the LLM.\n\n Attributes:\n tool_calls: List of ToolCall objects.\n raw_calls: Represents raw tool calls object returned by a LLM API, may contain one or more tool calls.\n \"\"\"\n\n tool_calls: List[ToolCall] = field(default_factory=list)\n raw_calls: List[Any] = field(default_factory=list)\n\n def add_tool_call(self, tool_call: ToolCall) -> \"ToolCalls\":\n self.tool_calls.append(tool_call)\n return self\n\n @property\n def all_responses_set(self) -> bool:\n \"\"\"Check if all tool calls have responses set.\"\"\"\n return all(call.is_response_set for call in self.tool_calls)\n\n def __len__(self) -> int:\n \"\"\"Return the number of tool calls.\"\"\"\n return len(self.tool_calls)\n\n def __iter__(self):\n \"\"\"Make ToolCalls iterable.\"\"\"\n return iter(self.tool_calls)\n\n def __bool__(self):\n \"\"\"Check if there are any tool calls.\"\"\"\n return len(self.tool_calls) > 0\n\n\n@dataclass\nclass LLMOutput:\n \"\"\"Serializable object for the output of a response LLM.\"\"\"\n\n raw_response: Any = field(default=None)\n think: str = field(default=\"\")\n action: str | None = field(default=None) # Default action if no tool call is made\n tool_calls: ToolCalls | None = field(\n default=None\n ) # This will hold the tool call response if any\n\n\nclass MessageBuilder:\n def __init__(self, role: str):\n self.role = role\n self.content: List[ContentItem] = []\n self.responded_tool_calls: ToolCalls = None\n\n @classmethod\n def system(cls) -> \"MessageBuilder\":\n return cls(\"system\")\n\n @classmethod\n def user(cls) -> \"MessageBuilder\":\n return cls(\"user\")\n\n @classmethod\n def assistant(cls) -> \"MessageBuilder\":\n return cls(\"assistant\")\n\n @abstractmethod\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the API call.\"\"\"\n raise NotImplementedError(\"Subclasses must implement this method.\")\n\n def add_text(self, text: str) -> \"MessageBuilder\":\n self.content.append({\"text\": text})\n return self\n\n def add_image(self, image: str) -> \"MessageBuilder\":\n self.content.append({\"image\": image})\n return self\n\n def to_markdown(self) -> str:\n parts = []\n for item 
in self.content:\n if \"text\" in item:\n parts.append(f\"\\n```\\n{item['text']}\\n```\\n\")\n elif \"image\" in item:\n parts.append(f\"![Image]({item['image']})\")\n\n # Tool call markdown repr\n if self.responded_tool_calls is not None:\n for i, tool_call in enumerate(self.responded_tool_calls.tool_calls, 1):\n parts.append(\n f\"\\n**Tool Call {i}**: {tool_call_to_python_code(tool_call.name, tool_call.arguments)}\"\n )\n response = tool_call.tool_response\n if response is not None:\n parts.append(f\"\\n**Tool Response {i}:**\")\n content = (\n f\"```\\n{response['text']}\\n```\"\n if \"text\" in response\n else f\"![Tool Response Image]({response['image']})\"\n )\n parts.append(content)\n\n markdown = f\"### {self.role.capitalize()}\\n\"\n markdown += \"\\n\".join(parts)\n\n return markdown\n\n def add_image_url(self, image_url: str) -> \"MessageBuilder\":\n \"\"\"Add an image URL to the message content.\"\"\"\n self.content.append({\"image\": image_to_png_base64_url(image_url)})\n return self\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Insert a cache breakpoint in the message content.\"\"\"\n # This is a placeholder for future implementation.\n raise NotImplementedError\n\n @classmethod\n def add_responded_tool_calls(cls, responded_tool_calls: ToolCalls) -> \"MessageBuilder\":\n \"\"\"Add tool calls to the message content.\"\"\"\n assert responded_tool_calls.all_responses_set, \"All tool calls must have a response.\"\n msg = cls(\"tool\")\n msg.responded_tool_calls = responded_tool_calls\n return msg\n\n\nclass OpenAIResponseAPIMessageBuilder(MessageBuilder):\n @classmethod\n def system(cls) -> \"OpenAIResponseAPIMessageBuilder\":\n # OpenAI Responses API uses 'developer' role for system messages\n return cls(\"developer\")\n\n def prepare_message(self) -> List[Message]:\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Convert the content item to the expected format for OpenAI Responses.\"\"\"\n if \"text\" in content:\n content_type = \"input_text\" if self.role != \"assistant\" else \"output_text\"\n return {\"type\": content_type, \"text\": content[\"text\"]}\n elif \"image\" in content:\n return {\"type\": \"input_image\", \"image_url\": content[\"image\"]}\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def handle_tool_call(self) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n\n output = []\n output.extend(self.responded_tool_calls.raw_calls.output) # this contains response\n for fn_call in self.responded_tool_calls:\n call_type = fn_call.raw_call.type\n call_id = fn_call.raw_call.call_id\n call_response = fn_call.tool_response\n\n match call_type:\n case \"function_call\":\n # image output is not supported in function calls response.\n assert (\n \"image\" not in call_response\n ), \"Image output is not supported in function calls response.\"\n fn_call_response = {\n \"type\": \"function_call_output\",\n \"call_id\": call_id,\n \"output\": self.convert_content_to_expected_format(call_response)[\"text\"],\n }\n output.append(fn_call_response)\n\n case \"computer_call\":\n # For computer calls, use only images are 
expected.\n assert (\n \"text\" not in call_response\n ), \"Text output is not supported in computer calls response.\"\n computer_call_output = {\n \"type\": \"computer_call_output\",\n \"call_id\": call_id,\n \"output\": self.convert_content_to_expected_format(call_response),\n }\n output.append(computer_call_output) # this needs to be a screenshot\n\n return output\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Nothing special to do here for openAI. They do not have a notion of cache breakpoints.\"\"\"\n pass\n\n\nclass AnthropicAPIMessageBuilder(MessageBuilder):\n def prepare_message(self) -> List[Message]:\n content = [self.transform_content(item) for item in self.content]\n output = {\"role\": self.role, \"content\": content}\n\n if self.role == \"tool\":\n return self.handle_tool_call()\n\n if self.role == \"assistant\":\n # Strip whitespace from assistant text responses. See anthropic error code 400.\n for c in output[\"content\"]:\n if \"text\" in c:\n c[\"text\"] = c[\"text\"].strip()\n return [output]\n\n def handle_tool_call(self) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n\n llm_tool_call = {\n \"role\": \"assistant\",\n \"content\": self.responded_tool_calls.raw_calls.content,\n } # Add the toolcall block\n tool_response = {\"role\": \"user\", \"content\": []} # Anthropic expects a list of messages\n for call in self.responded_tool_calls:\n assert (\n \"image\" not in call.tool_response\n ), \"Image output is not supported in tool calls response.\"\n tool_response[\"content\"].append(\n {\n \"type\": \"tool_result\",\n \"tool_use_id\": call.raw_call.id,\n \"content\": self.transform_content(call.tool_response)[\n \"text\"\n ], # needs to be str\n }\n )\n\n return [llm_tool_call, tool_response]\n\n def transform_content(self, content: ContentItem) -> ContentItem:\n \"\"\"Transform content item to the format expected by Anthropic API.\"\"\"\n if \"text\" in content:\n return {\"type\": \"text\", \"text\": content[\"text\"]}\n elif \"image\" in content:\n img_str: str = content[\"image\"]\n # make sure to get rid of the image type for anthropic\n # e.g. 
\"data:image/png;base64\"\n if img_str.startswith(\"data:image/png;base64,\"):\n img_str = img_str[len(\"data:image/png;base64,\") :]\n return {\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": \"image/png\",\n \"data\": img_str,\n },\n }\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def mark_all_previous_msg_for_caching(self) -> List[Message]:\n \"\"\"Insert a cache breakpoint in the message content to mark all previous messages for caching.\"\"\"\n self._cache_breakpoint = True\n\n\nclass OpenAIChatCompletionAPIMessageBuilder(MessageBuilder):\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Transform content item to the format expected by OpenAI ChatCompletion.\"\"\"\n if \"text\" in content:\n return {\"type\": \"text\", \"text\": content[\"text\"]}\n elif \"image\" in content:\n return {\"type\": \"image_url\", \"image_url\": {\"url\": content[\"image\"]}}\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def handle_tool_call(self) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n output.append(\n self.responded_tool_calls.raw_calls.choices[0].message\n ) # add raw calls to output\n for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call\n assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_reponse = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],\n }\n output.append(tool_call_reponse)\n\n return output\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Nothing special to do here for openAI. They do not have a notion of cache breakpoints.\"\"\"\n pass\n\n\n@dataclass\nclass APIPayload:\n messages: List[MessageBuilder] | None = None\n tools: List[Dict[str, Any]] | None = None\n tool_choice: Literal[\"none\", \"auto\", \"any\", \"required\"] | None = None\n force_call_tool: str | None = (\n None # Name of the tool to call # If set, will force the LLM to call this tool.\n )\n use_cache_breakpoints: bool = (\n False # If True, will apply cache breakpoints to the messages. # applicable for Anthropic\n )\n cache_tool_definition: bool = (\n False # If True, will cache the tool definition in the last message.\n )\n cache_complete_prompt: bool = (\n False # If True, will cache the complete prompt in the last message.\n )\n reasoning_effort: Literal[\"low\", \"medium\", \"high\"] | None = None\n\n def __post_init__(self):\n if self.tool_choice and self.force_call_tool:\n raise ValueError(\"tool_choice and force_call_tool are mutually exclusive\")\n if self.reasoning_effort is not None:\n logger.info(\n \"In agentlab reasoning_effort is used by LiteLLM API only. 
We will eventually shift to LiteLLM API for all LLMs.\"\n )\n\n\n# # Base class for all API Endpoints\nclass BaseResponseModel(ABC):\n def __init__(\n self,\n model_name: str,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = None,\n ):\n self.model_name = model_name\n self.api_key = api_key\n self.temperature = temperature\n self.max_tokens = max_tokens\n super().__init__()\n\n def __call__(self, payload: APIPayload) -> LLMOutput:\n \"\"\"Make a call to the model and return the parsed response.\"\"\"\n response = self._call_api(payload)\n return self._parse_response(response)\n\n @abstractmethod\n def _call_api(self, payload: APIPayload) -> Any:\n \"\"\"Make a call to the model API and return the raw response.\"\"\"\n pass\n\n @abstractmethod\n def _parse_response(self, response: Any) -> LLMOutput:\n \"\"\"Parse the raw response from the model API and return a structured response.\"\"\"\n pass\n\n\nclass AgentlabAction:\n \"\"\"\n Collection of utility function to convert tool calls to Agentlab action format.\n \"\"\"\n\n @staticmethod\n def convert_toolcall_to_agentlab_action_format(toolcall: ToolCall) -> str:\n \"\"\"Convert a tool call to an Agentlab environment action string.\n\n Args:\n toolcall: ToolCall object containing the name and arguments of the tool call.\n\n Returns:\n A string representing the action in Agentlab format i.e. python function call string.\n \"\"\"\n\n tool_name, tool_args = toolcall.name, toolcall.arguments\n return tool_call_to_python_code(tool_name, tool_args)\n\n @staticmethod\n def convert_multiactions_to_agentlab_action_format(actions: list[str]) -> str | None:\n \"\"\"Convert multiple actions list to a format that env supports.\n\n Args:\n actions: List of action strings to be joined.\n\n Returns:\n Joined actions separated by newlines, or None if empty.\n \"\"\"\n return \"\\n\".join(actions) if actions else None\n\n\nclass BaseModelWithPricing(TrackAPIPricingMixin, BaseResponseModel):\n pass\n\n\nclass OpenAIResponseModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n self.action_space_as_tools = True # this should be a config\n super().__init__( # This is passed to BaseModel\n model_name=model_name, api_key=api_key, temperature=temperature, max_tokens=max_tokens\n )\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key\n self.client = OpenAI(**client_args)\n # Init pricing tracker after super() so that all attributes have been set.\n self.init_pricing_tracker(pricing_api=\"openai\") # Use the PricingMixin\n\n def _call_api(self, payload: APIPayload) -> \"OpenAIResponseObject\":\n\n input = []\n for msg in payload.messages:\n input.extend(msg.prepare_message())\n api_params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"input\": input,\n }\n # Not all Open AI models support these parameters (example: o3), so we check if they are set.\n if self.temperature is not None:\n api_params[\"temperature\"] = self.temperature\n if self.max_tokens is not None:\n api_params[\"max_output_tokens\"] = self.max_tokens\n if payload.tools is not None:\n api_params[\"tools\"] = payload.tools\n if payload.tool_choice is not None and payload.force_call_tool is None:\n api_params[\"tool_choice\"] = (\n \"required\" if payload.tool_choice in (\"required\", \"any\") else 
payload.tool_choice\n            )\n        if payload.force_call_tool is not None:\n            api_params[\"tool_choice\"] = {\"type\": \"function\", \"name\": payload.force_call_tool}\n\n        response = call_openai_api_with_retries(\n            self.client.responses.create,\n            api_params,\n        )\n\n        return response\n\n    def _parse_response(self, response: \"OpenAIResponseObject\") -> LLMOutput:\n        \"\"\"Parse the raw response from the OpenAI Responses API.\"\"\"\n\n        think_output = self._extract_thinking_content_from_response(response)\n        toolcalls = self._extract_tool_calls_from_response(response)\n\n        if self.action_space_as_tools:\n            env_action = self._extract_env_action\n# ... truncated ...","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":true} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.ToolCall","uri":"program://AgentLab/class/src.agentlab.llm.response_api.ToolCall#L37-L65","kind":"class","name":"ToolCall","path":"src/agentlab/llm/response_api.py","language":"python","start_line":37,"end_line":65,"context_start_line":17,"context_end_line":85,"code":"from .llm_utils import (\n    call_anthropic_api_with_retries,\n    call_openai_api_with_retries,\n)\nfrom .tracking import TrackAPIPricingMixin\n\n\"\"\"This module contains utility classes for building input messages and interacting with LLM APIs. \nIt includes:\n    1. Message Builder for building input messages\n    2. Base Response class for different LLM APIs (OpenAI, Anthropic, etc.)\n    3. Factory classes (inheriting from BaseModelArgs) for creating instances of LLM Response models.\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\nContentItem = Dict[str, Any]\nMessage = Dict[str, Union[str, List[ContentItem]]]\n\n\n@dataclass\nclass ToolCall:\n    \"\"\"Represents a tool call made by the LLM.\n    Attributes:\n        name: Name of the tool called.\n        arguments: Arguments passed to the tool.\n        raw_call: The raw call object from the LLM API.\n        tool_response: Output of the tool call goes here. 
It can contain only one content item.\n    \"\"\"\n\n    name: str = field(default=None)\n    arguments: Dict[str, Any] = field(default_factory=dict)\n    raw_call: Any = field(default=None)\n    tool_response: ContentItem = None\n\n    @property\n    def is_response_set(self) -> bool:\n        \"\"\"Check if the tool response is set.\"\"\"\n        return self.tool_response is not None\n\n    def response_text(self, text: str) -> \"ToolCall\":\n        self.tool_response = {\"text\": text}\n        return self\n\n    def response_image(self, image: str) -> \"ToolCall\":\n        self.tool_response = {\"image\": image}\n        return self\n\n    def __repr__(self):\n        return f\"ToolCall(name={self.name}, arguments={self.arguments})\"\n\n\n@dataclass\nclass ToolCalls:\n    \"\"\"A collection of tool calls made by the LLM.\n\n    Attributes:\n        tool_calls: List of ToolCall objects.\n        raw_calls: Represents the raw tool calls object returned by an LLM API, may contain one or more tool calls.\n    \"\"\"\n\n    tool_calls: List[ToolCall] = field(default_factory=list)\n    raw_calls: List[Any] = field(default_factory=list)\n\n    def add_tool_call(self, tool_call: ToolCall) -> \"ToolCalls\":\n        self.tool_calls.append(tool_call)\n        return self\n\n    @property\n    def all_responses_set(self) -> bool:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.ToolCalls","uri":"program://AgentLab/class/src.agentlab.llm.response_api.ToolCalls#L69-L99","kind":"class","name":"ToolCalls","path":"src/agentlab/llm/response_api.py","language":"python","start_line":69,"end_line":99,"context_start_line":49,"context_end_line":119,"code":"    tool_response: ContentItem = None\n\n    @property\n    def is_response_set(self) -> bool:\n        \"\"\"Check if the tool response is set.\"\"\"\n        return self.tool_response is not None\n\n    def response_text(self, text: str) -> \"ToolCall\":\n        self.tool_response = {\"text\": text}\n        return self\n\n    def response_image(self, image: str) -> \"ToolCall\":\n        self.tool_response = {\"image\": image}\n        return self\n\n    def __repr__(self):\n        return f\"ToolCall(name={self.name}, arguments={self.arguments})\"\n\n\n@dataclass\nclass ToolCalls:\n    \"\"\"A collection of tool calls made by the LLM.\n\n    Attributes:\n        tool_calls: List of ToolCall objects.\n        raw_calls: Represents the raw tool calls object returned by an LLM API, may contain one or more tool calls.\n    \"\"\"\n\n    tool_calls: List[ToolCall] = field(default_factory=list)\n    raw_calls: List[Any] = field(default_factory=list)\n\n    def add_tool_call(self, tool_call: ToolCall) -> \"ToolCalls\":\n        self.tool_calls.append(tool_call)\n        return self\n\n    @property\n    def all_responses_set(self) -> bool:\n        \"\"\"Check if all tool calls have responses set.\"\"\"\n        return all(call.is_response_set for call in self.tool_calls)\n\n    def __len__(self) -> int:\n        \"\"\"Return the number of tool calls.\"\"\"\n        return len(self.tool_calls)\n\n    def __iter__(self):\n        \"\"\"Make ToolCalls iterable.\"\"\"\n        return iter(self.tool_calls)\n\n    def __bool__(self):\n        \"\"\"Check if there are any tool calls.\"\"\"\n        return len(self.tool_calls) > 0\n\n\n@dataclass\nclass LLMOutput:\n    \"\"\"Serializable object for the output of a response LLM.\"\"\"\n\n    raw_response: Any = field(default=None)\n    think: str = field(default=\"\")\n    action: str | None = field(default=None)  # Default action if no tool call is made\n    tool_calls: ToolCalls | None = field(\n        default=None\n    )  # This will hold the tool call response if any\n\n\nclass MessageBuilder:\n    def __init__(self, 
role: str):\n self.role = role\n self.content: List[ContentItem] = []\n self.responded_tool_calls: ToolCalls = None\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.LLMOutput","uri":"program://AgentLab/class/src.agentlab.llm.response_api.LLMOutput#L103-L111","kind":"class","name":"LLMOutput","path":"src/agentlab/llm/response_api.py","language":"python","start_line":103,"end_line":111,"context_start_line":83,"context_end_line":131,"code":"\n @property\n def all_responses_set(self) -> bool:\n \"\"\"Check if all tool calls have responses set.\"\"\"\n return all(call.is_response_set for call in self.tool_calls)\n\n def __len__(self) -> int:\n \"\"\"Return the number of tool calls.\"\"\"\n return len(self.tool_calls)\n\n def __iter__(self):\n \"\"\"Make ToolCalls iterable.\"\"\"\n return iter(self.tool_calls)\n\n def __bool__(self):\n \"\"\"Check if there are any tool calls.\"\"\"\n return len(self.tool_calls) > 0\n\n\n@dataclass\nclass LLMOutput:\n \"\"\"Serializable object for the output of a response LLM.\"\"\"\n\n raw_response: Any = field(default=None)\n think: str = field(default=\"\")\n action: str | None = field(default=None) # Default action if no tool call is made\n tool_calls: ToolCalls | None = field(\n default=None\n ) # This will hold the tool call response if any\n\n\nclass MessageBuilder:\n def __init__(self, role: str):\n self.role = role\n self.content: List[ContentItem] = []\n self.responded_tool_calls: ToolCalls = None\n\n @classmethod\n def system(cls) -> \"MessageBuilder\":\n return cls(\"system\")\n\n @classmethod\n def user(cls) -> \"MessageBuilder\":\n return cls(\"user\")\n\n @classmethod\n def assistant(cls) -> \"MessageBuilder\":\n return cls(\"assistant\")\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.MessageBuilder","uri":"program://AgentLab/class/src.agentlab.llm.response_api.MessageBuilder#L114-L190","kind":"class","name":"MessageBuilder","path":"src/agentlab/llm/response_api.py","language":"python","start_line":114,"end_line":190,"context_start_line":94,"context_end_line":210,"code":" \"\"\"Make ToolCalls iterable.\"\"\"\n return iter(self.tool_calls)\n\n def __bool__(self):\n \"\"\"Check if there are any tool calls.\"\"\"\n return len(self.tool_calls) > 0\n\n\n@dataclass\nclass LLMOutput:\n \"\"\"Serializable object for the output of a response LLM.\"\"\"\n\n raw_response: Any = field(default=None)\n think: str = field(default=\"\")\n action: str | None = field(default=None) # Default action if no tool call is made\n tool_calls: ToolCalls | None = field(\n default=None\n ) # This will hold the tool call response if any\n\n\nclass MessageBuilder:\n def __init__(self, role: str):\n self.role = role\n self.content: List[ContentItem] = []\n self.responded_tool_calls: ToolCalls = None\n\n @classmethod\n def system(cls) -> \"MessageBuilder\":\n return cls(\"system\")\n\n @classmethod\n def user(cls) -> \"MessageBuilder\":\n return cls(\"user\")\n\n @classmethod\n def assistant(cls) -> \"MessageBuilder\":\n return cls(\"assistant\")\n\n @abstractmethod\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the API call.\"\"\"\n raise NotImplementedError(\"Subclasses must implement this method.\")\n\n def add_text(self, text: str) -> \"MessageBuilder\":\n self.content.append({\"text\": text})\n return 
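Assuming the module's imports, the role factories keep message construction declarative; `prepare_message` itself is deferred to the API-specific subclasses below:

```python
from agentlab.llm.response_api import LLMOutput, MessageBuilder

msg = MessageBuilder.user().add_text("What is on this page?")
assert msg.role == "user"
assert msg.content == [{"text": "What is on this page?"}]

# An empty LLMOutput is the "no action, no tool calls" default.
out = LLMOutput()
assert out.action is None and out.tool_calls is None
```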
self\n\n def add_image(self, image: str) -> \"MessageBuilder\":\n self.content.append({\"image\": image})\n return self\n\n def to_markdown(self) -> str:\n parts = []\n for item in self.content:\n if \"text\" in item:\n parts.append(f\"\\n```\\n{item['text']}\\n```\\n\")\n elif \"image\" in item:\n parts.append(f\"![Image]({item['image']})\")\n\n # Tool call markdown repr\n if self.responded_tool_calls is not None:\n for i, tool_call in enumerate(self.responded_tool_calls.tool_calls, 1):\n parts.append(\n f\"\\n**Tool Call {i}**: {tool_call_to_python_code(tool_call.name, tool_call.arguments)}\"\n )\n response = tool_call.tool_response\n if response is not None:\n parts.append(f\"\\n**Tool Response {i}:**\")\n content = (\n f\"```\\n{response['text']}\\n```\"\n if \"text\" in response\n else f\"![Tool Response Image]({response['image']})\"\n )\n parts.append(content)\n\n markdown = f\"### {self.role.capitalize()}\\n\"\n markdown += \"\\n\".join(parts)\n\n return markdown\n\n def add_image_url(self, image_url: str) -> \"MessageBuilder\":\n \"\"\"Add an image URL to the message content.\"\"\"\n self.content.append({\"image\": image_to_png_base64_url(image_url)})\n return self\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Insert a cache breakpoint in the message content.\"\"\"\n # This is a placeholder for future implementation.\n raise NotImplementedError\n\n @classmethod\n def add_responded_tool_calls(cls, responded_tool_calls: ToolCalls) -> \"MessageBuilder\":\n \"\"\"Add tool calls to the message content.\"\"\"\n assert responded_tool_calls.all_responses_set, \"All tool calls must have a response.\"\n msg = cls(\"tool\")\n msg.responded_tool_calls = responded_tool_calls\n return msg\n\n\nclass OpenAIResponseAPIMessageBuilder(MessageBuilder):\n @classmethod\n def system(cls) -> \"OpenAIResponseAPIMessageBuilder\":\n # OpenAI Responses API uses 'developer' role for system messages\n return cls(\"developer\")\n\n def prepare_message(self) -> List[Message]:\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Convert the content item to the expected format for OpenAI Responses.\"\"\"\n if \"text\" in content:\n content_type = \"input_text\" if self.role != \"assistant\" else \"output_text\"","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.OpenAIResponseAPIMessageBuilder","uri":"program://AgentLab/class/src.agentlab.llm.response_api.OpenAIResponseAPIMessageBuilder#L193-L258","kind":"class","name":"OpenAIResponseAPIMessageBuilder","path":"src/agentlab/llm/response_api.py","language":"python","start_line":193,"end_line":258,"context_start_line":173,"context_end_line":278,"code":"\n def add_image_url(self, image_url: str) -> \"MessageBuilder\":\n \"\"\"Add an image URL to the message content.\"\"\"\n self.content.append({\"image\": image_to_png_base64_url(image_url)})\n return self\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Insert a cache breakpoint in the message content.\"\"\"\n # This is a placeholder for future implementation.\n raise NotImplementedError\n\n @classmethod\n def add_responded_tool_calls(cls, responded_tool_calls: ToolCalls) -> \"MessageBuilder\":\n \"\"\"Add tool 
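`to_markdown` gives a readable log of a single turn; roughly:

```python
from agentlab.llm.response_api import MessageBuilder

msg = MessageBuilder.user().add_text("Click submit")
print(msg.to_markdown())
# ### User
#
# ```
# Click submit
# ```
```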
calls to the message content.\"\"\"\n assert responded_tool_calls.all_responses_set, \"All tool calls must have a response.\"\n msg = cls(\"tool\")\n msg.responded_tool_calls = responded_tool_calls\n return msg\n\n\nclass OpenAIResponseAPIMessageBuilder(MessageBuilder):\n @classmethod\n def system(cls) -> \"OpenAIResponseAPIMessageBuilder\":\n # OpenAI Responses API uses 'developer' role for system messages\n return cls(\"developer\")\n\n def prepare_message(self) -> List[Message]:\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Convert the content item to the expected format for OpenAI Responses.\"\"\"\n if \"text\" in content:\n content_type = \"input_text\" if self.role != \"assistant\" else \"output_text\"\n return {\"type\": content_type, \"text\": content[\"text\"]}\n elif \"image\" in content:\n return {\"type\": \"input_image\", \"image_url\": content[\"image\"]}\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def handle_tool_call(self) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n\n output = []\n output.extend(self.responded_tool_calls.raw_calls.output) # this contains response\n for fn_call in self.responded_tool_calls:\n call_type = fn_call.raw_call.type\n call_id = fn_call.raw_call.call_id\n call_response = fn_call.tool_response\n\n match call_type:\n case \"function_call\":\n # image output is not supported in function calls response.\n assert (\n \"image\" not in call_response\n ), \"Image output is not supported in function calls response.\"\n fn_call_response = {\n \"type\": \"function_call_output\",\n \"call_id\": call_id,\n \"output\": self.convert_content_to_expected_format(call_response)[\"text\"],\n }\n output.append(fn_call_response)\n\n case \"computer_call\":\n # For computer calls, use only images are expected.\n assert (\n \"text\" not in call_response\n ), \"Text output is not supported in computer calls response.\"\n computer_call_output = {\n \"type\": \"computer_call_output\",\n \"call_id\": call_id,\n \"output\": self.convert_content_to_expected_format(call_response),\n }\n output.append(computer_call_output) # this needs to be a screenshot\n\n return output\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Nothing special to do here for openAI. They do not have a notion of cache breakpoints.\"\"\"\n pass\n\n\nclass AnthropicAPIMessageBuilder(MessageBuilder):\n def prepare_message(self) -> List[Message]:\n content = [self.transform_content(item) for item in self.content]\n output = {\"role\": self.role, \"content\": content}\n\n if self.role == \"tool\":\n return self.handle_tool_call()\n\n if self.role == \"assistant\":\n # Strip whitespace from assistant text responses. 
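A small check of the Responses-API role mapping and the prepared message shape:

```python
from agentlab.llm.response_api import OpenAIResponseAPIMessageBuilder

sys_msg = OpenAIResponseAPIMessageBuilder.system().add_text("You are a careful web agent.")
assert sys_msg.role == "developer"  # the Responses API spells 'system' as 'developer'
print(sys_msg.prepare_message())
# [{'role': 'developer', 'content': [{'type': 'input_text', 'text': 'You are a careful web agent.'}]}]
```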
See anthropic error code 400.\n for c in output[\"content\"]:\n if \"text\" in c:\n c[\"text\"] = c[\"text\"].strip()\n return [output]\n\n def handle_tool_call(self) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.AnthropicAPIMessageBuilder","uri":"program://AgentLab/class/src.agentlab.llm.response_api.AnthropicAPIMessageBuilder#L261-L325","kind":"class","name":"AnthropicAPIMessageBuilder","path":"src/agentlab/llm/response_api.py","language":"python","start_line":261,"end_line":325,"context_start_line":241,"context_end_line":345,"code":"\n case \"computer_call\":\n # For computer calls, use only images are expected.\n assert (\n \"text\" not in call_response\n ), \"Text output is not supported in computer calls response.\"\n computer_call_output = {\n \"type\": \"computer_call_output\",\n \"call_id\": call_id,\n \"output\": self.convert_content_to_expected_format(call_response),\n }\n output.append(computer_call_output) # this needs to be a screenshot\n\n return output\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Nothing special to do here for openAI. They do not have a notion of cache breakpoints.\"\"\"\n pass\n\n\nclass AnthropicAPIMessageBuilder(MessageBuilder):\n def prepare_message(self) -> List[Message]:\n content = [self.transform_content(item) for item in self.content]\n output = {\"role\": self.role, \"content\": content}\n\n if self.role == \"tool\":\n return self.handle_tool_call()\n\n if self.role == \"assistant\":\n # Strip whitespace from assistant text responses. See anthropic error code 400.\n for c in output[\"content\"]:\n if \"text\" in c:\n c[\"text\"] = c[\"text\"].strip()\n return [output]\n\n def handle_tool_call(self) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n\n llm_tool_call = {\n \"role\": \"assistant\",\n \"content\": self.responded_tool_calls.raw_calls.content,\n } # Add the toolcall block\n tool_response = {\"role\": \"user\", \"content\": []} # Anthropic expects a list of messages\n for call in self.responded_tool_calls:\n assert (\n \"image\" not in call.tool_response\n ), \"Image output is not supported in tool calls response.\"\n tool_response[\"content\"].append(\n {\n \"type\": \"tool_result\",\n \"tool_use_id\": call.raw_call.id,\n \"content\": self.transform_content(call.tool_response)[\n \"text\"\n ], # needs to be str\n }\n )\n\n return [llm_tool_call, tool_response]\n\n def transform_content(self, content: ContentItem) -> ContentItem:\n \"\"\"Transform content item to the format expected by Anthropic API.\"\"\"\n if \"text\" in content:\n return {\"type\": \"text\", \"text\": content[\"text\"]}\n elif \"image\" in content:\n img_str: str = content[\"image\"]\n # make sure to get rid of the image type for anthropic\n # e.g. 
\"data:image/png;base64\"\n if img_str.startswith(\"data:image/png;base64,\"):\n img_str = img_str[len(\"data:image/png;base64,\") :]\n return {\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": \"image/png\",\n \"data\": img_str,\n },\n }\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def mark_all_previous_msg_for_caching(self) -> List[Message]:\n \"\"\"Insert a cache breakpoint in the message content to mark all previous messages for caching.\"\"\"\n self._cache_breakpoint = True\n\n\nclass OpenAIChatCompletionAPIMessageBuilder(MessageBuilder):\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Transform content item to the format expected by OpenAI ChatCompletion.\"\"\"\n if \"text\" in content:\n return {\"type\": \"text\", \"text\": content[\"text\"]}\n elif \"image\" in content:\n return {\"type\": \"image_url\", \"image_url\": {\"url\": content[\"image\"]}}\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.OpenAIChatCompletionAPIMessageBuilder","uri":"program://AgentLab/class/src.agentlab.llm.response_api.OpenAIChatCompletionAPIMessageBuilder#L328-L372","kind":"class","name":"OpenAIChatCompletionAPIMessageBuilder","path":"src/agentlab/llm/response_api.py","language":"python","start_line":328,"end_line":372,"context_start_line":308,"context_end_line":392,"code":" # make sure to get rid of the image type for anthropic\n # e.g. 
\"data:image/png;base64\"\n if img_str.startswith(\"data:image/png;base64,\"):\n img_str = img_str[len(\"data:image/png;base64,\") :]\n return {\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": \"image/png\",\n \"data\": img_str,\n },\n }\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def mark_all_previous_msg_for_caching(self) -> List[Message]:\n \"\"\"Insert a cache breakpoint in the message content to mark all previous messages for caching.\"\"\"\n self._cache_breakpoint = True\n\n\nclass OpenAIChatCompletionAPIMessageBuilder(MessageBuilder):\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Transform content item to the format expected by OpenAI ChatCompletion.\"\"\"\n if \"text\" in content:\n return {\"type\": \"text\", \"text\": content[\"text\"]}\n elif \"image\" in content:\n return {\"type\": \"image_url\", \"image_url\": {\"url\": content[\"image\"]}}\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def handle_tool_call(self) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n output.append(\n self.responded_tool_calls.raw_calls.choices[0].message\n ) # add raw calls to output\n for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call\n assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_reponse = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],\n }\n output.append(tool_call_reponse)\n\n return output\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Nothing special to do here for openAI. They do not have a notion of cache breakpoints.\"\"\"\n pass\n\n\n@dataclass\nclass APIPayload:\n messages: List[MessageBuilder] | None = None\n tools: List[Dict[str, Any]] | None = None\n tool_choice: Literal[\"none\", \"auto\", \"any\", \"required\"] | None = None\n force_call_tool: str | None = (\n None # Name of the tool to call # If set, will force the LLM to call this tool.\n )\n use_cache_breakpoints: bool = (\n False # If True, will apply cache breakpoints to the messages. 
# applicable for Anthropic\n )\n cache_tool_definition: bool = (\n False # If True, will cache the tool definition in the last message.\n )\n cache_complete_prompt: bool = (\n False # If True, will cache the complete prompt in the last message.\n )\n reasoning_effort: Literal[\"low\", \"medium\", \"high\"] | None = None","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.APIPayload","uri":"program://AgentLab/class/src.agentlab.llm.response_api.APIPayload#L376-L400","kind":"class","name":"APIPayload","path":"src/agentlab/llm/response_api.py","language":"python","start_line":376,"end_line":400,"context_start_line":356,"context_end_line":420,"code":" assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_reponse = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],\n }\n output.append(tool_call_reponse)\n\n return output\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Nothing special to do here for openAI. They do not have a notion of cache breakpoints.\"\"\"\n pass\n\n\n@dataclass\nclass APIPayload:\n messages: List[MessageBuilder] | None = None\n tools: List[Dict[str, Any]] | None = None\n tool_choice: Literal[\"none\", \"auto\", \"any\", \"required\"] | None = None\n force_call_tool: str | None = (\n None # Name of the tool to call # If set, will force the LLM to call this tool.\n )\n use_cache_breakpoints: bool = (\n False # If True, will apply cache breakpoints to the messages. # applicable for Anthropic\n )\n cache_tool_definition: bool = (\n False # If True, will cache the tool definition in the last message.\n )\n cache_complete_prompt: bool = (\n False # If True, will cache the complete prompt in the last message.\n )\n reasoning_effort: Literal[\"low\", \"medium\", \"high\"] | None = None\n\n def __post_init__(self):\n if self.tool_choice and self.force_call_tool:\n raise ValueError(\"tool_choice and force_call_tool are mutually exclusive\")\n if self.reasoning_effort is not None:\n logger.info(\n \"In agentlab reasoning_effort is used by LiteLLM API only. 
We will eventually shift to LiteLLM API for all LLMs.\"\n )\n\n\n# # Base class for all API Endpoints\nclass BaseResponseModel(ABC):\n def __init__(\n self,\n model_name: str,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = None,\n ):\n self.model_name = model_name\n self.api_key = api_key\n self.temperature = temperature\n self.max_tokens = max_tokens\n super().__init__()\n\n def __call__(self, payload: APIPayload) -> LLMOutput:\n \"\"\"Make a call to the model and return the parsed response.\"\"\"\n response = self._call_api(payload)","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.BaseResponseModel","uri":"program://AgentLab/class/src.agentlab.llm.response_api.BaseResponseModel#L404-L431","kind":"class","name":"BaseResponseModel","path":"src/agentlab/llm/response_api.py","language":"python","start_line":404,"end_line":431,"context_start_line":384,"context_end_line":451,"code":" False # If True, will apply cache breakpoints to the messages. # applicable for Anthropic\n )\n cache_tool_definition: bool = (\n False # If True, will cache the tool definition in the last message.\n )\n cache_complete_prompt: bool = (\n False # If True, will cache the complete prompt in the last message.\n )\n reasoning_effort: Literal[\"low\", \"medium\", \"high\"] | None = None\n\n def __post_init__(self):\n if self.tool_choice and self.force_call_tool:\n raise ValueError(\"tool_choice and force_call_tool are mutually exclusive\")\n if self.reasoning_effort is not None:\n logger.info(\n \"In agentlab reasoning_effort is used by LiteLLM API only. We will eventually shift to LiteLLM API for all LLMs.\"\n )\n\n\n# # Base class for all API Endpoints\nclass BaseResponseModel(ABC):\n def __init__(\n self,\n model_name: str,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = None,\n ):\n self.model_name = model_name\n self.api_key = api_key\n self.temperature = temperature\n self.max_tokens = max_tokens\n super().__init__()\n\n def __call__(self, payload: APIPayload) -> LLMOutput:\n \"\"\"Make a call to the model and return the parsed response.\"\"\"\n response = self._call_api(payload)\n return self._parse_response(response)\n\n @abstractmethod\n def _call_api(self, payload: APIPayload) -> Any:\n \"\"\"Make a call to the model API and return the raw response.\"\"\"\n pass\n\n @abstractmethod\n def _parse_response(self, response: Any) -> LLMOutput:\n \"\"\"Parse the raw response from the model API and return a structured response.\"\"\"\n pass\n\n\nclass AgentlabAction:\n \"\"\"\n Collection of utility function to convert tool calls to Agentlab action format.\n \"\"\"\n\n @staticmethod\n def convert_toolcall_to_agentlab_action_format(toolcall: ToolCall) -> str:\n \"\"\"Convert a tool call to an Agentlab environment action string.\n\n Args:\n toolcall: ToolCall object containing the name and arguments of the tool call.\n\n Returns:\n A string representing the action in Agentlab format i.e. 
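`APIPayload` is the one object handed to every backend; note the guard in `__post_init__`:

```python
from agentlab.llm.response_api import APIPayload

payload = APIPayload(messages=[], tool_choice="auto")  # placeholder message list

try:
    APIPayload(tool_choice="auto", force_call_tool="click")  # both set -> rejected
except ValueError as err:
    print(err)  # tool_choice and force_call_tool are mutually exclusive
```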
python function call string.\n \"\"\"\n\n tool_name, tool_args = toolcall.name, toolcall.arguments\n return tool_call_to_python_code(tool_name, tool_args)","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.AgentlabAction","uri":"program://AgentLab/class/src.agentlab.llm.response_api.AgentlabAction#L434-L463","kind":"class","name":"AgentlabAction","path":"src/agentlab/llm/response_api.py","language":"python","start_line":434,"end_line":463,"context_start_line":414,"context_end_line":483,"code":" self.temperature = temperature\n self.max_tokens = max_tokens\n super().__init__()\n\n def __call__(self, payload: APIPayload) -> LLMOutput:\n \"\"\"Make a call to the model and return the parsed response.\"\"\"\n response = self._call_api(payload)\n return self._parse_response(response)\n\n @abstractmethod\n def _call_api(self, payload: APIPayload) -> Any:\n \"\"\"Make a call to the model API and return the raw response.\"\"\"\n pass\n\n @abstractmethod\n def _parse_response(self, response: Any) -> LLMOutput:\n \"\"\"Parse the raw response from the model API and return a structured response.\"\"\"\n pass\n\n\nclass AgentlabAction:\n \"\"\"\n Collection of utility function to convert tool calls to Agentlab action format.\n \"\"\"\n\n @staticmethod\n def convert_toolcall_to_agentlab_action_format(toolcall: ToolCall) -> str:\n \"\"\"Convert a tool call to an Agentlab environment action string.\n\n Args:\n toolcall: ToolCall object containing the name and arguments of the tool call.\n\n Returns:\n A string representing the action in Agentlab format i.e. python function call string.\n \"\"\"\n\n tool_name, tool_args = toolcall.name, toolcall.arguments\n return tool_call_to_python_code(tool_name, tool_args)\n\n @staticmethod\n def convert_multiactions_to_agentlab_action_format(actions: list[str]) -> str | None:\n \"\"\"Convert multiple actions list to a format that env supports.\n\n Args:\n actions: List of action strings to be joined.\n\n Returns:\n Joined actions separated by newlines, or None if empty.\n \"\"\"\n return \"\\n\".join(actions) if actions else None\n\n\nclass BaseModelWithPricing(TrackAPIPricingMixin, BaseResponseModel):\n pass\n\n\nclass OpenAIResponseModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n self.action_space_as_tools = True # this should be a config\n super().__init__( # This is passed to BaseModel\n model_name=model_name, api_key=api_key, temperature=temperature, max_tokens=max_tokens\n )\n client_args = {}","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.BaseModelWithPricing","uri":"program://AgentLab/class/src.agentlab.llm.response_api.BaseModelWithPricing#L466-L467","kind":"class","name":"BaseModelWithPricing","path":"src/agentlab/llm/response_api.py","language":"python","start_line":466,"end_line":467,"context_start_line":446,"context_end_line":487,"code":" Returns:\n A string representing the action in Agentlab format i.e. 
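How a tool call becomes an executable action string (argument values are rendered with `repr`):

```python
from agentlab.llm.response_api import AgentlabAction, ToolCall

tc = ToolCall(name="fill", arguments={"bid": "a12", "value": "hello"})
print(AgentlabAction.convert_toolcall_to_agentlab_action_format(tc))
# fill(bid='a12', value='hello')

print(AgentlabAction.convert_multiactions_to_agentlab_action_format(["click('a1')", "noop()"]))
# click('a1')
# noop()
```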
python function call string.\n \"\"\"\n\n tool_name, tool_args = toolcall.name, toolcall.arguments\n return tool_call_to_python_code(tool_name, tool_args)\n\n @staticmethod\n def convert_multiactions_to_agentlab_action_format(actions: list[str]) -> str | None:\n \"\"\"Convert multiple actions list to a format that env supports.\n\n Args:\n actions: List of action strings to be joined.\n\n Returns:\n Joined actions separated by newlines, or None if empty.\n \"\"\"\n return \"\\n\".join(actions) if actions else None\n\n\nclass BaseModelWithPricing(TrackAPIPricingMixin, BaseResponseModel):\n pass\n\n\nclass OpenAIResponseModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n self.action_space_as_tools = True # this should be a config\n super().__init__( # This is passed to BaseModel\n model_name=model_name, api_key=api_key, temperature=temperature, max_tokens=max_tokens\n )\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.OpenAIResponseModel","uri":"program://AgentLab/class/src.agentlab.llm.response_api.OpenAIResponseModel#L470-L594","kind":"class","name":"OpenAIResponseModel","path":"src/agentlab/llm/response_api.py","language":"python","start_line":470,"end_line":594,"context_start_line":450,"context_end_line":614,"code":" tool_name, tool_args = toolcall.name, toolcall.arguments\n return tool_call_to_python_code(tool_name, tool_args)\n\n @staticmethod\n def convert_multiactions_to_agentlab_action_format(actions: list[str]) -> str | None:\n \"\"\"Convert multiple actions list to a format that env supports.\n\n Args:\n actions: List of action strings to be joined.\n\n Returns:\n Joined actions separated by newlines, or None if empty.\n \"\"\"\n return \"\\n\".join(actions) if actions else None\n\n\nclass BaseModelWithPricing(TrackAPIPricingMixin, BaseResponseModel):\n pass\n\n\nclass OpenAIResponseModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n self.action_space_as_tools = True # this should be a config\n super().__init__( # This is passed to BaseModel\n model_name=model_name, api_key=api_key, temperature=temperature, max_tokens=max_tokens\n )\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key\n self.client = OpenAI(**client_args)\n # Init pricing tracker after super() so that all attributes have been set.\n self.init_pricing_tracker(pricing_api=\"openai\") # Use the PricingMixin\n\n def _call_api(self, payload: APIPayload) -> \"OpenAIResponseObject\":\n\n input = []\n for msg in payload.messages:\n input.extend(msg.prepare_message())\n api_params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"input\": input,\n }\n # Not all Open AI models support these parameters (example: o3), so we check if they are set.\n if self.temperature is not None:\n api_params[\"temperature\"] = self.temperature\n if self.max_tokens is not None:\n api_params[\"max_output_tokens\"] = self.max_tokens\n if payload.tools is not None:\n 
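An instantiation sketch; the model name is a hypothetical stand-in, and the OpenAI client reads OPENAI_API_KEY from the environment unless `api_key` is passed:

```python
from agentlab.llm.response_api import OpenAIResponseModel

model = OpenAIResponseModel(model_name="gpt-4.1", temperature=0.0, max_tokens=500)
# __call__ runs _call_api then _parse_response, returning an LLMOutput:
# out = model(APIPayload(messages=[sys_msg, user_msg], tools=tools))
```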
api_params[\"tools\"] = payload.tools\n if payload.tool_choice is not None and payload.force_call_tool is None:\n api_params[\"tool_choice\"] = (\n \"required\" if payload.tool_choice in (\"required\", \"any\") else payload.tool_choice\n )\n if payload.force_call_tool is not None:\n api_params[\"tool_choice\"] = {\"type\": \"function\", \"name\": payload.force_call_tool}\n\n response = call_openai_api_with_retries(\n self.client.responses.create,\n api_params,\n )\n\n return response\n\n def _parse_response(self, response: \"OpenAIResponseObject\") -> LLMOutput:\n \"\"\"Parse the raw response from the OpenAI Responses API.\"\"\"\n\n think_output = self._extract_thinking_content_from_response(response)\n toolcalls = self._extract_tool_calls_from_response(response)\n\n if self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(toolcalls)\n else:\n env_action = self._extract_env_actions_from_text_response(response)\n\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=toolcalls if toolcalls is not None else None,\n )\n\n def _extract_tool_calls_from_response(self, response: \"OpenAIResponseObject\") -> ToolCalls:\n \"\"\"Extracts tool calls from the response.\"\"\"\n tool_calls = []\n for output in response.output:\n if output.type == \"function_call\":\n tool_name = output.name\n tool_args = json.loads(output.arguments)\n elif output.type == \"computer_call\":\n tool_name, tool_args = self.cua_action_to_env_tool_name_and_args(output.action)\n else:\n continue\n tool_calls.append(ToolCall(name=tool_name, arguments=tool_args, raw_call=output))\n\n return ToolCalls(tool_calls=tool_calls, raw_calls=response)\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts actions from the response.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_thinking_content_from_response(self, response: \"OpenAIResponseObject\") -> str:\n \"\"\"Extracts the thinking content from the response.\"\"\"\n thinking_content = \"\"\n for output in response.output:\n if output.type == \"reasoning\":\n if len(output.summary) > 0:\n thinking_content += output.summary[0].text + \"\\n\"\n elif output.type == \"message\" and output.content:\n thinking_content += output.content[0].text + \"\\n\"\n elif hasattr(output, \"output_text\") and output.output_text:\n thinking_content += f\"{output.output_text}\\n\"\n return thinking_content\n\n def cua_action_to_env_tool_name_and_args(self, action: str) -> tuple[str, Dict[str, Any]]:\n \"\"\" \"Overwrite this method to convert a computer action to agentlab action string\"\"\"\n raise NotImplementedError(\n \"This method should be implemented in the subclass to convert a computer action to agentlab action string.\"\n )\n\n def _extract_env_actions_from_text_response(\n self, response: \"OpenAIResponseObject\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass\n\n\nclass OpenAIChatCompletionModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | 
None = 100,\n ):\n super().__init__(\n model_name=model_name,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n self.action_space_as_tools = True # this should be a config\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.OpenAIChatCompletionModel","uri":"program://AgentLab/class/src.agentlab.llm.response_api.OpenAIChatCompletionModel#L597-L761","kind":"class","name":"OpenAIChatCompletionModel","path":"src/agentlab/llm/response_api.py","language":"python","start_line":597,"end_line":761,"context_start_line":577,"context_end_line":781,"code":" elif output.type == \"message\" and output.content:\n thinking_content += output.content[0].text + \"\\n\"\n elif hasattr(output, \"output_text\") and output.output_text:\n thinking_content += f\"{output.output_text}\\n\"\n return thinking_content\n\n def cua_action_to_env_tool_name_and_args(self, action: str) -> tuple[str, Dict[str, Any]]:\n \"\"\" \"Overwrite this method to convert a computer action to agentlab action string\"\"\"\n raise NotImplementedError(\n \"This method should be implemented in the subclass to convert a computer action to agentlab action string.\"\n )\n\n def _extract_env_actions_from_text_response(\n self, response: \"OpenAIResponseObject\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass\n\n\nclass OpenAIChatCompletionModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n super().__init__(\n model_name=model_name,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n self.action_space_as_tools = True # this should be a config\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key\n self.client = OpenAI(**client_args)\n self.init_pricing_tracker(pricing_api=\"openai\") # Use the PricingMixin\n\n def _call_api(self, payload: APIPayload) -> \"openai.types.chat.ChatCompletion\":\n input = []\n for msg in payload.messages:\n input.extend(msg.prepare_message())\n api_params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"messages\": input,\n }\n if self.temperature is not None:\n api_params[\"temperature\"] = self.temperature\n\n if self.max_tokens is not None:\n api_params[\"max_completion_tokens\"] = self.max_tokens\n\n if payload.tools is not None:\n # tools format is OpenAI Response API format.\n api_params[\"tools\"] = self.format_tools_for_chat_completion(payload.tools)\n\n if payload.tool_choice is not None and payload.force_call_tool is None:\n api_params[\"tool_choice\"] = (\n \"required\" if payload.tool_choice in (\"required\", \"any\") else payload.tool_choice\n )\n\n if payload.force_call_tool is not None:\n api_params[\"tool_choice\"] = {\n \"type\": \"function\",\n \"function\": {\"name\": payload.force_call_tool},\n }\n\n response = call_openai_api_with_retries(self.client.chat.completions.create, api_params)\n\n return response\n\n def _parse_response(self, response: \"openai.types.chat.ChatCompletion\") -> LLMOutput:\n think_output = self._extract_thinking_content_from_response(response)\n tool_calls = self._extract_tool_calls_from_response(response)\n\n if 
self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(tool_calls)\n else:\n env_action = self._extract_env_actions_from_text_response(response)\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=tool_calls if tool_calls is not None else None,\n )\n\n def _extract_thinking_content_from_response(\n self, response: openai.types.chat.ChatCompletion, wrap_tag=\"think\"\n ):\n \"\"\"Extracts the content from the message, including reasoning if available.\n It wraps the reasoning in <wrap_tag>...</wrap_tag> tags for easy identification of reasoning content\n when the LLM produces 'text' and 'reasoning' in the same message.\n Note: The wrapping of 'thinking' content may not be needed and may be reconsidered.\n\n Args:\n response: The chat completion response whose message contains content and reasoning.\n wrap_tag: The tag name to wrap reasoning content (default: \"think\").\n\n Returns:\n str: The extracted content with reasoning wrapped in the specified tags.\n \"\"\"\n message = response.choices[0].message\n if not isinstance(message, dict):\n message = message.to_dict()\n\n reasoning_content = message.get(\"reasoning\", None)\n msg_content = message.get(\"text\", \"\") # works for OpenRouter\n if reasoning_content:\n # Wrap reasoning in tags (plus a trailing newline) for clarity\n reasoning_content = f\"<{wrap_tag}>{reasoning_content}</{wrap_tag}>\\n\"\n logger.debug(\"Extracting content from response.choices[i].message.reasoning\")\n else:\n reasoning_content = \"\"\n return f\"{reasoning_content}{msg_content}{message.get('content', '')}\"\n\n def _extract_tool_calls_from_response(\n self, response: openai.types.chat.ChatCompletion\n ) -> ToolCalls | None:\n \"\"\"Extracts tool calls from the response.\"\"\"\n message = response.choices[0].message.to_dict()\n tool_calls = message.get(\"tool_calls\", None)\n if tool_calls is None:\n return None\n tool_call_list = []\n for tc in tool_calls:\n tool_call_list.append(\n ToolCall(\n name=tc[\"function\"][\"name\"],\n arguments=json.loads(tc[\"function\"][\"arguments\"]),\n raw_call=tc,\n )\n )\n return ToolCalls(tool_calls=tool_call_list, raw_calls=response)\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts actions from the response.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(\n self, response: \"openai.types.chat.ChatCompletion\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass\n\n @staticmethod\n def format_tools_for_chat_completion(tools):\n \"\"\"Converts Responses-API tool descriptions to the OpenAI Chat Completion format.\n\n Why is this needed? actionset.to_tool_description() in bgym only returns descriptions\n in the format valid for the OpenAI Responses API.\n\n Args:\n tools: List of tool descriptions to format for the Chat Completion API.\n\n Returns:\n Formatted tools list compatible with the OpenAI Chat Completion API, or None if tools is None.\n \"\"\"\n formatted_tools = None\n if tools is not None:\n formatted_tools = [\n {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] for k in (\"name\", \"description\", \"parameters\")},\n }\n for tool in tools\n ]\n return 
formatted_tools\n\n\nclass ClaudeResponseModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n self.action_space_as_tools = True # this should be a config\n\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n client_args = {}","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.ClaudeResponseModel","uri":"program://AgentLab/class/src.agentlab.llm.response_api.ClaudeResponseModel#L764-L905","kind":"class","name":"ClaudeResponseModel","path":"src/agentlab/llm/response_api.py","language":"python","start_line":764,"end_line":905,"context_start_line":744,"context_end_line":925,"code":" format valid for OpenAI Response API.\n\n Args:\n tools: List of tool descriptions to format for Chat Completion API.\n\n Returns:\n Formatted tools list compatible with OpenAI Chat Completion API, or None if tools is None.\n \"\"\"\n formatted_tools = None\n if tools is not None:\n formatted_tools = [\n {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] for k in (\"name\", \"description\", \"parameters\")},\n }\n for tool in tools\n ]\n return formatted_tools\n\n\nclass ClaudeResponseModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n self.action_space_as_tools = True # this should be a config\n\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key\n self.client = Anthropic(**client_args)\n self.init_pricing_tracker(pricing_api=\"anthropic\") # Use the PricingMixin\n\n def _call_api(self, payload: APIPayload) -> Completion:\n sys_msg, other_msgs = self.filter_system_messages(payload.messages)\n sys_msg_text = \"\\n\".join(c[\"text\"] for m in sys_msg for c in m.content)\n input = []\n for msg in other_msgs:\n temp = msg.prepare_message()\n if payload.use_cache_breakpoints:\n temp = self.apply_cache_breakpoints(msg, temp)\n input.extend(temp)\n\n api_params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"messages\": input,\n \"system\": sys_msg_text,\n } # Anthropic API expects system message as a string\n\n if self.temperature is not None:\n api_params[\"temperature\"] = self.temperature\n if self.max_tokens is not None:\n api_params[\"max_tokens\"] = self.max_tokens\n\n if payload.tools is not None:\n api_params[\"tools\"] = payload.tools\n if payload.tool_choice is not None and payload.force_call_tool is None:\n api_params[\"tool_choice\"] = (\n {\"type\": \"any\"}\n if payload.tool_choice in (\"required\", \"any\")\n else {\"type\": payload.tool_choice}\n )\n if payload.force_call_tool is not None:\n api_params[\"tool_choice\"] = {\"type\": \"tool\", \"name\": payload.force_call_tool}\n if payload.cache_tool_definition:\n # Indicating cache control for the last message enables caching of the last message.\n api_params[\"tools\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n if payload.cache_complete_prompt:\n # Indicating cache control for the last message enables caching of the 
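What the tool-format shim does, shown with a minimal Responses-API style description:

```python
from agentlab.llm.response_api import OpenAIChatCompletionModel

response_api_tools = [{
    "type": "function",
    "name": "click",
    "description": "Click an element by bid.",
    "parameters": {"type": "object", "properties": {"bid": {"type": "string"}}},
}]
print(OpenAIChatCompletionModel.format_tools_for_chat_completion(response_api_tools))
# [{'type': 'function', 'function': {'name': 'click', 'description': 'Click an element by bid.',
#   'parameters': {'type': 'object', 'properties': {'bid': {'type': 'string'}}}}]
```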
complete prompt.\n api_params[\"messages\"][-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n\n response = call_anthropic_api_with_retries(self.client.messages.create, api_params)\n\n return response\n\n @staticmethod\n def filter_system_messages(messages: list[dict | MessageBuilder]) -> tuple[MessageBuilder]:\n \"\"\"Filter system messages from the list of messages.\"\"\"\n # System message cannot have an image in the middle of the text sequences.\n # Images can be appended in the end of the system message.\n\n sys_msgs, other_msgs = [], []\n for msg in messages:\n if isinstance(msg, MessageBuilder) and msg.role == \"system\":\n sys_msgs.append(msg)\n for c in msg.content:\n if c.get(\"type\") == \"image\":\n raise TypeError(\"System messages cannot contain images.\")\n else:\n other_msgs.append(msg)\n return sys_msgs, other_msgs\n\n def _parse_response(self, response: \"AnthrophicMessage\") -> LLMOutput:\n\n toolcalls = self._extract_tool_calls_from_response(response)\n think_output = self._extract_thinking_content_from_response(response)\n if self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(toolcalls)\n else:\n env_action = self._extract_env_actions_from_text_response(response)\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=toolcalls if toolcalls is not None else None,\n )\n\n def _extract_tool_calls_from_response(self, response: \"AnthrophicMessage\") -> ToolCalls:\n \"\"\"Extracts tool calls from the response.\"\"\"\n tool_calls = []\n for output in response.content:\n if output.type == \"tool_use\":\n tool_calls.append(\n ToolCall(\n name=output.name,\n arguments=output.input,\n raw_call=output,\n )\n )\n return ToolCalls(tool_calls=tool_calls, raw_calls=response)\n\n def _extract_thinking_content_from_response(self, response: \"AnthrophicMessage\"):\n \"\"\"Extracts the thinking content from the response.\"\"\"\n return \"\".join(output.text for output in response.content if output.type == \"text\")\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts actions from the response.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(self, response: \"AnthrophicMessage\") -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass\n\n def apply_cache_breakpoints(self, msg: Message, prepared_msg: dict) -> List[Message]:\n \"\"\"Apply cache breakpoints to the messages.\"\"\"\n if getattr(msg, \"_cache_breakpoint\", False):\n prepared_msg[-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n return prepared_msg\n\n\n# Factory classes to create the appropriate model based on the API endpoint.\n\n\n@dataclass\nclass OpenAIResponseModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n api = \"openai\"\n\n def make_model(self):\n return OpenAIResponseModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> 
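Opting into Anthropic prompt caching is a two-step gesture: mark a builder, then enable breakpoints on the payload. A sketch:

```python
from agentlab.llm.response_api import AnthropicAPIMessageBuilder, APIPayload

builder = AnthropicAPIMessageBuilder.user().add_text("step 1 observation")
builder.mark_all_previous_msg_for_caching()  # sets builder._cache_breakpoint = True

payload = APIPayload(messages=[builder], use_cache_breakpoints=True,
                     cache_tool_definition=True)  # also cache the last tool definition
```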
MessageBuilder:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.OpenAIResponseModelArgs","uri":"program://AgentLab/class/src.agentlab.llm.response_api.OpenAIResponseModelArgs#L912-L926","kind":"class","name":"OpenAIResponseModelArgs","path":"src/agentlab/llm/response_api.py","language":"python","start_line":912,"end_line":926,"context_start_line":892,"context_end_line":946,"code":" else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(self, response: \"AnthrophicMessage\") -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass\n\n def apply_cache_breakpoints(self, msg: Message, prepared_msg: dict) -> List[Message]:\n \"\"\"Apply cache breakpoints to the messages.\"\"\"\n if getattr(msg, \"_cache_breakpoint\", False):\n prepared_msg[-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n return prepared_msg\n\n\n# Factory classes to create the appropriate model based on the API endpoint.\n\n\n@dataclass\nclass OpenAIResponseModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n api = \"openai\"\n\n def make_model(self):\n return OpenAIResponseModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return OpenAIResponseAPIMessageBuilder\n\n\n@dataclass\nclass ClaudeResponseModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an Anthropic\n model.\"\"\"\n\n api = \"anthropic\"\n\n def make_model(self):\n return ClaudeResponseModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return AnthropicAPIMessageBuilder\n\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.ClaudeResponseModelArgs","uri":"program://AgentLab/class/src.agentlab.llm.response_api.ClaudeResponseModelArgs#L930-L944","kind":"class","name":"ClaudeResponseModelArgs","path":"src/agentlab/llm/response_api.py","language":"python","start_line":930,"end_line":944,"context_start_line":910,"context_end_line":964,"code":"\n@dataclass\nclass OpenAIResponseModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n api = \"openai\"\n\n def make_model(self):\n return OpenAIResponseModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return OpenAIResponseAPIMessageBuilder\n\n\n@dataclass\nclass ClaudeResponseModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an Anthropic\n model.\"\"\"\n\n api = \"anthropic\"\n\n def make_model(self):\n return ClaudeResponseModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return AnthropicAPIMessageBuilder\n\n\n@dataclass\nclass OpenAIChatModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n api = \"openai\"\n\n def 
make_model(self):\n return OpenAIChatCompletionModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return OpenAIChatCompletionAPIMessageBuilder\n\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.OpenAIChatModelArgs","uri":"program://AgentLab/class/src.agentlab.llm.response_api.OpenAIChatModelArgs#L948-L962","kind":"class","name":"OpenAIChatModelArgs","path":"src/agentlab/llm/response_api.py","language":"python","start_line":948,"end_line":962,"context_start_line":928,"context_end_line":982,"code":"\n@dataclass\nclass ClaudeResponseModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an Anthropic\n model.\"\"\"\n\n api = \"anthropic\"\n\n def make_model(self):\n return ClaudeResponseModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return AnthropicAPIMessageBuilder\n\n\n@dataclass\nclass OpenAIChatModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n api = \"openai\"\n\n def make_model(self):\n return OpenAIChatCompletionModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return OpenAIChatCompletionAPIMessageBuilder\n\n\n@dataclass\nclass OpenRouterModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenRouter\n model.\"\"\"\n\n api: str = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n\n def make_model(self):\n return OpenAIChatCompletionModel(\n base_url=\"https://openrouter.ai/api/v1\",\n api_key=os.getenv(\"OPENROUTER_API_KEY\"),\n 
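A typical factory flow; the field names follow BaseModelArgs as used in these factories, and the model name is a hypothetical stand-in (make_model also needs the matching API key in the environment):

```python
from agentlab.llm.response_api import OpenAIResponseModelArgs

args = OpenAIResponseModelArgs(model_name="gpt-4.1", temperature=0.0, max_new_tokens=500)
model = args.make_model()             # -> OpenAIResponseModel
Builder = args.get_message_builder()  # -> the builder class itself, not an instance
msgs = [Builder.system().add_text("Be brief."), Builder.user().add_text("Hello")]
```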
model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return OpenAIChatCompletionAPIMessageBuilder\n\n\ndef tool_call_to_python_code(func_name, kwargs):\n \"\"\"Format a function name and kwargs dict into a Python function call string.\"\"\"\n if kwargs is None:\n kwargs = {}\n\n if not kwargs:\n return f\"{func_name}()\"\n\n args_str = \", \".join(f\"{key}={repr(value)}\" for key, value in kwargs.items())\n return f\"{func_name}({args_str})\"\n\n\n# ___Not__Tested__#\n\n# class VLLMModelArgs(BaseModelArgs):\n# \"\"\"Serializable object for instantiating a generic chat model with a VLLM\n# model.\"\"\"\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.tool_call_to_python_code","uri":"program://AgentLab/function/src.agentlab.llm.response_api.tool_call_to_python_code#L985-L994","kind":"function","name":"tool_call_to_python_code","path":"src/agentlab/llm/response_api.py","language":"python","start_line":985,"end_line":994,"context_start_line":965,"context_end_line":1014,"code":"@dataclass\nclass OpenRouterModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenRouter\n model.\"\"\"\n\n api: str = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n\n def make_model(self):\n return OpenAIChatCompletionModel(\n base_url=\"https://openrouter.ai/api/v1\",\n api_key=os.getenv(\"OPENROUTER_API_KEY\"),\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return OpenAIChatCompletionAPIMessageBuilder\n\n\ndef tool_call_to_python_code(func_name, kwargs):\n \"\"\"Format a function name and kwargs dict into a Python function call string.\"\"\"\n if kwargs is None:\n kwargs = {}\n\n if not kwargs:\n return f\"{func_name}()\"\n\n args_str = \", \".join(f\"{key}={repr(value)}\" for key, value in kwargs.items())\n return f\"{func_name}({args_str})\"\n\n\n# ___Not__Tested__#\n\n# class VLLMModelArgs(BaseModelArgs):\n# \"\"\"Serializable object for instantiating a generic chat model with a VLLM\n# model.\"\"\"\n\n# api = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n\n# def make_model(self, extra_kwargs=None, **kwargs):\n# return OpenAIChatCompletionModel(\n# client_args={\n# \"base_url\": \"http://localhost:8000/v1\",\n# \"api_key\": os.getenv(\"VLLM_API_KEY\", \"EMPTY\"),\n# },\n# model_name=self.model_name, # this needs to be set\n# temperature=self.temperature,\n# max_tokens=self.max_new_tokens,\n# extra_kwargs=extra_kwargs,","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.is_response_set","uri":"program://AgentLab/function/src.agentlab.llm.response_api.is_response_set#L52-L54","kind":"function","name":"is_response_set","path":"src/agentlab/llm/response_api.py","language":"python","start_line":52,"end_line":54,"context_start_line":32,"context_end_line":74,"code":"ContentItem = Dict[str, Any]\nMessage = Dict[str, Union[str, List[ContentItem]]]\n\n\n@dataclass\nclass ToolCall:\n \"\"\"Represents a tool call made by the LLM.\n Attributes:\n name: Name of the tool called.\n arguments: Arguments passed to the tool.\n raw_call: The raw call object from the 
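`tool_call_to_python_code` renders kwargs with `repr`, so strings come out quoted:

```python
from agentlab.llm.response_api import tool_call_to_python_code

print(tool_call_to_python_code("send_msg_to_user", {"text": "done"}))
# send_msg_to_user(text='done')
print(tool_call_to_python_code("scroll", None))
# scroll()
```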
LLM API.\n tool_response: Output of the tool call goes here. It can be only one content item.\n \"\"\"\n\n name: str = field(default=None)\n arguments: Dict[str, Any] = field(default_factory=dict)\n raw_call: Any = field(default=None)\n tool_response: ContentItem = None\n\n @property\n def is_response_set(self) -> bool:\n \"\"\"Check if the tool response is set.\"\"\"\n return self.tool_response is not None\n\n def response_text(self, text: str) -> \"ToolCall\":\n self.tool_response = {\"text\": text}\n return self\n\n def response_image(self, image: str) -> \"ToolCall\":\n self.tool_response = {\"image\": image}\n return self\n\n def __repr__(self):\n return f\"ToolCall(name={self.name}, arguments={self.arguments})\"\n\n\n@dataclass\nclass ToolCalls:\n \"\"\"A collection of tool calls made by the LLM.\n\n Attributes:\n tool_calls: List of ToolCall objects.\n raw_calls: Represents raw tool calls object returned by a LLM API, may contain one or more tool calls.","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.response_text","uri":"program://AgentLab/function/src.agentlab.llm.response_api.response_text#L56-L58","kind":"function","name":"response_text","path":"src/agentlab/llm/response_api.py","language":"python","start_line":56,"end_line":58,"context_start_line":36,"context_end_line":78,"code":"@dataclass\nclass ToolCall:\n \"\"\"Represents a tool call made by the LLM.\n Attributes:\n name: Name of the tool called.\n arguments: Arguments passed to the tool.\n raw_call: The raw call object from the LLM API.\n tool_response: Output of the tool call goes here. It can be only one content item.\n \"\"\"\n\n name: str = field(default=None)\n arguments: Dict[str, Any] = field(default_factory=dict)\n raw_call: Any = field(default=None)\n tool_response: ContentItem = None\n\n @property\n def is_response_set(self) -> bool:\n \"\"\"Check if the tool response is set.\"\"\"\n return self.tool_response is not None\n\n def response_text(self, text: str) -> \"ToolCall\":\n self.tool_response = {\"text\": text}\n return self\n\n def response_image(self, image: str) -> \"ToolCall\":\n self.tool_response = {\"image\": image}\n return self\n\n def __repr__(self):\n return f\"ToolCall(name={self.name}, arguments={self.arguments})\"\n\n\n@dataclass\nclass ToolCalls:\n \"\"\"A collection of tool calls made by the LLM.\n\n Attributes:\n tool_calls: List of ToolCall objects.\n raw_calls: Represents raw tool calls object returned by a LLM API, may contain one or more tool calls.\n \"\"\"\n\n tool_calls: List[ToolCall] = field(default_factory=list)\n raw_calls: List[Any] = field(default_factory=list)","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.response_image","uri":"program://AgentLab/function/src.agentlab.llm.response_api.response_image#L60-L62","kind":"function","name":"response_image","path":"src/agentlab/llm/response_api.py","language":"python","start_line":60,"end_line":62,"context_start_line":40,"context_end_line":82,"code":" name: Name of the tool called.\n arguments: Arguments passed to the tool.\n raw_call: The raw call object from the LLM API.\n tool_response: Output of the tool call goes here. 
It can be only one content item.\n \"\"\"\n\n name: str = field(default=None)\n arguments: Dict[str, Any] = field(default_factory=dict)\n raw_call: Any = field(default=None)\n tool_response: ContentItem = None\n\n @property\n def is_response_set(self) -> bool:\n \"\"\"Check if the tool response is set.\"\"\"\n return self.tool_response is not None\n\n def response_text(self, text: str) -> \"ToolCall\":\n self.tool_response = {\"text\": text}\n return self\n\n def response_image(self, image: str) -> \"ToolCall\":\n self.tool_response = {\"image\": image}\n return self\n\n def __repr__(self):\n return f\"ToolCall(name={self.name}, arguments={self.arguments})\"\n\n\n@dataclass\nclass ToolCalls:\n \"\"\"A collection of tool calls made by the LLM.\n\n Attributes:\n tool_calls: List of ToolCall objects.\n raw_calls: Represents raw tool calls object returned by a LLM API, may contain one or more tool calls.\n \"\"\"\n\n tool_calls: List[ToolCall] = field(default_factory=list)\n raw_calls: List[Any] = field(default_factory=list)\n\n def add_tool_call(self, tool_call: ToolCall) -> \"ToolCalls\":\n self.tool_calls.append(tool_call)\n return self","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.__repr__","uri":"program://AgentLab/function/src.agentlab.llm.response_api.__repr__#L64-L65","kind":"function","name":"__repr__","path":"src/agentlab/llm/response_api.py","language":"python","start_line":64,"end_line":65,"context_start_line":44,"context_end_line":85,"code":" \"\"\"\n\n name: str = field(default=None)\n arguments: Dict[str, Any] = field(default_factory=dict)\n raw_call: Any = field(default=None)\n tool_response: ContentItem = None\n\n @property\n def is_response_set(self) -> bool:\n \"\"\"Check if the tool response is set.\"\"\"\n return self.tool_response is not None\n\n def response_text(self, text: str) -> \"ToolCall\":\n self.tool_response = {\"text\": text}\n return self\n\n def response_image(self, image: str) -> \"ToolCall\":\n self.tool_response = {\"image\": image}\n return self\n\n def __repr__(self):\n return f\"ToolCall(name={self.name}, arguments={self.arguments})\"\n\n\n@dataclass\nclass ToolCalls:\n \"\"\"A collection of tool calls made by the LLM.\n\n Attributes:\n tool_calls: List of ToolCall objects.\n raw_calls: Represents raw tool calls object returned by a LLM API, may contain one or more tool calls.\n \"\"\"\n\n tool_calls: List[ToolCall] = field(default_factory=list)\n raw_calls: List[Any] = field(default_factory=list)\n\n def add_tool_call(self, tool_call: ToolCall) -> \"ToolCalls\":\n self.tool_calls.append(tool_call)\n return self\n\n @property\n def all_responses_set(self) -> bool:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.add_tool_call","uri":"program://AgentLab/function/src.agentlab.llm.response_api.add_tool_call#L80-L82","kind":"function","name":"add_tool_call","path":"src/agentlab/llm/response_api.py","language":"python","start_line":80,"end_line":82,"context_start_line":60,"context_end_line":102,"code":" def response_image(self, image: str) -> \"ToolCall\":\n self.tool_response = {\"image\": image}\n return self\n\n def __repr__(self):\n return f\"ToolCall(name={self.name}, arguments={self.arguments})\"\n\n\n@dataclass\nclass ToolCalls:\n \"\"\"A collection of tool calls made by the LLM.\n\n 
Attributes:\n tool_calls: List of ToolCall objects.\n raw_calls: Represents raw tool calls object returned by a LLM API, may contain one or more tool calls.\n \"\"\"\n\n tool_calls: List[ToolCall] = field(default_factory=list)\n raw_calls: List[Any] = field(default_factory=list)\n\n def add_tool_call(self, tool_call: ToolCall) -> \"ToolCalls\":\n self.tool_calls.append(tool_call)\n return self\n\n @property\n def all_responses_set(self) -> bool:\n \"\"\"Check if all tool calls have responses set.\"\"\"\n return all(call.is_response_set for call in self.tool_calls)\n\n def __len__(self) -> int:\n \"\"\"Return the number of tool calls.\"\"\"\n return len(self.tool_calls)\n\n def __iter__(self):\n \"\"\"Make ToolCalls iterable.\"\"\"\n return iter(self.tool_calls)\n\n def __bool__(self):\n \"\"\"Check if there are any tool calls.\"\"\"\n return len(self.tool_calls) > 0\n\n\n@dataclass","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.all_responses_set","uri":"program://AgentLab/function/src.agentlab.llm.response_api.all_responses_set#L85-L87","kind":"function","name":"all_responses_set","path":"src/agentlab/llm/response_api.py","language":"python","start_line":85,"end_line":87,"context_start_line":65,"context_end_line":107,"code":" return f\"ToolCall(name={self.name}, arguments={self.arguments})\"\n\n\n@dataclass\nclass ToolCalls:\n \"\"\"A collection of tool calls made by the LLM.\n\n Attributes:\n tool_calls: List of ToolCall objects.\n raw_calls: Represents raw tool calls object returned by a LLM API, may contain one or more tool calls.\n \"\"\"\n\n tool_calls: List[ToolCall] = field(default_factory=list)\n raw_calls: List[Any] = field(default_factory=list)\n\n def add_tool_call(self, tool_call: ToolCall) -> \"ToolCalls\":\n self.tool_calls.append(tool_call)\n return self\n\n @property\n def all_responses_set(self) -> bool:\n \"\"\"Check if all tool calls have responses set.\"\"\"\n return all(call.is_response_set for call in self.tool_calls)\n\n def __len__(self) -> int:\n \"\"\"Return the number of tool calls.\"\"\"\n return len(self.tool_calls)\n\n def __iter__(self):\n \"\"\"Make ToolCalls iterable.\"\"\"\n return iter(self.tool_calls)\n\n def __bool__(self):\n \"\"\"Check if there are any tool calls.\"\"\"\n return len(self.tool_calls) > 0\n\n\n@dataclass\nclass LLMOutput:\n \"\"\"Serializable object for the output of a response LLM.\"\"\"\n\n raw_response: Any = field(default=None)\n think: str = field(default=\"\")","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.__len__","uri":"program://AgentLab/function/src.agentlab.llm.response_api.__len__#L89-L91","kind":"function","name":"__len__","path":"src/agentlab/llm/response_api.py","language":"python","start_line":89,"end_line":91,"context_start_line":69,"context_end_line":111,"code":"class ToolCalls:\n \"\"\"A collection of tool calls made by the LLM.\n\n Attributes:\n tool_calls: List of ToolCall objects.\n raw_calls: Represents raw tool calls object returned by a LLM API, may contain one or more tool calls.\n \"\"\"\n\n tool_calls: List[ToolCall] = field(default_factory=list)\n raw_calls: List[Any] = field(default_factory=list)\n\n def add_tool_call(self, tool_call: ToolCall) -> \"ToolCalls\":\n self.tool_calls.append(tool_call)\n return self\n\n @property\n def all_responses_set(self) -> bool:\n 
\"\"\"Check if all tool calls have responses set.\"\"\"\n return all(call.is_response_set for call in self.tool_calls)\n\n def __len__(self) -> int:\n \"\"\"Return the number of tool calls.\"\"\"\n return len(self.tool_calls)\n\n def __iter__(self):\n \"\"\"Make ToolCalls iterable.\"\"\"\n return iter(self.tool_calls)\n\n def __bool__(self):\n \"\"\"Check if there are any tool calls.\"\"\"\n return len(self.tool_calls) > 0\n\n\n@dataclass\nclass LLMOutput:\n \"\"\"Serializable object for the output of a response LLM.\"\"\"\n\n raw_response: Any = field(default=None)\n think: str = field(default=\"\")\n action: str | None = field(default=None) # Default action if no tool call is made\n tool_calls: ToolCalls | None = field(\n default=None\n ) # This will hold the tool call response if any","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.__iter__","uri":"program://AgentLab/function/src.agentlab.llm.response_api.__iter__#L93-L95","kind":"function","name":"__iter__","path":"src/agentlab/llm/response_api.py","language":"python","start_line":93,"end_line":95,"context_start_line":73,"context_end_line":115,"code":" tool_calls: List of ToolCall objects.\n raw_calls: Represents raw tool calls object returned by a LLM API, may contain one or more tool calls.\n \"\"\"\n\n tool_calls: List[ToolCall] = field(default_factory=list)\n raw_calls: List[Any] = field(default_factory=list)\n\n def add_tool_call(self, tool_call: ToolCall) -> \"ToolCalls\":\n self.tool_calls.append(tool_call)\n return self\n\n @property\n def all_responses_set(self) -> bool:\n \"\"\"Check if all tool calls have responses set.\"\"\"\n return all(call.is_response_set for call in self.tool_calls)\n\n def __len__(self) -> int:\n \"\"\"Return the number of tool calls.\"\"\"\n return len(self.tool_calls)\n\n def __iter__(self):\n \"\"\"Make ToolCalls iterable.\"\"\"\n return iter(self.tool_calls)\n\n def __bool__(self):\n \"\"\"Check if there are any tool calls.\"\"\"\n return len(self.tool_calls) > 0\n\n\n@dataclass\nclass LLMOutput:\n \"\"\"Serializable object for the output of a response LLM.\"\"\"\n\n raw_response: Any = field(default=None)\n think: str = field(default=\"\")\n action: str | None = field(default=None) # Default action if no tool call is made\n tool_calls: ToolCalls | None = field(\n default=None\n ) # This will hold the tool call response if any\n\n\nclass MessageBuilder:\n def __init__(self, role: str):","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.__bool__","uri":"program://AgentLab/function/src.agentlab.llm.response_api.__bool__#L97-L99","kind":"function","name":"__bool__","path":"src/agentlab/llm/response_api.py","language":"python","start_line":97,"end_line":99,"context_start_line":77,"context_end_line":119,"code":" tool_calls: List[ToolCall] = field(default_factory=list)\n raw_calls: List[Any] = field(default_factory=list)\n\n def add_tool_call(self, tool_call: ToolCall) -> \"ToolCalls\":\n self.tool_calls.append(tool_call)\n return self\n\n @property\n def all_responses_set(self) -> bool:\n \"\"\"Check if all tool calls have responses set.\"\"\"\n return all(call.is_response_set for call in self.tool_calls)\n\n def __len__(self) -> int:\n \"\"\"Return the number of tool calls.\"\"\"\n return len(self.tool_calls)\n\n def __iter__(self):\n \"\"\"Make ToolCalls 
iterable.\"\"\"\n return iter(self.tool_calls)\n\n def __bool__(self):\n \"\"\"Check if there are any tool calls.\"\"\"\n return len(self.tool_calls) > 0\n\n\n@dataclass\nclass LLMOutput:\n \"\"\"Serializable object for the output of a response LLM.\"\"\"\n\n raw_response: Any = field(default=None)\n think: str = field(default=\"\")\n action: str | None = field(default=None) # Default action if no tool call is made\n tool_calls: ToolCalls | None = field(\n default=None\n ) # This will hold the tool call response if any\n\n\nclass MessageBuilder:\n def __init__(self, role: str):\n self.role = role\n self.content: List[ContentItem] = []\n self.responded_tool_calls: ToolCalls = None\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.__init__","uri":"program://AgentLab/function/src.agentlab.llm.response_api.__init__#L765-L787","kind":"function","name":"__init__","path":"src/agentlab/llm/response_api.py","language":"python","start_line":765,"end_line":787,"context_start_line":745,"context_end_line":807,"code":"\n Args:\n tools: List of tool descriptions to format for Chat Completion API.\n\n Returns:\n Formatted tools list compatible with OpenAI Chat Completion API, or None if tools is None.\n \"\"\"\n formatted_tools = None\n if tools is not None:\n formatted_tools = [\n {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] for k in (\"name\", \"description\", \"parameters\")},\n }\n for tool in tools\n ]\n return formatted_tools\n\n\nclass ClaudeResponseModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n self.action_space_as_tools = True # this should be a config\n\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key\n self.client = Anthropic(**client_args)\n self.init_pricing_tracker(pricing_api=\"anthropic\") # Use the PricingMixin\n\n def _call_api(self, payload: APIPayload) -> Completion:\n sys_msg, other_msgs = self.filter_system_messages(payload.messages)\n sys_msg_text = \"\\n\".join(c[\"text\"] for m in sys_msg for c in m.content)\n input = []\n for msg in other_msgs:\n temp = msg.prepare_message()\n if payload.use_cache_breakpoints:\n temp = self.apply_cache_breakpoints(msg, temp)\n input.extend(temp)\n\n api_params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"messages\": input,\n \"system\": sys_msg_text,\n } # Anthropic API expects system message as a string\n\n if self.temperature is not None:\n api_params[\"temperature\"] = self.temperature\n if self.max_tokens is not None:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.system","uri":"program://AgentLab/function/src.agentlab.llm.response_api.system#L195-L197","kind":"function","name":"system","path":"src/agentlab/llm/response_api.py","language":"python","start_line":195,"end_line":197,"context_start_line":175,"context_end_line":217,"code":" \"\"\"Add an image URL to the message content.\"\"\"\n self.content.append({\"image\": image_to_png_base64_url(image_url)})\n return self\n\n def 
mark_all_previous_msg_for_caching(self):\n \"\"\"Insert a cache breakpoint in the message content.\"\"\"\n # This is a placeholder for future implementation.\n raise NotImplementedError\n\n @classmethod\n def add_responded_tool_calls(cls, responded_tool_calls: ToolCalls) -> \"MessageBuilder\":\n \"\"\"Add tool calls to the message content.\"\"\"\n assert responded_tool_calls.all_responses_set, \"All tool calls must have a response.\"\n msg = cls(\"tool\")\n msg.responded_tool_calls = responded_tool_calls\n return msg\n\n\nclass OpenAIResponseAPIMessageBuilder(MessageBuilder):\n @classmethod\n def system(cls) -> \"OpenAIResponseAPIMessageBuilder\":\n # OpenAI Responses API uses 'developer' role for system messages\n return cls(\"developer\")\n\n def prepare_message(self) -> List[Message]:\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Convert the content item to the expected format for OpenAI Responses.\"\"\"\n if \"text\" in content:\n content_type = \"input_text\" if self.role != \"assistant\" else \"output_text\"\n return {\"type\": content_type, \"text\": content[\"text\"]}\n elif \"image\" in content:\n return {\"type\": \"input_image\", \"image_url\": content[\"image\"]}\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def handle_tool_call(self) -> List[Message]:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.user","uri":"program://AgentLab/function/src.agentlab.llm.response_api.user#L125-L126","kind":"function","name":"user","path":"src/agentlab/llm/response_api.py","language":"python","start_line":125,"end_line":126,"context_start_line":105,"context_end_line":146,"code":"\n raw_response: Any = field(default=None)\n think: str = field(default=\"\")\n action: str | None = field(default=None) # Default action if no tool call is made\n tool_calls: ToolCalls | None = field(\n default=None\n ) # This will hold the tool call response if any\n\n\nclass MessageBuilder:\n def __init__(self, role: str):\n self.role = role\n self.content: List[ContentItem] = []\n self.responded_tool_calls: ToolCalls = None\n\n @classmethod\n def system(cls) -> \"MessageBuilder\":\n return cls(\"system\")\n\n @classmethod\n def user(cls) -> \"MessageBuilder\":\n return cls(\"user\")\n\n @classmethod\n def assistant(cls) -> \"MessageBuilder\":\n return cls(\"assistant\")\n\n @abstractmethod\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the API call.\"\"\"\n raise NotImplementedError(\"Subclasses must implement this method.\")\n\n def add_text(self, text: str) -> \"MessageBuilder\":\n self.content.append({\"text\": text})\n return self\n\n def add_image(self, image: str) -> \"MessageBuilder\":\n self.content.append({\"image\": image})\n return self\n\n def to_markdown(self) -> str:\n parts = []","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} 
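The MessageBuilder records above describe a small fluent API: the system/user/assistant classmethods pick the role, add_text/add_image append content items, and each API-specific subclass serializes the accumulated items in prepare_message. A minimal usage sketch, not part of the indexed source; the agentlab.llm.response_api import path is inferred from the records' path field:

from agentlab.llm.response_api import OpenAIChatCompletionAPIMessageBuilder

# Build a user message with a single text content item (fluent chaining).
msg = OpenAIChatCompletionAPIMessageBuilder.user().add_text("Summarize the page.")

# prepare_message() returns Chat Completions-style dicts, e.g.
# [{"role": "user", "content": [{"type": "text", "text": "Summarize the page."}]}]
print(msg.prepare_message())

# to_markdown() renders the same content items for logs and traces.
print(msg.to_markdown())
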
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.assistant","uri":"program://AgentLab/function/src.agentlab.llm.response_api.assistant#L129-L130","kind":"function","name":"assistant","path":"src/agentlab/llm/response_api.py","language":"python","start_line":129,"end_line":130,"context_start_line":109,"context_end_line":150,"code":" tool_calls: ToolCalls | None = field(\n default=None\n ) # This will hold the tool call response if any\n\n\nclass MessageBuilder:\n def __init__(self, role: str):\n self.role = role\n self.content: List[ContentItem] = []\n self.responded_tool_calls: ToolCalls = None\n\n @classmethod\n def system(cls) -> \"MessageBuilder\":\n return cls(\"system\")\n\n @classmethod\n def user(cls) -> \"MessageBuilder\":\n return cls(\"user\")\n\n @classmethod\n def assistant(cls) -> \"MessageBuilder\":\n return cls(\"assistant\")\n\n @abstractmethod\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the API call.\"\"\"\n raise NotImplementedError(\"Subclasses must implement this method.\")\n\n def add_text(self, text: str) -> \"MessageBuilder\":\n self.content.append({\"text\": text})\n return self\n\n def add_image(self, image: str) -> \"MessageBuilder\":\n self.content.append({\"image\": image})\n return self\n\n def to_markdown(self) -> str:\n parts = []\n for item in self.content:\n if \"text\" in item:\n parts.append(f\"\\n```\\n{item['text']}\\n```\\n\")\n elif \"image\" in item:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.prepare_message","uri":"program://AgentLab/function/src.agentlab.llm.response_api.prepare_message#L329-L335","kind":"function","name":"prepare_message","path":"src/agentlab/llm/response_api.py","language":"python","start_line":329,"end_line":335,"context_start_line":309,"context_end_line":355,"code":" # e.g. 
\"data:image/png;base64\"\n if img_str.startswith(\"data:image/png;base64,\"):\n img_str = img_str[len(\"data:image/png;base64,\") :]\n return {\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": \"image/png\",\n \"data\": img_str,\n },\n }\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def mark_all_previous_msg_for_caching(self) -> List[Message]:\n \"\"\"Insert a cache breakpoint in the message content to mark all previous messages for caching.\"\"\"\n self._cache_breakpoint = True\n\n\nclass OpenAIChatCompletionAPIMessageBuilder(MessageBuilder):\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Transform content item to the format expected by OpenAI ChatCompletion.\"\"\"\n if \"text\" in content:\n return {\"type\": \"text\", \"text\": content[\"text\"]}\n elif \"image\" in content:\n return {\"type\": \"image_url\", \"image_url\": {\"url\": content[\"image\"]}}\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def handle_tool_call(self) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n output.append(\n self.responded_tool_calls.raw_calls.choices[0].message\n ) # add raw calls to output\n for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.add_text","uri":"program://AgentLab/function/src.agentlab.llm.response_api.add_text#L137-L139","kind":"function","name":"add_text","path":"src/agentlab/llm/response_api.py","language":"python","start_line":137,"end_line":139,"context_start_line":117,"context_end_line":159,"code":" self.content: List[ContentItem] = []\n self.responded_tool_calls: ToolCalls = None\n\n @classmethod\n def system(cls) -> \"MessageBuilder\":\n return cls(\"system\")\n\n @classmethod\n def user(cls) -> \"MessageBuilder\":\n return cls(\"user\")\n\n @classmethod\n def assistant(cls) -> \"MessageBuilder\":\n return cls(\"assistant\")\n\n @abstractmethod\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the API call.\"\"\"\n raise NotImplementedError(\"Subclasses must implement this method.\")\n\n def add_text(self, text: str) -> \"MessageBuilder\":\n self.content.append({\"text\": text})\n return self\n\n def add_image(self, image: str) -> \"MessageBuilder\":\n self.content.append({\"image\": image})\n return self\n\n def to_markdown(self) -> str:\n parts = []\n for item in self.content:\n if \"text\" in item:\n parts.append(f\"\\n```\\n{item['text']}\\n```\\n\")\n elif \"image\" in item:\n parts.append(f\"![Image]({item['image']})\")\n\n # Tool call markdown repr\n if self.responded_tool_calls is not None:\n for i, tool_call in enumerate(self.responded_tool_calls.tool_calls, 1):\n parts.append(\n f\"\\n**Tool Call {i}**: {tool_call_to_python_code(tool_call.name, tool_call.arguments)}\"\n )\n response = 
tool_call.tool_response","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.add_image","uri":"program://AgentLab/function/src.agentlab.llm.response_api.add_image#L141-L143","kind":"function","name":"add_image","path":"src/agentlab/llm/response_api.py","language":"python","start_line":141,"end_line":143,"context_start_line":121,"context_end_line":163,"code":" def system(cls) -> \"MessageBuilder\":\n return cls(\"system\")\n\n @classmethod\n def user(cls) -> \"MessageBuilder\":\n return cls(\"user\")\n\n @classmethod\n def assistant(cls) -> \"MessageBuilder\":\n return cls(\"assistant\")\n\n @abstractmethod\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the API call.\"\"\"\n raise NotImplementedError(\"Subclasses must implement this method.\")\n\n def add_text(self, text: str) -> \"MessageBuilder\":\n self.content.append({\"text\": text})\n return self\n\n def add_image(self, image: str) -> \"MessageBuilder\":\n self.content.append({\"image\": image})\n return self\n\n def to_markdown(self) -> str:\n parts = []\n for item in self.content:\n if \"text\" in item:\n parts.append(f\"\\n```\\n{item['text']}\\n```\\n\")\n elif \"image\" in item:\n parts.append(f\"![Image]({item['image']})\")\n\n # Tool call markdown repr\n if self.responded_tool_calls is not None:\n for i, tool_call in enumerate(self.responded_tool_calls.tool_calls, 1):\n parts.append(\n f\"\\n**Tool Call {i}**: {tool_call_to_python_code(tool_call.name, tool_call.arguments)}\"\n )\n response = tool_call.tool_response\n if response is not None:\n parts.append(f\"\\n**Tool Response {i}:**\")\n content = (\n f\"```\\n{response['text']}\\n```\"","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.to_markdown","uri":"program://AgentLab/function/src.agentlab.llm.response_api.to_markdown#L145-L172","kind":"function","name":"to_markdown","path":"src/agentlab/llm/response_api.py","language":"python","start_line":145,"end_line":172,"context_start_line":125,"context_end_line":192,"code":" def user(cls) -> \"MessageBuilder\":\n return cls(\"user\")\n\n @classmethod\n def assistant(cls) -> \"MessageBuilder\":\n return cls(\"assistant\")\n\n @abstractmethod\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the API call.\"\"\"\n raise NotImplementedError(\"Subclasses must implement this method.\")\n\n def add_text(self, text: str) -> \"MessageBuilder\":\n self.content.append({\"text\": text})\n return self\n\n def add_image(self, image: str) -> \"MessageBuilder\":\n self.content.append({\"image\": image})\n return self\n\n def to_markdown(self) -> str:\n parts = []\n for item in self.content:\n if \"text\" in item:\n parts.append(f\"\\n```\\n{item['text']}\\n```\\n\")\n elif \"image\" in item:\n parts.append(f\"![Image]({item['image']})\")\n\n # Tool call markdown repr\n if self.responded_tool_calls is not None:\n for i, tool_call in enumerate(self.responded_tool_calls.tool_calls, 1):\n parts.append(\n f\"\\n**Tool Call {i}**: {tool_call_to_python_code(tool_call.name, tool_call.arguments)}\"\n )\n response = tool_call.tool_response\n if response is not None:\n parts.append(f\"\\n**Tool Response {i}:**\")\n content = (\n f\"```\\n{response['text']}\\n```\"\n if \"text\" in response\n else f\"![Tool Response Image]({response['image']})\"\n )\n 
parts.append(content)\n\n markdown = f\"### {self.role.capitalize()}\\n\"\n markdown += \"\\n\".join(parts)\n\n return markdown\n\n def add_image_url(self, image_url: str) -> \"MessageBuilder\":\n \"\"\"Add an image URL to the message content.\"\"\"\n self.content.append({\"image\": image_to_png_base64_url(image_url)})\n return self\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Insert a cache breakpoint in the message content.\"\"\"\n # This is a placeholder for future implementation.\n raise NotImplementedError\n\n @classmethod\n def add_responded_tool_calls(cls, responded_tool_calls: ToolCalls) -> \"MessageBuilder\":\n \"\"\"Add tool calls to the message content.\"\"\"\n assert responded_tool_calls.all_responses_set, \"All tool calls must have a response.\"\n msg = cls(\"tool\")\n msg.responded_tool_calls = responded_tool_calls\n return msg\n\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.add_image_url","uri":"program://AgentLab/function/src.agentlab.llm.response_api.add_image_url#L174-L177","kind":"function","name":"add_image_url","path":"src/agentlab/llm/response_api.py","language":"python","start_line":174,"end_line":177,"context_start_line":154,"context_end_line":197,"code":" if self.responded_tool_calls is not None:\n for i, tool_call in enumerate(self.responded_tool_calls.tool_calls, 1):\n parts.append(\n f\"\\n**Tool Call {i}**: {tool_call_to_python_code(tool_call.name, tool_call.arguments)}\"\n )\n response = tool_call.tool_response\n if response is not None:\n parts.append(f\"\\n**Tool Response {i}:**\")\n content = (\n f\"```\\n{response['text']}\\n```\"\n if \"text\" in response\n else f\"![Tool Response Image]({response['image']})\"\n )\n parts.append(content)\n\n markdown = f\"### {self.role.capitalize()}\\n\"\n markdown += \"\\n\".join(parts)\n\n return markdown\n\n def add_image_url(self, image_url: str) -> \"MessageBuilder\":\n \"\"\"Add an image URL to the message content.\"\"\"\n self.content.append({\"image\": image_to_png_base64_url(image_url)})\n return self\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Insert a cache breakpoint in the message content.\"\"\"\n # This is a placeholder for future implementation.\n raise NotImplementedError\n\n @classmethod\n def add_responded_tool_calls(cls, responded_tool_calls: ToolCalls) -> \"MessageBuilder\":\n \"\"\"Add tool calls to the message content.\"\"\"\n assert responded_tool_calls.all_responses_set, \"All tool calls must have a response.\"\n msg = cls(\"tool\")\n msg.responded_tool_calls = responded_tool_calls\n return msg\n\n\nclass OpenAIResponseAPIMessageBuilder(MessageBuilder):\n @classmethod\n def system(cls) -> \"OpenAIResponseAPIMessageBuilder\":\n # OpenAI Responses API uses 'developer' role for system messages\n return cls(\"developer\")","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.mark_all_previous_msg_for_caching","uri":"program://AgentLab/function/src.agentlab.llm.response_api.mark_all_previous_msg_for_caching#L370-L372","kind":"function","name":"mark_all_previous_msg_for_caching","path":"src/agentlab/llm/response_api.py","language":"python","start_line":370,"end_line":372,"context_start_line":350,"context_end_line":392,"code":" output = []\n output.append(\n self.responded_tool_calls.raw_calls.choices[0].message\n ) # add raw calls to output\n 
for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call\n assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_response = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],\n }\n output.append(tool_call_response)\n\n return output\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Nothing special to do here for OpenAI. They do not have a notion of cache breakpoints.\"\"\"\n pass\n\n\n@dataclass\nclass APIPayload:\n messages: List[MessageBuilder] | None = None\n tools: List[Dict[str, Any]] | None = None\n tool_choice: Literal[\"none\", \"auto\", \"any\", \"required\"] | None = None\n force_call_tool: str | None = (\n None # Name of the tool to call # If set, will force the LLM to call this tool.\n )\n use_cache_breakpoints: bool = (\n False # If True, will apply cache breakpoints to the messages. # applicable for Anthropic\n )\n cache_tool_definition: bool = (\n False # If True, will cache the tool definition in the last message.\n )\n cache_complete_prompt: bool = (\n False # If True, will cache the complete prompt in the last message.\n )\n reasoning_effort: Literal[\"low\", \"medium\", \"high\"] | None = None","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.add_responded_tool_calls","uri":"program://AgentLab/function/src.agentlab.llm.response_api.add_responded_tool_calls#L185-L190","kind":"function","name":"add_responded_tool_calls","path":"src/agentlab/llm/response_api.py","language":"python","start_line":185,"end_line":190,"context_start_line":165,"context_end_line":210,"code":" else f\"![Tool Response Image]({response['image']})\"\n )\n parts.append(content)\n\n markdown = f\"### {self.role.capitalize()}\\n\"\n markdown += \"\\n\".join(parts)\n\n return markdown\n\n def add_image_url(self, image_url: str) -> \"MessageBuilder\":\n \"\"\"Add an image URL to the message content.\"\"\"\n self.content.append({\"image\": image_to_png_base64_url(image_url)})\n return self\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Insert a cache breakpoint in the message content.\"\"\"\n # This is a placeholder for future implementation.\n raise NotImplementedError\n\n @classmethod\n def add_responded_tool_calls(cls, responded_tool_calls: ToolCalls) -> \"MessageBuilder\":\n \"\"\"Add tool calls to the message content.\"\"\"\n assert responded_tool_calls.all_responses_set, \"All tool calls must have a response.\"\n msg = cls(\"tool\")\n msg.responded_tool_calls = responded_tool_calls\n return msg\n\n\nclass OpenAIResponseAPIMessageBuilder(MessageBuilder):\n @classmethod\n def system(cls) -> \"OpenAIResponseAPIMessageBuilder\":\n # OpenAI Responses API uses 'developer' role for system messages\n return cls(\"developer\")\n\n def prepare_message(self) -> List[Message]:\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Convert the content item to the expected 
format for OpenAI Responses.\"\"\"\n if \"text\" in content:\n content_type = \"input_text\" if self.role != \"assistant\" else \"output_text\"","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.convert_content_to_expected_format","uri":"program://AgentLab/function/src.agentlab.llm.response_api.convert_content_to_expected_format#L337-L344","kind":"function","name":"convert_content_to_expected_format","path":"src/agentlab/llm/response_api.py","language":"python","start_line":337,"end_line":344,"context_start_line":317,"context_end_line":364,"code":" \"data\": img_str,\n },\n }\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def mark_all_previous_msg_for_caching(self) -> List[Message]:\n \"\"\"Insert a cache breakpoint in the message content to mark all previous messages for caching.\"\"\"\n self._cache_breakpoint = True\n\n\nclass OpenAIChatCompletionAPIMessageBuilder(MessageBuilder):\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Transform content item to the format expected by OpenAI ChatCompletion.\"\"\"\n if \"text\" in content:\n return {\"type\": \"text\", \"text\": content[\"text\"]}\n elif \"image\" in content:\n return {\"type\": \"image_url\", \"image_url\": {\"url\": content[\"image\"]}}\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def handle_tool_call(self) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n output.append(\n self.responded_tool_calls.raw_calls.choices[0].message\n ) # add raw calls to output\n for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call\n assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_response = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.handle_tool_call","uri":"program://AgentLab/function/src.agentlab.llm.response_api.handle_tool_call#L346-L368","kind":"function","name":"handle_tool_call","path":"src/agentlab/llm/response_api.py","language":"python","start_line":346,"end_line":368,"context_start_line":326,"context_end_line":388,"code":"\n\nclass OpenAIChatCompletionAPIMessageBuilder(MessageBuilder):\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else 
self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Transform content item to the format expected by OpenAI ChatCompletion.\"\"\"\n if \"text\" in content:\n return {\"type\": \"text\", \"text\": content[\"text\"]}\n elif \"image\" in content:\n return {\"type\": \"image_url\", \"image_url\": {\"url\": content[\"image\"]}}\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def handle_tool_call(self) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n output.append(\n self.responded_tool_calls.raw_calls.choices[0].message\n ) # add raw calls to output\n for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call\n assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_response = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],\n }\n output.append(tool_call_response)\n\n return output\n\n def mark_all_previous_msg_for_caching(self):\n \"\"\"Nothing special to do here for OpenAI. They do not have a notion of cache breakpoints.\"\"\"\n pass\n\n\n@dataclass\nclass APIPayload:\n messages: List[MessageBuilder] | None = None\n tools: List[Dict[str, Any]] | None = None\n tool_choice: Literal[\"none\", \"auto\", \"any\", \"required\"] | None = None\n force_call_tool: str | None = (\n None # Name of the tool to call # If set, will force the LLM to call this tool.\n )\n use_cache_breakpoints: bool = (\n False # If True, will apply cache breakpoints to the messages. 
# applicable for Anthropic\n )\n cache_tool_definition: bool = (\n False # If True, will cache the tool definition in the last message.\n )","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.transform_content","uri":"program://AgentLab/function/src.agentlab.llm.response_api.transform_content#L302-L321","kind":"function","name":"transform_content","path":"src/agentlab/llm/response_api.py","language":"python","start_line":302,"end_line":321,"context_start_line":282,"context_end_line":341,"code":" \"role\": \"assistant\",\n \"content\": self.responded_tool_calls.raw_calls.content,\n } # Add the toolcall block\n tool_response = {\"role\": \"user\", \"content\": []} # Anthropic expects a list of messages\n for call in self.responded_tool_calls:\n assert (\n \"image\" not in call.tool_response\n ), \"Image output is not supported in tool calls response.\"\n tool_response[\"content\"].append(\n {\n \"type\": \"tool_result\",\n \"tool_use_id\": call.raw_call.id,\n \"content\": self.transform_content(call.tool_response)[\n \"text\"\n ], # needs to be str\n }\n )\n\n return [llm_tool_call, tool_response]\n\n def transform_content(self, content: ContentItem) -> ContentItem:\n \"\"\"Transform content item to the format expected by Anthropic API.\"\"\"\n if \"text\" in content:\n return {\"type\": \"text\", \"text\": content[\"text\"]}\n elif \"image\" in content:\n img_str: str = content[\"image\"]\n # make sure to get rid of the image type for anthropic\n # e.g. \"data:image/png;base64\"\n if img_str.startswith(\"data:image/png;base64,\"):\n img_str = img_str[len(\"data:image/png;base64,\") :]\n return {\n \"type\": \"image\",\n \"source\": {\n \"type\": \"base64\",\n \"media_type\": \"image/png\",\n \"data\": img_str,\n },\n }\n else:\n raise ValueError(f\"Unsupported content type: {content}\")\n\n def mark_all_previous_msg_for_caching(self) -> List[Message]:\n \"\"\"Insert a cache breakpoint in the message content to mark all previous messages for caching.\"\"\"\n self._cache_breakpoint = True\n\n\nclass OpenAIChatCompletionAPIMessageBuilder(MessageBuilder):\n def prepare_message(self) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call()\n\n def convert_content_to_expected_format(self, content: ContentItem) -> ContentItem:\n \"\"\"Transform content item to the format expected by OpenAI ChatCompletion.\"\"\"\n if \"text\" in content:\n return {\"type\": \"text\", \"text\": content[\"text\"]}\n elif \"image\" in content:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.__post_init__","uri":"program://AgentLab/function/src.agentlab.llm.response_api.__post_init__#L394-L400","kind":"function","name":"__post_init__","path":"src/agentlab/llm/response_api.py","language":"python","start_line":394,"end_line":400,"context_start_line":374,"context_end_line":420,"code":"\n@dataclass\nclass APIPayload:\n messages: List[MessageBuilder] | None = None\n tools: List[Dict[str, Any]] | None = None\n tool_choice: Literal[\"none\", \"auto\", \"any\", \"required\"] | None = None\n force_call_tool: str | None = (\n None # Name of the tool 
to call # If set, will force the LLM to call this tool.\n )\n use_cache_breakpoints: bool = (\n False # If True, will apply cache breakpoints to the messages. # applicable for Anthropic\n )\n cache_tool_definition: bool = (\n False # If True, will cache the tool definition in the last message.\n )\n cache_complete_prompt: bool = (\n False # If True, will cache the complete prompt in the last message.\n )\n reasoning_effort: Literal[\"low\", \"medium\", \"high\"] | None = None\n\n def __post_init__(self):\n if self.tool_choice and self.force_call_tool:\n raise ValueError(\"tool_choice and force_call_tool are mutually exclusive\")\n if self.reasoning_effort is not None:\n logger.info(\n \"In agentlab reasoning_effort is used by LiteLLM API only. We will eventually shift to LiteLLM API for all LLMs.\"\n )\n\n\n# # Base class for all API Endpoints\nclass BaseResponseModel(ABC):\n def __init__(\n self,\n model_name: str,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = None,\n ):\n self.model_name = model_name\n self.api_key = api_key\n self.temperature = temperature\n self.max_tokens = max_tokens\n super().__init__()\n\n def __call__(self, payload: APIPayload) -> LLMOutput:\n \"\"\"Make a call to the model and return the parsed response.\"\"\"\n response = self._call_api(payload)","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.__call__","uri":"program://AgentLab/function/src.agentlab.llm.response_api.__call__#L418-L421","kind":"function","name":"__call__","path":"src/agentlab/llm/response_api.py","language":"python","start_line":418,"end_line":421,"context_start_line":398,"context_end_line":441,"code":" logger.info(\n \"In agentlab reasoning_effort is used by LiteLLM API only. 
We will eventually shift to LiteLLM API for all LLMs.\"\n )\n\n\n# # Base class for all API Endpoints\nclass BaseResponseModel(ABC):\n def __init__(\n self,\n model_name: str,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = None,\n ):\n self.model_name = model_name\n self.api_key = api_key\n self.temperature = temperature\n self.max_tokens = max_tokens\n super().__init__()\n\n def __call__(self, payload: APIPayload) -> LLMOutput:\n \"\"\"Make a call to the model and return the parsed response.\"\"\"\n response = self._call_api(payload)\n return self._parse_response(response)\n\n @abstractmethod\n def _call_api(self, payload: APIPayload) -> Any:\n \"\"\"Make a call to the model API and return the raw response.\"\"\"\n pass\n\n @abstractmethod\n def _parse_response(self, response: Any) -> LLMOutput:\n \"\"\"Parse the raw response from the model API and return a structured response.\"\"\"\n pass\n\n\nclass AgentlabAction:\n \"\"\"\n Collection of utility function to convert tool calls to Agentlab action format.\n \"\"\"\n\n @staticmethod\n def convert_toolcall_to_agentlab_action_format(toolcall: ToolCall) -> str:\n \"\"\"Convert a tool call to an Agentlab environment action string.","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api._call_api","uri":"program://AgentLab/function/src.agentlab.llm.response_api._call_api#L789-L829","kind":"function","name":"_call_api","path":"src/agentlab/llm/response_api.py","language":"python","start_line":789,"end_line":829,"context_start_line":769,"context_end_line":849,"code":" api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n self.action_space_as_tools = True # this should be a config\n\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key\n self.client = Anthropic(**client_args)\n self.init_pricing_tracker(pricing_api=\"anthropic\") # Use the PricingMixin\n\n def _call_api(self, payload: APIPayload) -> Completion:\n sys_msg, other_msgs = self.filter_system_messages(payload.messages)\n sys_msg_text = \"\\n\".join(c[\"text\"] for m in sys_msg for c in m.content)\n input = []\n for msg in other_msgs:\n temp = msg.prepare_message()\n if payload.use_cache_breakpoints:\n temp = self.apply_cache_breakpoints(msg, temp)\n input.extend(temp)\n\n api_params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"messages\": input,\n \"system\": sys_msg_text,\n } # Anthropic API expects system message as a string\n\n if self.temperature is not None:\n api_params[\"temperature\"] = self.temperature\n if self.max_tokens is not None:\n api_params[\"max_tokens\"] = self.max_tokens\n\n if payload.tools is not None:\n api_params[\"tools\"] = payload.tools\n if payload.tool_choice is not None and payload.force_call_tool is None:\n api_params[\"tool_choice\"] = (\n {\"type\": \"any\"}\n if payload.tool_choice in (\"required\", \"any\")\n else {\"type\": payload.tool_choice}\n )\n if payload.force_call_tool is not None:\n api_params[\"tool_choice\"] = {\"type\": \"tool\", \"name\": payload.force_call_tool}\n if payload.cache_tool_definition:\n # Indicating cache control for the last message enables caching of the last message.\n 
api_params[\"tools\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n if payload.cache_complete_prompt:\n # Indicating cache control for the last message enables caching of the complete prompt.\n api_params[\"messages\"][-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n\n response = call_anthropic_api_with_retries(self.client.messages.create, api_params)\n\n return response\n\n @staticmethod\n def filter_system_messages(messages: list[dict | MessageBuilder]) -> tuple[MessageBuilder]:\n \"\"\"Filter system messages from the list of messages.\"\"\"\n # System message cannot have an image in the middle of the text sequences.\n # Images can be appended in the end of the system message.\n\n sys_msgs, other_msgs = [], []\n for msg in messages:\n if isinstance(msg, MessageBuilder) and msg.role == \"system\":\n sys_msgs.append(msg)\n for c in msg.content:\n if c.get(\"type\") == \"image\":\n raise TypeError(\"System messages cannot contain images.\")\n else:\n other_msgs.append(msg)\n return sys_msgs, other_msgs\n\n def _parse_response(self, response: \"AnthrophicMessage\") -> LLMOutput:\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api._parse_response","uri":"program://AgentLab/function/src.agentlab.llm.response_api._parse_response#L848-L861","kind":"function","name":"_parse_response","path":"src/agentlab/llm/response_api.py","language":"python","start_line":848,"end_line":861,"context_start_line":828,"context_end_line":881,"code":"\n return response\n\n @staticmethod\n def filter_system_messages(messages: list[dict | MessageBuilder]) -> tuple[MessageBuilder]:\n \"\"\"Filter system messages from the list of messages.\"\"\"\n # System message cannot have an image in the middle of the text sequences.\n # Images can be appended in the end of the system message.\n\n sys_msgs, other_msgs = [], []\n for msg in messages:\n if isinstance(msg, MessageBuilder) and msg.role == \"system\":\n sys_msgs.append(msg)\n for c in msg.content:\n if c.get(\"type\") == \"image\":\n raise TypeError(\"System messages cannot contain images.\")\n else:\n other_msgs.append(msg)\n return sys_msgs, other_msgs\n\n def _parse_response(self, response: \"AnthrophicMessage\") -> LLMOutput:\n\n toolcalls = self._extract_tool_calls_from_response(response)\n think_output = self._extract_thinking_content_from_response(response)\n if self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(toolcalls)\n else:\n env_action = self._extract_env_actions_from_text_response(response)\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=toolcalls if toolcalls is not None else None,\n )\n\n def _extract_tool_calls_from_response(self, response: \"AnthrophicMessage\") -> ToolCalls:\n \"\"\"Extracts tool calls from the response.\"\"\"\n tool_calls = []\n for output in response.content:\n if output.type == \"tool_use\":\n tool_calls.append(\n ToolCall(\n name=output.name,\n arguments=output.input,\n raw_call=output,\n )\n )\n return ToolCalls(tool_calls=tool_calls, raw_calls=response)\n\n def _extract_thinking_content_from_response(self, response: \"AnthrophicMessage\"):\n \"\"\"Extracts the thinking content from the response.\"\"\"\n return \"\".join(output.text for output in response.content if output.type == \"text\")\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> 
Any | None:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.convert_toolcall_to_agentlab_action_format","uri":"program://AgentLab/function/src.agentlab.llm.response_api.convert_toolcall_to_agentlab_action_format#L440-L451","kind":"function","name":"convert_toolcall_to_agentlab_action_format","path":"src/agentlab/llm/response_api.py","language":"python","start_line":440,"end_line":451,"context_start_line":420,"context_end_line":471,"code":" response = self._call_api(payload)\n return self._parse_response(response)\n\n @abstractmethod\n def _call_api(self, payload: APIPayload) -> Any:\n \"\"\"Make a call to the model API and return the raw response.\"\"\"\n pass\n\n @abstractmethod\n def _parse_response(self, response: Any) -> LLMOutput:\n \"\"\"Parse the raw response from the model API and return a structured response.\"\"\"\n pass\n\n\nclass AgentlabAction:\n \"\"\"\n Collection of utility function to convert tool calls to Agentlab action format.\n \"\"\"\n\n @staticmethod\n def convert_toolcall_to_agentlab_action_format(toolcall: ToolCall) -> str:\n \"\"\"Convert a tool call to an Agentlab environment action string.\n\n Args:\n toolcall: ToolCall object containing the name and arguments of the tool call.\n\n Returns:\n A string representing the action in Agentlab format i.e. python function call string.\n \"\"\"\n\n tool_name, tool_args = toolcall.name, toolcall.arguments\n return tool_call_to_python_code(tool_name, tool_args)\n\n @staticmethod\n def convert_multiactions_to_agentlab_action_format(actions: list[str]) -> str | None:\n \"\"\"Convert multiple actions list to a format that env supports.\n\n Args:\n actions: List of action strings to be joined.\n\n Returns:\n Joined actions separated by newlines, or None if empty.\n \"\"\"\n return \"\\n\".join(actions) if actions else None\n\n\nclass BaseModelWithPricing(TrackAPIPricingMixin, BaseResponseModel):\n pass\n\n\nclass OpenAIResponseModel(BaseModelWithPricing):\n def __init__(","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.convert_multiactions_to_agentlab_action_format","uri":"program://AgentLab/function/src.agentlab.llm.response_api.convert_multiactions_to_agentlab_action_format#L454-L463","kind":"function","name":"convert_multiactions_to_agentlab_action_format","path":"src/agentlab/llm/response_api.py","language":"python","start_line":454,"end_line":463,"context_start_line":434,"context_end_line":483,"code":"class AgentlabAction:\n \"\"\"\n Collection of utility function to convert tool calls to Agentlab action format.\n \"\"\"\n\n @staticmethod\n def convert_toolcall_to_agentlab_action_format(toolcall: ToolCall) -> str:\n \"\"\"Convert a tool call to an Agentlab environment action string.\n\n Args:\n toolcall: ToolCall object containing the name and arguments of the tool call.\n\n Returns:\n A string representing the action in Agentlab format i.e. 
python function call string.\n \"\"\"\n\n tool_name, tool_args = toolcall.name, toolcall.arguments\n return tool_call_to_python_code(tool_name, tool_args)\n\n @staticmethod\n def convert_multiactions_to_agentlab_action_format(actions: list[str]) -> str | None:\n \"\"\"Convert multiple actions list to a format that env supports.\n\n Args:\n actions: List of action strings to be joined.\n\n Returns:\n Joined actions separated by newlines, or None if empty.\n \"\"\"\n return \"\\n\".join(actions) if actions else None\n\n\nclass BaseModelWithPricing(TrackAPIPricingMixin, BaseResponseModel):\n pass\n\n\nclass OpenAIResponseModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n self.action_space_as_tools = True # this should be a config\n super().__init__( # This is passed to BaseModel\n model_name=model_name, api_key=api_key, temperature=temperature, max_tokens=max_tokens\n )\n client_args = {}","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api._extract_tool_calls_from_response","uri":"program://AgentLab/function/src.agentlab.llm.response_api._extract_tool_calls_from_response#L863-L875","kind":"function","name":"_extract_tool_calls_from_response","path":"src/agentlab/llm/response_api.py","language":"python","start_line":863,"end_line":875,"context_start_line":843,"context_end_line":895,"code":" raise TypeError(\"System messages cannot contain images.\")\n else:\n other_msgs.append(msg)\n return sys_msgs, other_msgs\n\n def _parse_response(self, response: \"AnthrophicMessage\") -> LLMOutput:\n\n toolcalls = self._extract_tool_calls_from_response(response)\n think_output = self._extract_thinking_content_from_response(response)\n if self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(toolcalls)\n else:\n env_action = self._extract_env_actions_from_text_response(response)\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=toolcalls if toolcalls is not None else None,\n )\n\n def _extract_tool_calls_from_response(self, response: \"AnthrophicMessage\") -> ToolCalls:\n \"\"\"Extracts tool calls from the response.\"\"\"\n tool_calls = []\n for output in response.content:\n if output.type == \"tool_use\":\n tool_calls.append(\n ToolCall(\n name=output.name,\n arguments=output.input,\n raw_call=output,\n )\n )\n return ToolCalls(tool_calls=tool_calls, raw_calls=response)\n\n def _extract_thinking_content_from_response(self, response: \"AnthrophicMessage\"):\n \"\"\"Extracts the thinking content from the response.\"\"\"\n return \"\".join(output.text for output in response.content if output.type == \"text\")\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts actions from the response.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} 
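Aside for readers tracing the conversion path in the records above: a ToolCall is rendered into an AgentLab action string by tool_call_to_python_code (its body is copied verbatim from the record later in this dump), and multiple actions are joined with newlines. A minimal runnable sketch, using hypothetical tool-call data:

def tool_call_to_python_code(func_name, kwargs):
    """Format a function name and kwargs dict into a Python function call string."""
    if kwargs is None:
        kwargs = {}
    if not kwargs:
        return f"{func_name}()"
    args_str = ", ".join(f"{key}={repr(value)}" for key, value in kwargs.items())
    return f"{func_name}({args_str})"

# Hypothetical (name, arguments) pairs standing in for parsed ToolCall objects.
calls = [("click", {"bid": "a42"}), ("fill", {"bid": "a51", "value": "hello"})]
actions = [tool_call_to_python_code(name, args) for name, args in calls]
# Mirrors AgentlabAction.convert_multiactions_to_agentlab_action_format:
# more than one action becomes a single newline-joined string.
env_action = "\n".join(actions) if len(actions) > 1 else actions[0]
print(env_action)
# click(bid='a42')
# fill(bid='a51', value='hello')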
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api._extract_env_actions_from_toolcalls","uri":"program://AgentLab/function/src.agentlab.llm.response_api._extract_env_actions_from_toolcalls#L881-L894","kind":"function","name":"_extract_env_actions_from_toolcalls","path":"src/agentlab/llm/response_api.py","language":"python","start_line":881,"end_line":894,"context_start_line":861,"context_end_line":914,"code":" )\n\n def _extract_tool_calls_from_response(self, response: \"AnthrophicMessage\") -> ToolCalls:\n \"\"\"Extracts tool calls from the response.\"\"\"\n tool_calls = []\n for output in response.content:\n if output.type == \"tool_use\":\n tool_calls.append(\n ToolCall(\n name=output.name,\n arguments=output.input,\n raw_call=output,\n )\n )\n return ToolCalls(tool_calls=tool_calls, raw_calls=response)\n\n def _extract_thinking_content_from_response(self, response: \"AnthrophicMessage\"):\n \"\"\"Extracts the thinking content from the response.\"\"\"\n return \"\".join(output.text for output in response.content if output.type == \"text\")\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts actions from the response.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(self, response: \"AnthrophicMessage\") -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass\n\n def apply_cache_breakpoints(self, msg: Message, prepared_msg: dict) -> List[Message]:\n \"\"\"Apply cache breakpoints to the messages.\"\"\"\n if getattr(msg, \"_cache_breakpoint\", False):\n prepared_msg[-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n return prepared_msg\n\n\n# Factory classes to create the appropriate model based on the API endpoint.\n\n\n@dataclass\nclass OpenAIResponseModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api._extract_thinking_content_from_response","uri":"program://AgentLab/function/src.agentlab.llm.response_api._extract_thinking_content_from_response#L877-L879","kind":"function","name":"_extract_thinking_content_from_response","path":"src/agentlab/llm/response_api.py","language":"python","start_line":877,"end_line":879,"context_start_line":857,"context_end_line":899,"code":" raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=toolcalls if toolcalls is not None else None,\n )\n\n def _extract_tool_calls_from_response(self, response: \"AnthrophicMessage\") -> ToolCalls:\n \"\"\"Extracts tool calls from the response.\"\"\"\n tool_calls = []\n for output in response.content:\n if output.type == \"tool_use\":\n tool_calls.append(\n ToolCall(\n name=output.name,\n arguments=output.input,\n raw_call=output,\n )\n )\n return ToolCalls(tool_calls=tool_calls, raw_calls=response)\n\n def _extract_thinking_content_from_response(self, response: \"AnthrophicMessage\"):\n \"\"\"Extracts the thinking content from the response.\"\"\"\n return 
\"\".join(output.text for output in response.content if output.type == \"text\")\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts actions from the response.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(self, response: \"AnthrophicMessage\") -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.cua_action_to_env_tool_name_and_args","uri":"program://AgentLab/function/src.agentlab.llm.response_api.cua_action_to_env_tool_name_and_args#L583-L587","kind":"function","name":"cua_action_to_env_tool_name_and_args","path":"src/agentlab/llm/response_api.py","language":"python","start_line":583,"end_line":587,"context_start_line":563,"context_end_line":607,"code":" actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_thinking_content_from_response(self, response: \"OpenAIResponseObject\") -> str:\n \"\"\"Extracts the thinking content from the response.\"\"\"\n thinking_content = \"\"\n for output in response.output:\n if output.type == \"reasoning\":\n if len(output.summary) > 0:\n thinking_content += output.summary[0].text + \"\\n\"\n elif output.type == \"message\" and output.content:\n thinking_content += output.content[0].text + \"\\n\"\n elif hasattr(output, \"output_text\") and output.output_text:\n thinking_content += f\"{output.output_text}\\n\"\n return thinking_content\n\n def cua_action_to_env_tool_name_and_args(self, action: str) -> tuple[str, Dict[str, Any]]:\n \"\"\" \"Overwrite this method to convert a computer action to agentlab action string\"\"\"\n raise NotImplementedError(\n \"This method should be implemented in the subclass to convert a computer action to agentlab action string.\"\n )\n\n def _extract_env_actions_from_text_response(\n self, response: \"OpenAIResponseObject\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass\n\n\nclass OpenAIChatCompletionModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n super().__init__(\n model_name=model_name,","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api._extract_env_actions_from_text_response","uri":"program://AgentLab/function/src.agentlab.llm.response_api._extract_env_actions_from_text_response#L896-L899","kind":"function","name":"_extract_env_actions_from_text_response","path":"src/agentlab/llm/response_api.py","language":"python","start_line":896,"end_line":899,"context_start_line":876,"context_end_line":919,"code":"\n def _extract_thinking_content_from_response(self, response: \"AnthrophicMessage\"):\n \"\"\"Extracts the thinking content from the response.\"\"\"\n 
return \"\".join(output.text for output in response.content if output.type == \"text\")\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts actions from the response.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(self, response: \"AnthrophicMessage\") -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass\n\n def apply_cache_breakpoints(self, msg: Message, prepared_msg: dict) -> List[Message]:\n \"\"\"Apply cache breakpoints to the messages.\"\"\"\n if getattr(msg, \"_cache_breakpoint\", False):\n prepared_msg[-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n return prepared_msg\n\n\n# Factory classes to create the appropriate model based on the API endpoint.\n\n\n@dataclass\nclass OpenAIResponseModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n api = \"openai\"\n\n def make_model(self):\n return OpenAIResponseModel(","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.format_tools_for_chat_completion","uri":"program://AgentLab/function/src.agentlab.llm.response_api.format_tools_for_chat_completion#L739-L761","kind":"function","name":"format_tools_for_chat_completion","path":"src/agentlab/llm/response_api.py","language":"python","start_line":739,"end_line":761,"context_start_line":719,"context_end_line":781,"code":" return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(\n self, response: \"openai.types.chat.ChatCompletion\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass\n\n @staticmethod\n def format_tools_for_chat_completion(tools):\n \"\"\"Formats response tools format for OpenAI Chat Completion API.\n\n Why we need this?\n Ans: actionset.to_tool_description() in bgym only returns description\n format valid for OpenAI Response API.\n\n Args:\n tools: List of tool descriptions to format for Chat Completion API.\n\n Returns:\n Formatted tools list compatible with OpenAI Chat Completion API, or None if tools is None.\n \"\"\"\n formatted_tools = None\n if tools is not None:\n formatted_tools = [\n {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] for k in (\"name\", \"description\", \"parameters\")},\n }\n for tool in tools\n ]\n return formatted_tools\n\n\nclass ClaudeResponseModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n ):\n self.action_space_as_tools = True # this should be a config\n\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n client_args = 
{}","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.filter_system_messages","uri":"program://AgentLab/function/src.agentlab.llm.response_api.filter_system_messages#L832-L846","kind":"function","name":"filter_system_messages","path":"src/agentlab/llm/response_api.py","language":"python","start_line":832,"end_line":846,"context_start_line":812,"context_end_line":866,"code":" if payload.tool_choice is not None and payload.force_call_tool is None:\n api_params[\"tool_choice\"] = (\n {\"type\": \"any\"}\n if payload.tool_choice in (\"required\", \"any\")\n else {\"type\": payload.tool_choice}\n )\n if payload.force_call_tool is not None:\n api_params[\"tool_choice\"] = {\"type\": \"tool\", \"name\": payload.force_call_tool}\n if payload.cache_tool_definition:\n # Indicating cache control for the last message enables caching of the last message.\n api_params[\"tools\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n if payload.cache_complete_prompt:\n # Indicating cache control for the last message enables caching of the complete prompt.\n api_params[\"messages\"][-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n\n response = call_anthropic_api_with_retries(self.client.messages.create, api_params)\n\n return response\n\n @staticmethod\n def filter_system_messages(messages: list[dict | MessageBuilder]) -> tuple[MessageBuilder]:\n \"\"\"Filter system messages from the list of messages.\"\"\"\n # System message cannot have an image in the middle of the text sequences.\n # Images can be appended in the end of the system message.\n\n sys_msgs, other_msgs = [], []\n for msg in messages:\n if isinstance(msg, MessageBuilder) and msg.role == \"system\":\n sys_msgs.append(msg)\n for c in msg.content:\n if c.get(\"type\") == \"image\":\n raise TypeError(\"System messages cannot contain images.\")\n else:\n other_msgs.append(msg)\n return sys_msgs, other_msgs\n\n def _parse_response(self, response: \"AnthrophicMessage\") -> LLMOutput:\n\n toolcalls = self._extract_tool_calls_from_response(response)\n think_output = self._extract_thinking_content_from_response(response)\n if self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(toolcalls)\n else:\n env_action = self._extract_env_actions_from_text_response(response)\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=toolcalls if toolcalls is not None else None,\n )\n\n def _extract_tool_calls_from_response(self, response: \"AnthrophicMessage\") -> ToolCalls:\n \"\"\"Extracts tool calls from the response.\"\"\"\n tool_calls = []\n for output in response.content:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.apply_cache_breakpoints","uri":"program://AgentLab/function/src.agentlab.llm.response_api.apply_cache_breakpoints#L901-L905","kind":"function","name":"apply_cache_breakpoints","path":"src/agentlab/llm/response_api.py","language":"python","start_line":901,"end_line":905,"context_start_line":881,"context_end_line":925,"code":" def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts actions from the response.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in 
toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(self, response: \"AnthrophicMessage\") -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n pass\n\n def apply_cache_breakpoints(self, msg: Message, prepared_msg: dict) -> List[Message]:\n \"\"\"Apply cache breakpoints to the messages.\"\"\"\n if getattr(msg, \"_cache_breakpoint\", False):\n prepared_msg[-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n return prepared_msg\n\n\n# Factory classes to create the appropriate model based on the API endpoint.\n\n\n@dataclass\nclass OpenAIResponseModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n api = \"openai\"\n\n def make_model(self):\n return OpenAIResponseModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.make_model","uri":"program://AgentLab/function/src.agentlab.llm.response_api.make_model#L972-L979","kind":"function","name":"make_model","path":"src/agentlab/llm/response_api.py","language":"python","start_line":972,"end_line":979,"context_start_line":952,"context_end_line":999,"code":" api = \"openai\"\n\n def make_model(self):\n return OpenAIChatCompletionModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return OpenAIChatCompletionAPIMessageBuilder\n\n\n@dataclass\nclass OpenRouterModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenRouter\n model.\"\"\"\n\n api: str = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n\n def make_model(self):\n return OpenAIChatCompletionModel(\n base_url=\"https://openrouter.ai/api/v1\",\n api_key=os.getenv(\"OPENROUTER_API_KEY\"),\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return OpenAIChatCompletionAPIMessageBuilder\n\n\ndef tool_call_to_python_code(func_name, kwargs):\n \"\"\"Format a function name and kwargs dict into a Python function call string.\"\"\"\n if kwargs is None:\n kwargs = {}\n\n if not kwargs:\n return f\"{func_name}()\"\n\n args_str = \", \".join(f\"{key}={repr(value)}\" for key, value in kwargs.items())\n return f\"{func_name}({args_str})\"\n\n\n# ___Not__Tested__#\n\n# class VLLMModelArgs(BaseModelArgs):","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.response_api.get_message_builder","uri":"program://AgentLab/function/src.agentlab.llm.response_api.get_message_builder#L981-L982","kind":"function","name":"get_message_builder","path":"src/agentlab/llm/response_api.py","language":"python","start_line":981,"end_line":982,"context_start_line":961,"context_end_line":1002,"code":" def get_message_builder(self) -> MessageBuilder:\n return OpenAIChatCompletionAPIMessageBuilder\n\n\n@dataclass\nclass 
OpenRouterModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenRouter\n model.\"\"\"\n\n api: str = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n\n def make_model(self):\n return OpenAIChatCompletionModel(\n base_url=\"https://openrouter.ai/api/v1\",\n api_key=os.getenv(\"OPENROUTER_API_KEY\"),\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )\n\n def get_message_builder(self) -> MessageBuilder:\n return OpenAIChatCompletionAPIMessageBuilder\n\n\ndef tool_call_to_python_code(func_name, kwargs):\n \"\"\"Format a function name and kwargs dict into a Python function call string.\"\"\"\n if kwargs is None:\n kwargs = {}\n\n if not kwargs:\n return f\"{func_name}()\"\n\n args_str = \", \".join(f\"{key}={repr(value)}\" for key, value in kwargs.items())\n return f\"{func_name}({args_str})\"\n\n\n# ___Not__Tested__#\n\n# class VLLMModelArgs(BaseModelArgs):\n# \"\"\"Serializable object for instantiating a generic chat model with a VLLM\n# model.\"\"\"\n","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.base_api","uri":"program://AgentLab/module/src.agentlab.llm.base_api#L1-L34","kind":"module","name":"src.agentlab.llm.base_api","path":"src/agentlab/llm/base_api.py","language":"python","start_line":1,"end_line":34,"context_start_line":1,"context_end_line":34,"code":"from abc import ABC, abstractmethod\nfrom dataclasses import dataclass\n\n\nclass AbstractChatModel(ABC):\n @abstractmethod\n def __call__(self, messages: list[dict]) -> dict:\n pass\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass BaseModelArgs(ABC):\n \"\"\"Base class for all model arguments.\"\"\"\n\n model_name: str\n max_total_tokens: int = None\n max_input_tokens: int = None\n max_new_tokens: int = None\n temperature: float = 0.1\n vision_support: bool = False\n log_probs: bool = False\n\n @abstractmethod\n def make_model(self) -> AbstractChatModel:\n pass\n\n def prepare_server(self):\n pass\n\n def close_server(self):\n pass","source_hash":"27abf96f0b6dc633c41b67db2274629ce0b8cdff07b92d72e060fb5d4847efae","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.base_api.AbstractChatModel","uri":"program://AgentLab/class/src.agentlab.llm.base_api.AbstractChatModel#L5-L11","kind":"class","name":"AbstractChatModel","path":"src/agentlab/llm/base_api.py","language":"python","start_line":5,"end_line":11,"context_start_line":1,"context_end_line":31,"code":"from abc import ABC, abstractmethod\nfrom dataclasses import dataclass\n\n\nclass AbstractChatModel(ABC):\n @abstractmethod\n def __call__(self, messages: list[dict]) -> dict:\n pass\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass BaseModelArgs(ABC):\n \"\"\"Base class for all model arguments.\"\"\"\n\n model_name: str\n max_total_tokens: int = None\n max_input_tokens: int = None\n max_new_tokens: int = None\n temperature: float = 0.1\n vision_support: bool = False\n log_probs: bool = False\n\n @abstractmethod\n def make_model(self) -> AbstractChatModel:\n pass\n\n def prepare_server(self):\n pass","source_hash":"27abf96f0b6dc633c41b67db2274629ce0b8cdff07b92d72e060fb5d4847efae","truncated":false} 
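As a usage sketch of the factory dataclasses shown above (the model slug is a hypothetical example taken from the test list later in this dump; assumes OPENROUTER_API_KEY is set and that the import path matches the file shown):

from agentlab.llm.response_api import OpenRouterModelArgs

# Hypothetical configuration; the fields come from BaseModelArgs.
args = OpenRouterModelArgs(
    model_name="openai/gpt-4.1-mini",  # example slug, swap in your own
    temperature=0.1,
    max_new_tokens=500,
)
model = args.make_model()  # OpenAIChatCompletionModel pointed at the OpenRouter base_url
builder_cls = args.get_message_builder()  # OpenAIChatCompletionAPIMessageBuilder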
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.base_api.BaseModelArgs","uri":"program://AgentLab/class/src.agentlab.llm.base_api.BaseModelArgs#L15-L34","kind":"class","name":"BaseModelArgs","path":"src/agentlab/llm/base_api.py","language":"python","start_line":15,"end_line":34,"context_start_line":1,"context_end_line":34,"code":"from abc import ABC, abstractmethod\nfrom dataclasses import dataclass\n\n\nclass AbstractChatModel(ABC):\n @abstractmethod\n def __call__(self, messages: list[dict]) -> dict:\n pass\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass BaseModelArgs(ABC):\n \"\"\"Base class for all model arguments.\"\"\"\n\n model_name: str\n max_total_tokens: int = None\n max_input_tokens: int = None\n max_new_tokens: int = None\n temperature: float = 0.1\n vision_support: bool = False\n log_probs: bool = False\n\n @abstractmethod\n def make_model(self) -> AbstractChatModel:\n pass\n\n def prepare_server(self):\n pass\n\n def close_server(self):\n pass","source_hash":"27abf96f0b6dc633c41b67db2274629ce0b8cdff07b92d72e060fb5d4847efae","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.base_api.__call__","uri":"program://AgentLab/function/src.agentlab.llm.base_api.__call__#L7-L8","kind":"function","name":"__call__","path":"src/agentlab/llm/base_api.py","language":"python","start_line":7,"end_line":8,"context_start_line":1,"context_end_line":28,"code":"from abc import ABC, abstractmethod\nfrom dataclasses import dataclass\n\n\nclass AbstractChatModel(ABC):\n @abstractmethod\n def __call__(self, messages: list[dict]) -> dict:\n pass\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass BaseModelArgs(ABC):\n \"\"\"Base class for all model arguments.\"\"\"\n\n model_name: str\n max_total_tokens: int = None\n max_input_tokens: int = None\n max_new_tokens: int = None\n temperature: float = 0.1\n vision_support: bool = False\n log_probs: bool = False\n\n @abstractmethod\n def make_model(self) -> AbstractChatModel:\n pass","source_hash":"27abf96f0b6dc633c41b67db2274629ce0b8cdff07b92d72e060fb5d4847efae","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.base_api.get_stats","uri":"program://AgentLab/function/src.agentlab.llm.base_api.get_stats#L10-L11","kind":"function","name":"get_stats","path":"src/agentlab/llm/base_api.py","language":"python","start_line":10,"end_line":11,"context_start_line":1,"context_end_line":31,"code":"from abc import ABC, abstractmethod\nfrom dataclasses import dataclass\n\n\nclass AbstractChatModel(ABC):\n @abstractmethod\n def __call__(self, messages: list[dict]) -> dict:\n pass\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass BaseModelArgs(ABC):\n \"\"\"Base class for all model arguments.\"\"\"\n\n model_name: str\n max_total_tokens: int = None\n max_input_tokens: int = None\n max_new_tokens: int = None\n temperature: float = 0.1\n vision_support: bool = False\n log_probs: bool = False\n\n @abstractmethod\n def make_model(self) -> AbstractChatModel:\n pass\n\n def prepare_server(self):\n pass","source_hash":"27abf96f0b6dc633c41b67db2274629ce0b8cdff07b92d72e060fb5d4847efae","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.base_api.make_model","uri":"program://AgentLab/function/src.agentlab.llm.base_api.make_model#L27-L28","kind":"function","name":"make_model","path":"src/agentlab/llm/base_api.py","language":"python","start_line":27,"end_line":28,"context_start_line":7,"context_end_line":34,"code":" def __call__(self, messages: list[dict]) -> dict:\n pass\n\n def 
get_stats(self):\n return {}\n\n\n@dataclass\nclass BaseModelArgs(ABC):\n \"\"\"Base class for all model arguments.\"\"\"\n\n model_name: str\n max_total_tokens: int = None\n max_input_tokens: int = None\n max_new_tokens: int = None\n temperature: float = 0.1\n vision_support: bool = False\n log_probs: bool = False\n\n @abstractmethod\n def make_model(self) -> AbstractChatModel:\n pass\n\n def prepare_server(self):\n pass\n\n def close_server(self):\n pass","source_hash":"27abf96f0b6dc633c41b67db2274629ce0b8cdff07b92d72e060fb5d4847efae","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.base_api.prepare_server","uri":"program://AgentLab/function/src.agentlab.llm.base_api.prepare_server#L30-L31","kind":"function","name":"prepare_server","path":"src/agentlab/llm/base_api.py","language":"python","start_line":30,"end_line":31,"context_start_line":10,"context_end_line":34,"code":" def get_stats(self):\n return {}\n\n\n@dataclass\nclass BaseModelArgs(ABC):\n \"\"\"Base class for all model arguments.\"\"\"\n\n model_name: str\n max_total_tokens: int = None\n max_input_tokens: int = None\n max_new_tokens: int = None\n temperature: float = 0.1\n vision_support: bool = False\n log_probs: bool = False\n\n @abstractmethod\n def make_model(self) -> AbstractChatModel:\n pass\n\n def prepare_server(self):\n pass\n\n def close_server(self):\n pass","source_hash":"27abf96f0b6dc633c41b67db2274629ce0b8cdff07b92d72e060fb5d4847efae","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.base_api.close_server","uri":"program://AgentLab/function/src.agentlab.llm.base_api.close_server#L33-L34","kind":"function","name":"close_server","path":"src/agentlab/llm/base_api.py","language":"python","start_line":33,"end_line":34,"context_start_line":13,"context_end_line":34,"code":"\n@dataclass\nclass BaseModelArgs(ABC):\n \"\"\"Base class for all model arguments.\"\"\"\n\n model_name: str\n max_total_tokens: int = None\n max_input_tokens: int = None\n max_new_tokens: int = None\n temperature: float = 0.1\n vision_support: bool = False\n log_probs: bool = False\n\n @abstractmethod\n def make_model(self) -> AbstractChatModel:\n pass\n\n def prepare_server(self):\n pass\n\n def close_server(self):\n pass","source_hash":"27abf96f0b6dc633c41b67db2274629ce0b8cdff07b92d72e060fb5d4847efae","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api","uri":"program://AgentLab/module/src.agentlab.llm.litellm_api#L1-L326","kind":"module","name":"src.agentlab.llm.litellm_api","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":1,"end_line":326,"context_start_line":1,"context_end_line":326,"code":"import json\nimport logging\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Dict, List, Optional, Type\n\nimport litellm\nfrom litellm import completion\nfrom openai.types.chat import ChatCompletion as OpenAIChatCompletion\n\nfrom agentlab.llm.base_api import BaseModelArgs\nfrom agentlab.llm.response_api import (\n AgentlabAction,\n APIPayload,\n BaseModelWithPricing,\n LLMOutput,\n Message,\n MessageBuilder,\n OpenAIChatCompletionAPIMessageBuilder,\n ToolCall,\n ToolCalls,\n)\n\nlitellm.modify_params = True\n\n\nclass LiteLLMModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n use_only_first_toolcall: bool = False,\n ):\n super().__init__(\n 
model_name=model_name,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n self.action_space_as_tools = True # this should be a config\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key\n self.client = partial(completion, **client_args)\n self.init_pricing_tracker(pricing_api=\"litellm\")\n self.use_only_first_toolcall = use_only_first_toolcall\n try:\n self.litellm_info = litellm.get_model_info(model_name)\n # maybe log this in xray\n\n except Exception as e:\n logging.error(f\"Failed to get litellm model info: {e}\")\n\n def _call_api(self, payload: APIPayload) -> \"OpenAIChatCompletion\":\n \"\"\"\n Calls the LiteLLM API with the given payload.\n\n Args:\n payload (APIPayload): The payload to send to the API.\n\n Returns:\n OpenAIChatCompletion: An object with the same keys as OpenAIChatCompletion.\n \"\"\"\n input = []\n for msg in payload.messages: # type: ignore\n input.extend(msg.prepare_message())\n api_params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"messages\": input,\n }\n if self.temperature is not None:\n api_params[\"temperature\"] = self.temperature\n\n if self.max_tokens is not None:\n api_params[\"max_completion_tokens\"] = self.max_tokens\n\n if payload.tools is not None:\n api_params[\"tools\"] = (\n self.format_tools_for_chat_completion(payload.tools)\n if \"function\" not in payload.tools[0] # convert if responses_api_tools\n else payload.tools\n )\n\n if payload.tool_choice is not None and payload.force_call_tool is None:\n api_params[\"tool_choice\"] = (\n \"required\" if payload.tool_choice in (\"required\", \"any\") else payload.tool_choice\n )\n\n if payload.force_call_tool is not None:\n api_params[\"tool_choice\"] = {\n \"type\": \"function\",\n \"function\": {\"name\": payload.force_call_tool},\n }\n\n if payload.reasoning_effort is not None:\n api_params[\"reasoning_effort\"] = payload.reasoning_effort\n\n if \"tools\" in api_params and payload.cache_tool_definition:\n api_params[\"tools\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"} # type: ignore\n\n if payload.cache_complete_prompt:\n # Indicating cache control for the last message enables caching of the complete prompt.\n api_params[\"messages\"][-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n\n response = self.client(**api_params, num_retries=5)\n\n return response # type: ignore\n\n def _parse_response(self, response: \"OpenAIChatCompletion\") -> LLMOutput:\n think_output = self._extract_thinking_content_from_response(response)\n tool_calls = self._extract_tool_calls_from_response(response)\n\n if self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(tool_calls) # type: ignore\n else:\n env_action = self._extract_env_actions_from_text_response(response)\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=tool_calls if tool_calls is not None else None,\n )\n\n def _extract_thinking_content_from_response(\n self, response: OpenAIChatCompletion, wrap_tag=\"think\"\n ):\n \"\"\"Extracts the content from the message, including reasoning if available.\n It wraps the reasoning around ... 
for easy identification of reasoning content,\n when the LLM produces 'text' and 'reasoning' in the same message.\n Note: The wrapping of 'thinking' content may not be needed and may be reconsidered.\n\n Args:\n response: The message object or dict containing content and reasoning.\n wrap_tag: The tag name to wrap reasoning content (default: \"think\").\n\n Returns:\n str: The extracted content with reasoning wrapped in specified tags.\n \"\"\"\n message = response.choices[0].message\n if not isinstance(message, dict):\n message = message.to_dict()\n\n reasoning_content = message.get(\"reasoning\", None)\n msg_content = message.get(\"text\", \"\") # works for Open-router\n if reasoning_content:\n # Wrap reasoning in tags with newlines for clarity\n reasoning_content = f\"<{wrap_tag}>{reasoning_content}\\n\"\n logging.debug(\"Extracting content from response.choices[i].message.reasoning\")\n else:\n reasoning_content = \"\"\n return f\"{reasoning_content}{msg_content}{message.get('content', '')}\"\n\n def _extract_tool_calls_from_response(self, response: OpenAIChatCompletion) -> ToolCalls | None:\n \"\"\"Extracts tool calls from the response.\"\"\"\n message = response.choices[0].message.to_dict()\n tool_calls = message.get(\"tool_calls\", None)\n if tool_calls is None:\n return None\n tool_call_list = []\n for tc in tool_calls: # type: ignore\n tool_call_list.append(\n ToolCall(\n name=tc[\"function\"][\"name\"],\n arguments=json.loads(tc[\"function\"][\"arguments\"]),\n raw_call=tc,\n )\n )\n if self.use_only_first_toolcall:\n break\n return ToolCalls(tool_calls=tool_call_list, raw_calls=response) # type: ignore\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts actions from the response.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(\n self, response: \"OpenAIChatCompletion\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n # TODO: Add support to pass action space as prompt in LiteLLM.\n # Check: https://docs.litellm.ai/docs/completion/function_call#function-calling-for-models-wout-function-calling-support\n pass\n\n @staticmethod\n def format_tools_for_chat_completion(tools):\n \"\"\"Formats response tools format for OpenAI Chat Completion API.\n Why do we need this?\n Ans: actionset.to_tool_description() in bgym only returns description\n format valid for OpenAI Response API.\n\n Args:\n tools: List of tool descriptions to format for Chat Completion API.\n\n Returns:\n Formatted tools list compatible with OpenAI Chat Completion API, or None if tools is None.\n \"\"\"\n formatted_tools = None\n if tools is not None:\n formatted_tools = [\n {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] for k in (\"name\", \"description\", \"parameters\")},\n }\n for tool in tools\n ]\n return formatted_tools\n\n\nclass LiteLLMAPIMessageBuilder(OpenAIChatCompletionAPIMessageBuilder):\n \"\"\"Message builder for LiteLLM API, extending OpenAIChatCompletionAPIMessageBuilder.\"\"\"\n\n def prepare_message(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n 
content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call(use_only_first_toolcall)\n\n def handle_tool_call(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n raw_call = self.responded_tool_calls.raw_calls.choices[0].message # type: ignore\n if use_only_first_toolcall:\n raw_call.tool_calls = raw_call.tool_calls[:1]\n output.append(raw_call) # add raw calls to output\n for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call\n assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_reponse = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],\n }\n output.append(tool_call_reponse)\n\n return output\n\n\n@dataclass\nclass LiteLLMModelArgs(BaseModelArgs):\n \"\"\"Serializable arguments for LiteLMMModel.\"\"\"\n\n api = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n base_url: Optional[str] = None\n api_key: Optional[str] = None\n use_only_first_toolcall: bool = False\n\n def make_model(self):\n return LiteLLMModel(\n model_name=self.model_name,\n base_url=self.base_url,\n api_key=self.api_key,\n max_tokens=self.max_new_tokens,\n temperature=self.temperature,\n use_only_first_toolcall=self.use_only_first_toolcall,\n )\n\n def get_message_builder(self) -> Type[MessageBuilder]:\n \"\"\"Returns a message builder for the LiteLMMModel.\"\"\"\n return LiteLLMAPIMessageBuilder\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Some simple tests to run the LiteLLMModel with different models.\n \"\"\"\n\n import os\n\n from agentlab.agents.tool_use_agent import DEFAULT_PROMPT_CONFIG, ToolUseAgentArgs\n from agentlab.experiments.study import Study\n from agentlab.llm.litellm_api import LiteLLMModelArgs\n\n os.environ[\"LITELLM_LOG\"] = \"WARNING\"\n\n def get_agent(model_name: str) -> ToolUseAgentArgs:\n return ToolUseAgentArgs(\n model_args=LiteLLMModelArgs(\n model_name=model_name,\n max_new_tokens=2000,\n temperature=None,\n ),\n config=DEFAULT_PROMPT_CONFIG,\n )\n\n models = [\n \"openai/gpt-4.1\",\n \"openai/gpt-4.1-mini\",\n \"openai/gpt-4.1-nano\",\n \"openai/o3-2025-04-16\",\n \"anthropic/claude-3-7-sonnet-20250219\",\n \"anthropic/claude-sonnet-4-20250514\",\n ## Add more models to test.\n ]\n agent_args = [get_agent(model) for model in models]\n\n study = Study(agent_args, \"miniwob_tiny_test\", logging_level_stdout=logging.WARNING)\n study.run(\n n_jobs=5,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n )","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api.LiteLLMModel","uri":"program://AgentLab/class/src.agentlab.llm.litellm_api.LiteLLMModel#L27-L222","kind":"class","name":"LiteLLMModel","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":27,"end_line":222,"context_start_line":7,"context_end_line":242,"code":"import 
litellm\nfrom litellm import completion\nfrom openai.types.chat import ChatCompletion as OpenAIChatCompletion\n\nfrom agentlab.llm.base_api import BaseModelArgs\nfrom agentlab.llm.response_api import (\n AgentlabAction,\n APIPayload,\n BaseModelWithPricing,\n LLMOutput,\n Message,\n MessageBuilder,\n OpenAIChatCompletionAPIMessageBuilder,\n ToolCall,\n ToolCalls,\n)\n\nlitellm.modify_params = True\n\n\nclass LiteLLMModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n use_only_first_toolcall: bool = False,\n ):\n super().__init__(\n model_name=model_name,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n self.action_space_as_tools = True # this should be a config\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key\n self.client = partial(completion, **client_args)\n self.init_pricing_tracker(pricing_api=\"litellm\")\n self.use_only_first_toolcall = use_only_first_toolcall\n try:\n self.litellm_info = litellm.get_model_info(model_name)\n # maybe log this in xray\n\n except Exception as e:\n logging.error(f\"Failed to get litellm model info: {e}\")\n\n def _call_api(self, payload: APIPayload) -> \"OpenAIChatCompletion\":\n \"\"\"\n Calls the LiteLLM API with the given payload.\n\n Args:\n payload (APIPayload): The payload to send to the API.\n\n Returns:\n OpenAIChatCompletion: An object with the same keys as OpenAIChatCompletion.\n \"\"\"\n input = []\n for msg in payload.messages: # type: ignore\n input.extend(msg.prepare_message())\n api_params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"messages\": input,\n }\n if self.temperature is not None:\n api_params[\"temperature\"] = self.temperature\n\n if self.max_tokens is not None:\n api_params[\"max_completion_tokens\"] = self.max_tokens\n\n if payload.tools is not None:\n api_params[\"tools\"] = (\n self.format_tools_for_chat_completion(payload.tools)\n if \"function\" not in payload.tools[0] # convert if responses_api_tools\n else payload.tools\n )\n\n if payload.tool_choice is not None and payload.force_call_tool is None:\n api_params[\"tool_choice\"] = (\n \"required\" if payload.tool_choice in (\"required\", \"any\") else payload.tool_choice\n )\n\n if payload.force_call_tool is not None:\n api_params[\"tool_choice\"] = {\n \"type\": \"function\",\n \"function\": {\"name\": payload.force_call_tool},\n }\n\n if payload.reasoning_effort is not None:\n api_params[\"reasoning_effort\"] = payload.reasoning_effort\n\n if \"tools\" in api_params and payload.cache_tool_definition:\n api_params[\"tools\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"} # type: ignore\n\n if payload.cache_complete_prompt:\n # Indicating cache control for the last message enables caching of the complete prompt.\n api_params[\"messages\"][-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n\n response = self.client(**api_params, num_retries=5)\n\n return response # type: ignore\n\n def _parse_response(self, response: \"OpenAIChatCompletion\") -> LLMOutput:\n think_output = self._extract_thinking_content_from_response(response)\n tool_calls = self._extract_tool_calls_from_response(response)\n\n if self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(tool_calls) # type: ignore\n else:\n env_action = 
self._extract_env_actions_from_text_response(response)\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=tool_calls if tool_calls is not None else None,\n )\n\n def _extract_thinking_content_from_response(\n self, response: OpenAIChatCompletion, wrap_tag=\"think\"\n ):\n \"\"\"Extracts the content from the message, including reasoning if available.\n It wraps the reasoning around ... for easy identification of reasoning content,\n when the LLM produces 'text' and 'reasoning' in the same message.\n Note: The wrapping of 'thinking' content may not be needed and may be reconsidered.\n\n Args:\n response: The message object or dict containing content and reasoning.\n wrap_tag: The tag name to wrap reasoning content (default: \"think\").\n\n Returns:\n str: The extracted content with reasoning wrapped in specified tags.\n \"\"\"\n message = response.choices[0].message\n if not isinstance(message, dict):\n message = message.to_dict()\n\n reasoning_content = message.get(\"reasoning\", None)\n msg_content = message.get(\"text\", \"\") # works for Open-router\n if reasoning_content:\n # Wrap reasoning in tags with newlines for clarity\n reasoning_content = f\"<{wrap_tag}>{reasoning_content}\\n\"\n logging.debug(\"Extracting content from response.choices[i].message.reasoning\")\n else:\n reasoning_content = \"\"\n return f\"{reasoning_content}{msg_content}{message.get('content', '')}\"\n\n def _extract_tool_calls_from_response(self, response: OpenAIChatCompletion) -> ToolCalls | None:\n \"\"\"Extracts tool calls from the response.\"\"\"\n message = response.choices[0].message.to_dict()\n tool_calls = message.get(\"tool_calls\", None)\n if tool_calls is None:\n return None\n tool_call_list = []\n for tc in tool_calls: # type: ignore\n tool_call_list.append(\n ToolCall(\n name=tc[\"function\"][\"name\"],\n arguments=json.loads(tc[\"function\"][\"arguments\"]),\n raw_call=tc,\n )\n )\n if self.use_only_first_toolcall:\n break\n return ToolCalls(tool_calls=tool_call_list, raw_calls=response) # type: ignore\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts actions from the response.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(\n self, response: \"OpenAIChatCompletion\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n # TODO: Add support to pass action space as prompt in LiteLLM.\n # Check: https://docs.litellm.ai/docs/completion/function_call#function-calling-for-models-wout-function-calling-support\n pass\n\n @staticmethod\n def format_tools_for_chat_completion(tools):\n \"\"\"Formats response tools format for OpenAI Chat Completion API.\n Why do we need this?\n Ans: actionset.to_tool_description() in bgym only returns description\n format valid for OpenAI Response API.\n\n Args:\n tools: List of tool descriptions to format for Chat Completion API.\n\n Returns:\n Formatted tools list compatible with OpenAI Chat Completion API, or None if tools is None.\n \"\"\"\n formatted_tools = None\n if tools is not None:\n formatted_tools = [\n {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] 
for k in (\"name\", \"description\", \"parameters\")},\n }\n for tool in tools\n ]\n return formatted_tools\n\n\nclass LiteLLMAPIMessageBuilder(OpenAIChatCompletionAPIMessageBuilder):\n \"\"\"Message builder for LiteLLM API, extending OpenAIChatCompletionAPIMessageBuilder.\"\"\"\n\n def prepare_message(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call(use_only_first_toolcall)\n\n def handle_tool_call(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n raw_call = self.responded_tool_calls.raw_calls.choices[0].message # type: ignore\n if use_only_first_toolcall:","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api.LiteLLMAPIMessageBuilder","uri":"program://AgentLab/class/src.agentlab.llm.litellm_api.LiteLLMAPIMessageBuilder#L225-L259","kind":"class","name":"LiteLLMAPIMessageBuilder","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":225,"end_line":259,"context_start_line":205,"context_end_line":279,"code":" format valid for OpenAI Response API.\n\n Args:\n tools: List of tool descriptions to format for Chat Completion API.\n\n Returns:\n Formatted tools list compatible with OpenAI Chat Completion API, or None if tools is None.\n \"\"\"\n formatted_tools = None\n if tools is not None:\n formatted_tools = [\n {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] for k in (\"name\", \"description\", \"parameters\")},\n }\n for tool in tools\n ]\n return formatted_tools\n\n\nclass LiteLLMAPIMessageBuilder(OpenAIChatCompletionAPIMessageBuilder):\n \"\"\"Message builder for LiteLLM API, extending OpenAIChatCompletionAPIMessageBuilder.\"\"\"\n\n def prepare_message(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call(use_only_first_toolcall)\n\n def handle_tool_call(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n raw_call = self.responded_tool_calls.raw_calls.choices[0].message # type: ignore\n if use_only_first_toolcall:\n raw_call.tool_calls = raw_call.tool_calls[:1]\n output.append(raw_call) # add raw calls to output\n for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call\n assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_reponse = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": 
self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],\n }\n output.append(tool_call_reponse)\n\n return output\n\n\n@dataclass\nclass LiteLLMModelArgs(BaseModelArgs):\n \"\"\"Serializable arguments for LiteLMMModel.\"\"\"\n\n api = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n base_url: Optional[str] = None\n api_key: Optional[str] = None\n use_only_first_toolcall: bool = False\n\n def make_model(self):\n return LiteLLMModel(\n model_name=self.model_name,\n base_url=self.base_url,\n api_key=self.api_key,\n max_tokens=self.max_new_tokens,\n temperature=self.temperature,\n use_only_first_toolcall=self.use_only_first_toolcall,\n )","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api.LiteLLMModelArgs","uri":"program://AgentLab/class/src.agentlab.llm.litellm_api.LiteLLMModelArgs#L263-L283","kind":"class","name":"LiteLLMModelArgs","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":263,"end_line":283,"context_start_line":243,"context_end_line":303,"code":" raw_call.tool_calls = raw_call.tool_calls[:1]\n output.append(raw_call) # add raw calls to output\n for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call\n assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_reponse = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],\n }\n output.append(tool_call_reponse)\n\n return output\n\n\n@dataclass\nclass LiteLLMModelArgs(BaseModelArgs):\n \"\"\"Serializable arguments for LiteLMMModel.\"\"\"\n\n api = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n base_url: Optional[str] = None\n api_key: Optional[str] = None\n use_only_first_toolcall: bool = False\n\n def make_model(self):\n return LiteLLMModel(\n model_name=self.model_name,\n base_url=self.base_url,\n api_key=self.api_key,\n max_tokens=self.max_new_tokens,\n temperature=self.temperature,\n use_only_first_toolcall=self.use_only_first_toolcall,\n )\n\n def get_message_builder(self) -> Type[MessageBuilder]:\n \"\"\"Returns a message builder for the LiteLMMModel.\"\"\"\n return LiteLLMAPIMessageBuilder\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Some simple tests to run the LiteLLMModel with different models.\n \"\"\"\n\n import os\n\n from agentlab.agents.tool_use_agent import DEFAULT_PROMPT_CONFIG, ToolUseAgentArgs\n from agentlab.experiments.study import Study\n from agentlab.llm.litellm_api import LiteLLMModelArgs\n\n os.environ[\"LITELLM_LOG\"] = \"WARNING\"\n\n def get_agent(model_name: str) -> ToolUseAgentArgs:\n return ToolUseAgentArgs(\n model_args=LiteLLMModelArgs(\n model_name=model_name,\n max_new_tokens=2000,","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api.__init__","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api.__init__#L28-L56","kind":"function","name":"__init__","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":28,"end_line":56,"context_start_line":8,"context_end_line":76,"code":"from litellm import 
completion\nfrom openai.types.chat import ChatCompletion as OpenAIChatCompletion\n\nfrom agentlab.llm.base_api import BaseModelArgs\nfrom agentlab.llm.response_api import (\n AgentlabAction,\n APIPayload,\n BaseModelWithPricing,\n LLMOutput,\n Message,\n MessageBuilder,\n OpenAIChatCompletionAPIMessageBuilder,\n ToolCall,\n ToolCalls,\n)\n\nlitellm.modify_params = True\n\n\nclass LiteLLMModel(BaseModelWithPricing):\n def __init__(\n self,\n model_name: str,\n base_url: Optional[str] = None,\n api_key: Optional[str] = None,\n temperature: float | None = None,\n max_tokens: int | None = 100,\n use_only_first_toolcall: bool = False,\n ):\n super().__init__(\n model_name=model_name,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n self.action_space_as_tools = True # this should be a config\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key\n self.client = partial(completion, **client_args)\n self.init_pricing_tracker(pricing_api=\"litellm\")\n self.use_only_first_toolcall = use_only_first_toolcall\n try:\n self.litellm_info = litellm.get_model_info(model_name)\n # maybe log this in xray\n\n except Exception as e:\n logging.error(f\"Failed to get litellm model info: {e}\")\n\n def _call_api(self, payload: APIPayload) -> \"OpenAIChatCompletion\":\n \"\"\"\n Calls the LiteLLM API with the given payload.\n\n Args:\n payload (APIPayload): The payload to send to the API.\n\n Returns:\n OpenAIChatCompletion: An object with the same keys as OpenAIChatCompletion.\n \"\"\"\n input = []\n for msg in payload.messages: # type: ignore\n input.extend(msg.prepare_message())\n api_params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"messages\": input,\n }\n if self.temperature is not None:\n api_params[\"temperature\"] = self.temperature","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api._call_api","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api._call_api#L58-L111","kind":"function","name":"_call_api","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":58,"end_line":111,"context_start_line":38,"context_end_line":131,"code":" model_name=model_name,\n temperature=temperature,\n max_tokens=max_tokens,\n )\n self.action_space_as_tools = True # this should be a config\n client_args = {}\n if base_url is not None:\n client_args[\"base_url\"] = base_url\n if api_key is not None:\n client_args[\"api_key\"] = api_key\n self.client = partial(completion, **client_args)\n self.init_pricing_tracker(pricing_api=\"litellm\")\n self.use_only_first_toolcall = use_only_first_toolcall\n try:\n self.litellm_info = litellm.get_model_info(model_name)\n # maybe log this in xray\n\n except Exception as e:\n logging.error(f\"Failed to get litellm model info: {e}\")\n\n def _call_api(self, payload: APIPayload) -> \"OpenAIChatCompletion\":\n \"\"\"\n Calls the LiteLLM API with the given payload.\n\n Args:\n payload (APIPayload): The payload to send to the API.\n\n Returns:\n OpenAIChatCompletion: An object with the same keys as OpenAIChatCompletion.\n \"\"\"\n input = []\n for msg in payload.messages: # type: ignore\n input.extend(msg.prepare_message())\n api_params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"messages\": input,\n }\n if self.temperature is not None:\n api_params[\"temperature\"] = self.temperature\n\n if self.max_tokens is not None:\n 
api_params[\"max_completion_tokens\"] = self.max_tokens\n\n if payload.tools is not None:\n api_params[\"tools\"] = (\n self.format_tools_for_chat_completion(payload.tools)\n if \"function\" not in payload.tools[0] # convert if responses_api_tools\n else payload.tools\n )\n\n if payload.tool_choice is not None and payload.force_call_tool is None:\n api_params[\"tool_choice\"] = (\n \"required\" if payload.tool_choice in (\"required\", \"any\") else payload.tool_choice\n )\n\n if payload.force_call_tool is not None:\n api_params[\"tool_choice\"] = {\n \"type\": \"function\",\n \"function\": {\"name\": payload.force_call_tool},\n }\n\n if payload.reasoning_effort is not None:\n api_params[\"reasoning_effort\"] = payload.reasoning_effort\n\n if \"tools\" in api_params and payload.cache_tool_definition:\n api_params[\"tools\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"} # type: ignore\n\n if payload.cache_complete_prompt:\n # Indicating cache control for the last message enables caching of the complete prompt.\n api_params[\"messages\"][-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n\n response = self.client(**api_params, num_retries=5)\n\n return response # type: ignore\n\n def _parse_response(self, response: \"OpenAIChatCompletion\") -> LLMOutput:\n think_output = self._extract_thinking_content_from_response(response)\n tool_calls = self._extract_tool_calls_from_response(response)\n\n if self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(tool_calls) # type: ignore\n else:\n env_action = self._extract_env_actions_from_text_response(response)\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=tool_calls if tool_calls is not None else None,\n )\n\n def _extract_thinking_content_from_response(\n self, response: OpenAIChatCompletion, wrap_tag=\"think\"\n ):\n \"\"\"Extracts the content from the message, including reasoning if available.","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api._parse_response","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api._parse_response#L113-L126","kind":"function","name":"_parse_response","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":113,"end_line":126,"context_start_line":93,"context_end_line":146,"code":" if payload.force_call_tool is not None:\n api_params[\"tool_choice\"] = {\n \"type\": \"function\",\n \"function\": {\"name\": payload.force_call_tool},\n }\n\n if payload.reasoning_effort is not None:\n api_params[\"reasoning_effort\"] = payload.reasoning_effort\n\n if \"tools\" in api_params and payload.cache_tool_definition:\n api_params[\"tools\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"} # type: ignore\n\n if payload.cache_complete_prompt:\n # Indicating cache control for the last message enables caching of the complete prompt.\n api_params[\"messages\"][-1][\"content\"][-1][\"cache_control\"] = {\"type\": \"ephemeral\"}\n\n response = self.client(**api_params, num_retries=5)\n\n return response # type: ignore\n\n def _parse_response(self, response: \"OpenAIChatCompletion\") -> LLMOutput:\n think_output = self._extract_thinking_content_from_response(response)\n tool_calls = self._extract_tool_calls_from_response(response)\n\n if self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(tool_calls) # type: ignore\n 
else:\n env_action = self._extract_env_actions_from_text_response(response)\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=tool_calls if tool_calls is not None else None,\n )\n\n def _extract_thinking_content_from_response(\n self, response: OpenAIChatCompletion, wrap_tag=\"think\"\n ):\n \"\"\"Extracts the content from the message, including reasoning if available.\n It wraps the reasoning in <wrap_tag>...</wrap_tag> tags for easy identification of reasoning content\n when the LLM produces 'text' and 'reasoning' in the same message.\n Note: The wrapping of 'thinking' content may not be needed and may be reconsidered.\n\n Args:\n response: The message object or dict containing content and reasoning.\n wrap_tag: The tag name to wrap reasoning content (default: \"think\").\n\n Returns:\n str: The extracted content with reasoning wrapped in specified tags.\n \"\"\"\n message = response.choices[0].message\n if not isinstance(message, dict):\n message = message.to_dict()\n","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api._extract_thinking_content_from_response","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api._extract_thinking_content_from_response#L128-L155","kind":"function","name":"_extract_thinking_content_from_response","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":128,"end_line":155,"context_start_line":108,"context_end_line":175,"code":"\n response = self.client(**api_params, num_retries=5)\n\n return response # type: ignore\n\n def _parse_response(self, response: \"OpenAIChatCompletion\") -> LLMOutput:\n think_output = self._extract_thinking_content_from_response(response)\n tool_calls = self._extract_tool_calls_from_response(response)\n\n if self.action_space_as_tools:\n env_action = self._extract_env_actions_from_toolcalls(tool_calls) # type: ignore\n else:\n env_action = self._extract_env_actions_from_text_response(response)\n return LLMOutput(\n raw_response=response,\n think=think_output,\n action=env_action if env_action is not None else None,\n tool_calls=tool_calls if tool_calls is not None else None,\n )\n\n def _extract_thinking_content_from_response(\n self, response: OpenAIChatCompletion, wrap_tag=\"think\"\n ):\n \"\"\"Extracts the content from the message, including reasoning if available.\n It wraps the reasoning in <wrap_tag>...</wrap_tag> tags for easy identification of reasoning content\n when the LLM produces 'text' and 'reasoning' in the same message.\n Note: The wrapping of 'thinking' content may not be needed and may be reconsidered.\n\n Args:\n response: The message object or dict containing content and reasoning.\n wrap_tag: The tag name to wrap reasoning content (default: \"think\").\n\n Returns:\n str: The extracted content with reasoning wrapped in specified tags.\n \"\"\"\n message = response.choices[0].message\n if not isinstance(message, dict):\n message = message.to_dict()\n\n reasoning_content = message.get(\"reasoning\", None)\n msg_content = message.get(\"text\", \"\") # works for OpenRouter\n if reasoning_content:\n # Wrap reasoning in tags with newlines for clarity\n reasoning_content = f\"<{wrap_tag}>{reasoning_content}\\n\"\n logging.debug(\"Extracting content from response.choices[i].message.reasoning\")\n else:\n reasoning_content = \"\"\n return f\"{reasoning_content}{msg_content}{message.get('content', '')}\"\n\n def _extract_tool_calls_from_response(self, response: OpenAIChatCompletion) -> ToolCalls | None:\n \"\"\"Extracts tool calls from the response.\"\"\"\n message = response.choices[0].message.to_dict()\n tool_calls = message.get(\"tool_calls\", None)\n if tool_calls is None:\n return None\n tool_call_list = []\n for tc in tool_calls: # type: ignore\n tool_call_list.append(\n ToolCall(\n name=tc[\"function\"][\"name\"],\n arguments=json.loads(tc[\"function\"][\"arguments\"]),\n raw_call=tc,\n )\n )\n if self.use_only_first_toolcall:\n break\n return ToolCalls(tool_calls=tool_call_list, raw_calls=response) # type: ignore\n","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api._extract_tool_calls_from_response","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api._extract_tool_calls_from_response#L157-L174","kind":"function","name":"_extract_tool_calls_from_response","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":157,"end_line":174,"context_start_line":137,"context_end_line":194,"code":" response: The message object or dict containing content and reasoning.\n wrap_tag: The tag name to wrap reasoning content (default: \"think\").\n\n Returns:\n str: The extracted content with reasoning wrapped in specified tags.\n \"\"\"\n message = response.choices[0].message\n if not isinstance(message, dict):\n message = message.to_dict()\n\n reasoning_content = message.get(\"reasoning\", None)\n msg_content = message.get(\"text\", \"\") # works for OpenRouter\n if reasoning_content:\n # Wrap reasoning in tags with newlines for clarity\n reasoning_content = f\"<{wrap_tag}>{reasoning_content}\\n\"\n logging.debug(\"Extracting content from response.choices[i].message.reasoning\")\n else:\n reasoning_content = \"\"\n return f\"{reasoning_content}{msg_content}{message.get('content', '')}\"\n\n def _extract_tool_calls_from_response(self, response: OpenAIChatCompletion) -> ToolCalls | None:\n \"\"\"Extracts tool calls from the response.\"\"\"\n message = response.choices[0].message.to_dict()\n tool_calls = message.get(\"tool_calls\", None)\n if tool_calls is None:\n return None\n tool_call_list = []\n for tc in tool_calls: # type: ignore\n tool_call_list.append(\n ToolCall(\n name=tc[\"function\"][\"name\"],\n arguments=json.loads(tc[\"function\"][\"arguments\"]),\n raw_call=tc,\n )\n )\n if self.use_only_first_toolcall:\n break\n return ToolCalls(tool_calls=tool_call_list, raw_calls=response) # type: ignore\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts environment actions from the tool calls.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(\n self, response: \"OpenAIChatCompletion\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api._extract_env_actions_from_toolcalls","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api._extract_env_actions_from_toolcalls#L176-L189","kind":"function","name":"_extract_env_actions_from_toolcalls","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":176,"end_line":189,"context_start_line":156,"context_end_line":209,"code":"\n def _extract_tool_calls_from_response(self, response: OpenAIChatCompletion) -> ToolCalls | None:\n \"\"\"Extracts tool calls from the response.\"\"\"\n message = response.choices[0].message.to_dict()\n tool_calls = message.get(\"tool_calls\", None)\n if tool_calls is None:\n return None\n tool_call_list = []\n for tc in tool_calls: # type: ignore\n tool_call_list.append(\n ToolCall(\n name=tc[\"function\"][\"name\"],\n arguments=json.loads(tc[\"function\"][\"arguments\"]),\n raw_call=tc,\n )\n )\n if self.use_only_first_toolcall:\n break\n return ToolCalls(tool_calls=tool_call_list, raw_calls=response) # type: ignore\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts environment actions from the tool calls.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(\n self, response: \"OpenAIChatCompletion\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n # TODO: Add support to pass action space as prompt in LiteLLM.\n # Check: https://docs.litellm.ai/docs/completion/function_call#function-calling-for-models-wout-function-calling-support\n pass\n\n @staticmethod\n def format_tools_for_chat_completion(tools):\n \"\"\"Formats Response API tool descriptions for the OpenAI Chat Completion API.\n This is needed because actionset.to_tool_description() in bgym only returns\n descriptions in the format valid for the OpenAI Response API.\n\n Args:\n tools: List of tool descriptions to format for Chat Completion API.\n","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} 
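Note on the records around this point: format_tools_for_chat_completion rewrites the flat, Responses-API-style tool descriptions produced by bgym's actionset.to_tool_description() into the nested layout the Chat Completions API expects. A minimal standalone sketch of that reshaping follows; the sample click tool is hypothetical and stands in for a real bgym action description.

# Standalone sketch (assumed input): convert one Responses-API-style tool
# description into the Chat Completions layout, mirroring the dict
# comprehension inside format_tools_for_chat_completion.
response_api_tool = {  # hypothetical bgym-style description (flat layout)
    "type": "function",
    "name": "click",
    "description": "Click the element with the given bid.",
    "parameters": {
        "type": "object",
        "properties": {"bid": {"type": "string"}},
        "required": ["bid"],
    },
}

chat_completion_tool = {
    "type": response_api_tool["type"],
    # name/description/parameters move under a nested "function" key
    "function": {k: response_api_tool[k] for k in ("name", "description", "parameters")},
}

assert chat_completion_tool["function"]["name"] == "click"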
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api._extract_env_actions_from_text_response","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api._extract_env_actions_from_text_response#L191-L198","kind":"function","name":"_extract_env_actions_from_text_response","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":191,"end_line":198,"context_start_line":171,"context_end_line":218,"code":" )\n if self.use_only_first_toolcall:\n break\n return ToolCalls(tool_calls=tool_call_list, raw_calls=response) # type: ignore\n\n def _extract_env_actions_from_toolcalls(self, toolcalls: ToolCalls) -> Any | None:\n \"\"\"Extracts environment actions from the tool calls.\"\"\"\n if not toolcalls:\n return None\n\n actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(\n self, response: \"OpenAIChatCompletion\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n # TODO: Add support to pass action space as prompt in LiteLLM.\n # Check: https://docs.litellm.ai/docs/completion/function_call#function-calling-for-models-wout-function-calling-support\n pass\n\n @staticmethod\n def format_tools_for_chat_completion(tools):\n \"\"\"Formats Response API tool descriptions for the OpenAI Chat Completion API.\n This is needed because actionset.to_tool_description() in bgym only returns\n descriptions in the format valid for the OpenAI Response API.\n\n Args:\n tools: List of tool descriptions to format for Chat Completion API.\n\n Returns:\n Formatted tools list compatible with OpenAI Chat Completion API, or None if tools is None.\n \"\"\"\n formatted_tools = None\n if tools is not None:\n formatted_tools = [\n {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] for k in (\"name\", \"description\", \"parameters\")},","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api.format_tools_for_chat_completion","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api.format_tools_for_chat_completion#L201-L222","kind":"function","name":"format_tools_for_chat_completion","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":201,"end_line":222,"context_start_line":181,"context_end_line":242,"code":" actions = [\n AgentlabAction.convert_toolcall_to_agentlab_action_format(call) for call in toolcalls\n ]\n actions = (\n AgentlabAction.convert_multiactions_to_agentlab_action_format(actions)\n if len(actions) > 1\n else actions[0]\n )\n return actions\n\n def _extract_env_actions_from_text_response(\n self, response: \"OpenAIChatCompletion\"\n ) -> str | None:\n \"\"\"Extracts environment actions from the text response.\"\"\"\n # Use when action space is not given as tools.\n # TODO: Add support to pass action space as prompt in LiteLLM.\n # Check: https://docs.litellm.ai/docs/completion/function_call#function-calling-for-models-wout-function-calling-support\n pass\n\n @staticmethod\n def format_tools_for_chat_completion(tools):\n \"\"\"Formats Response API tool descriptions for the OpenAI Chat Completion API.\n This is needed because actionset.to_tool_description() in bgym only returns\n descriptions in the format valid for the OpenAI Response API.\n\n Args:\n tools: List of 
tool descriptions to format for Chat Completion API.\n\n Returns:\n Formatted tools list compatible with OpenAI Chat Completion API, or None if tools is None.\n \"\"\"\n formatted_tools = None\n if tools is not None:\n formatted_tools = [\n {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] for k in (\"name\", \"description\", \"parameters\")},\n }\n for tool in tools\n ]\n return formatted_tools\n\n\nclass LiteLLMAPIMessageBuilder(OpenAIChatCompletionAPIMessageBuilder):\n \"\"\"Message builder for LiteLLM API, extending OpenAIChatCompletionAPIMessageBuilder.\"\"\"\n\n def prepare_message(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call(use_only_first_toolcall)\n\n def handle_tool_call(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n raw_call = self.responded_tool_calls.raw_calls.choices[0].message # type: ignore\n if use_only_first_toolcall:","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api.prepare_message","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api.prepare_message#L228-L234","kind":"function","name":"prepare_message","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":228,"end_line":234,"context_start_line":208,"context_end_line":254,"code":" tools: List of tool descriptions to format for Chat Completion API.\n\n Returns:\n Formatted tools list compatible with OpenAI Chat Completion API, or None if tools is None.\n \"\"\"\n formatted_tools = None\n if tools is not None:\n formatted_tools = [\n {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] for k in (\"name\", \"description\", \"parameters\")},\n }\n for tool in tools\n ]\n return formatted_tools\n\n\nclass LiteLLMAPIMessageBuilder(OpenAIChatCompletionAPIMessageBuilder):\n \"\"\"Message builder for LiteLLM API, extending OpenAIChatCompletionAPIMessageBuilder.\"\"\"\n\n def prepare_message(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call(use_only_first_toolcall)\n\n def handle_tool_call(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n raw_call = self.responded_tool_calls.raw_calls.choices[0].message # type: ignore\n if use_only_first_toolcall:\n raw_call.tool_calls = raw_call.tool_calls[:1]\n output.append(raw_call) # add raw calls to output\n for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call\n assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict 
has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_reponse = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api.handle_tool_call","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api.handle_tool_call#L236-L259","kind":"function","name":"handle_tool_call","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":236,"end_line":259,"context_start_line":216,"context_end_line":279,"code":" {\n \"type\": tool[\"type\"],\n \"function\": {k: tool[k] for k in (\"name\", \"description\", \"parameters\")},\n }\n for tool in tools\n ]\n return formatted_tools\n\n\nclass LiteLLMAPIMessageBuilder(OpenAIChatCompletionAPIMessageBuilder):\n \"\"\"Message builder for LiteLLM API, extending OpenAIChatCompletionAPIMessageBuilder.\"\"\"\n\n def prepare_message(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Prepare the message for the OpenAI API.\"\"\"\n content = []\n for item in self.content:\n content.append(self.convert_content_to_expected_format(item))\n output = [{\"role\": self.role, \"content\": content}]\n return output if self.role != \"tool\" else self.handle_tool_call(use_only_first_toolcall)\n\n def handle_tool_call(self, use_only_first_toolcall: bool = False) -> List[Message]:\n \"\"\"Handle the tool call response from the last raw response.\"\"\"\n if self.responded_tool_calls is None:\n raise ValueError(\"No tool calls found in responded_tool_calls\")\n output = []\n raw_call = self.responded_tool_calls.raw_calls.choices[0].message # type: ignore\n if use_only_first_toolcall:\n raw_call.tool_calls = raw_call.tool_calls[:1]\n output.append(raw_call) # add raw calls to output\n for fn_call in self.responded_tool_calls:\n raw_call = fn_call.raw_call\n assert (\n \"image\" not in fn_call.tool_response\n ), \"Image output is not supported in function calls response.\"\n # a function_call_output dict has keys \"role\", \"tool_call_id\" and \"content\"\n tool_call_reponse = {\n \"name\": raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],\n }\n output.append(tool_call_reponse)\n\n return output\n\n\n@dataclass\nclass LiteLLMModelArgs(BaseModelArgs):\n \"\"\"Serializable arguments for LiteLMMModel.\"\"\"\n\n api = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n base_url: Optional[str] = None\n api_key: Optional[str] = None\n use_only_first_toolcall: bool = False\n\n def make_model(self):\n return LiteLLMModel(\n model_name=self.model_name,\n base_url=self.base_url,\n api_key=self.api_key,\n max_tokens=self.max_new_tokens,\n temperature=self.temperature,\n use_only_first_toolcall=self.use_only_first_toolcall,\n )","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api.make_model","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api.make_model#L271-L279","kind":"function","name":"make_model","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":271,"end_line":279,"context_start_line":251,"context_end_line":299,"code":" tool_call_reponse = {\n \"name\": 
raw_call[\"function\"][\"name\"], # required with OpenRouter\n \"role\": \"tool\",\n \"tool_call_id\": raw_call[\"id\"],\n \"content\": self.convert_content_to_expected_format(fn_call.tool_response)[\"text\"],\n }\n output.append(tool_call_reponse)\n\n return output\n\n\n@dataclass\nclass LiteLLMModelArgs(BaseModelArgs):\n \"\"\"Serializable arguments for LiteLLMModel.\"\"\"\n\n api = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n base_url: Optional[str] = None\n api_key: Optional[str] = None\n use_only_first_toolcall: bool = False\n\n def make_model(self):\n return LiteLLMModel(\n model_name=self.model_name,\n base_url=self.base_url,\n api_key=self.api_key,\n max_tokens=self.max_new_tokens,\n temperature=self.temperature,\n use_only_first_toolcall=self.use_only_first_toolcall,\n )\n\n def get_message_builder(self) -> Type[MessageBuilder]:\n \"\"\"Returns a message builder for the LiteLLMModel.\"\"\"\n return LiteLLMAPIMessageBuilder\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Some simple tests to run the LiteLLMModel with different models.\n \"\"\"\n\n import os\n\n from agentlab.agents.tool_use_agent import DEFAULT_PROMPT_CONFIG, ToolUseAgentArgs\n from agentlab.experiments.study import Study\n from agentlab.llm.litellm_api import LiteLLMModelArgs\n\n os.environ[\"LITELLM_LOG\"] = \"WARNING\"\n\n def get_agent(model_name: str) -> ToolUseAgentArgs:","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api.get_message_builder","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api.get_message_builder#L281-L283","kind":"function","name":"get_message_builder","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":281,"end_line":283,"context_start_line":261,"context_end_line":303,"code":"\n@dataclass\nclass LiteLLMModelArgs(BaseModelArgs):\n \"\"\"Serializable arguments for LiteLLMModel.\"\"\"\n\n api = \"openai\" # tool description format used by actionset.to_tool_description() in bgym\n base_url: Optional[str] = None\n api_key: Optional[str] = None\n use_only_first_toolcall: bool = False\n\n def make_model(self):\n return LiteLLMModel(\n model_name=self.model_name,\n base_url=self.base_url,\n api_key=self.api_key,\n max_tokens=self.max_new_tokens,\n temperature=self.temperature,\n use_only_first_toolcall=self.use_only_first_toolcall,\n )\n\n def get_message_builder(self) -> Type[MessageBuilder]:\n \"\"\"Returns a message builder for the LiteLLMModel.\"\"\"\n return LiteLLMAPIMessageBuilder\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Some simple tests to run the LiteLLMModel with different models.\n \"\"\"\n\n import os\n\n from agentlab.agents.tool_use_agent import DEFAULT_PROMPT_CONFIG, ToolUseAgentArgs\n from agentlab.experiments.study import Study\n from agentlab.llm.litellm_api import LiteLLMModelArgs\n\n os.environ[\"LITELLM_LOG\"] = \"WARNING\"\n\n def get_agent(model_name: str) -> ToolUseAgentArgs:\n return ToolUseAgentArgs(\n model_args=LiteLLMModelArgs(\n model_name=model_name,\n max_new_tokens=2000,","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} 
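A brief usage sketch for the LiteLLMModelArgs dataclass captured in the records above, mirroring the get_agent helper from the __main__ block: the model id and argument values are illustrative only, and calling make_model() assumes litellm is installed and the matching provider API key is set in the environment.

# Usage sketch (assumptions: agentlab and litellm installed, OPENAI_API_KEY set).
from agentlab.llm.litellm_api import LiteLLMModelArgs

model_args = LiteLLMModelArgs(
    model_name="openai/gpt-4.1-mini",  # any LiteLLM-routable model id
    max_new_tokens=2000,
    temperature=None,  # None defers to the provider default
    use_only_first_toolcall=True,  # keep only the first tool call per response
)
model = model_args.make_model()  # builds a LiteLLMModel; maps max_new_tokens to max_tokens
builder_cls = model_args.get_message_builder()  # returns LiteLLMAPIMessageBuilder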
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.litellm_api.get_agent","uri":"program://AgentLab/function/src.agentlab.llm.litellm_api.get_agent#L299-L307","kind":"function","name":"get_agent","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":299,"end_line":307,"context_start_line":279,"context_end_line":326,"code":" )\n\n def get_message_builder(self) -> Type[MessageBuilder]:\n \"\"\"Returns a message builder for the LiteLMMModel.\"\"\"\n return LiteLLMAPIMessageBuilder\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Some simple tests to run the LiteLLMModel with different models.\n \"\"\"\n\n import os\n\n from agentlab.agents.tool_use_agent import DEFAULT_PROMPT_CONFIG, ToolUseAgentArgs\n from agentlab.experiments.study import Study\n from agentlab.llm.litellm_api import LiteLLMModelArgs\n\n os.environ[\"LITELLM_LOG\"] = \"WARNING\"\n\n def get_agent(model_name: str) -> ToolUseAgentArgs:\n return ToolUseAgentArgs(\n model_args=LiteLLMModelArgs(\n model_name=model_name,\n max_new_tokens=2000,\n temperature=None,\n ),\n config=DEFAULT_PROMPT_CONFIG,\n )\n\n models = [\n \"openai/gpt-4.1\",\n \"openai/gpt-4.1-mini\",\n \"openai/gpt-4.1-nano\",\n \"openai/o3-2025-04-16\",\n \"anthropic/claude-3-7-sonnet-20250219\",\n \"anthropic/claude-sonnet-4-20250514\",\n ## Add more models to test.\n ]\n agent_args = [get_agent(model) for model in models]\n\n study = Study(agent_args, \"miniwob_tiny_test\", logging_level_stdout=logging.WARNING)\n study.run(\n n_jobs=5,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n )","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api","uri":"program://AgentLab/module/src.agentlab.llm.chat_api#L1-L555","kind":"module","name":"src.agentlab.llm.chat_api","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":1,"end_line":555,"context_start_line":1,"context_end_line":555,"code":"import logging\nimport os\nimport re\nimport time\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Optional\n\nimport anthropic\nimport openai\nfrom openai import NOT_GIVEN, OpenAI\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.base_api import AbstractChatModel, BaseModelArgs\nfrom agentlab.llm.llm_utils import AIMessage, Discussion\n\n\ndef make_system_message(content: str) -> dict:\n return dict(role=\"system\", content=content)\n\n\ndef make_user_message(content: str) -> dict:\n return dict(role=\"user\", content=content)\n\n\ndef make_assistant_message(content: str) -> dict:\n return dict(role=\"assistant\", content=content)\n\n\nclass CheatMiniWoBLLM(AbstractChatModel):\n \"\"\"For unit-testing purposes only. 
It only work with miniwob.click-test task.\"\"\"\n\n def __init__(self, wait_time=0) -> None:\n self.wait_time = wait_time\n\n def __call__(self, messages) -> str:\n if self.wait_time > 0:\n print(f\"Waiting for {self.wait_time} seconds\")\n time.sleep(self.wait_time)\n\n if isinstance(messages, Discussion):\n prompt = messages.to_string()\n else:\n prompt = messages[1].get(\"content\", \"\")\n match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'\n else:\n raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n\n{action}\n\n\"\"\"\n return make_assistant_message(answer)\n\n\n@dataclass\nclass CheatMiniWoBLLMArgs:\n model_name = \"test/cheat_miniwob_click_test\"\n max_total_tokens = 10240\n max_input_tokens = 8000\n max_new_tokens = 128\n wait_time: int = 0\n\n def make_model(self):\n return CheatMiniWoBLLM(self.wait_time)\n\n def prepare_server(self):\n pass\n\n def close_server(self):\n pass\n\n\n@dataclass\nclass OpenRouterModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n def make_model(self):\n return OpenRouterChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass OpenAIModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n def make_model(self):\n return OpenAIChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass AzureModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an Azure model.\"\"\"\n\n deployment_name: str = (\n None # NOTE: deployment_name is deprecated for Azure OpenAI and won't be used.\n )\n\n def make_model(self):\n return AzureChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass SelfHostedModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with a self-hosted model.\"\"\"\n\n model_url: str = None\n token: str = None\n backend: str = \"huggingface\"\n n_retry_server: int = 4\n\n def make_model(self):\n if self.backend == \"huggingface\":\n # currently only huggingface tgi servers are supported\n if self.model_url is None:\n self.model_url = os.environ[\"AGENTLAB_MODEL_URL\"]\n if self.token is None:\n self.token = os.environ[\"AGENTLAB_MODEL_TOKEN\"]\n # Lazy import to avoid importing HF utilities on non-HF paths\n from agentlab.llm.huggingface_utils import HuggingFaceURLChatModel\n\n return HuggingFaceURLChatModel(\n model_name=self.model_name,\n model_url=self.model_url,\n token=self.token,\n temperature=self.temperature,\n max_new_tokens=self.max_new_tokens,\n n_retry_server=self.n_retry_server,\n log_probs=self.log_probs,\n )\n elif self.backend == \"vllm\":\n return VLLMChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n n_retry_server=self.n_retry_server,\n )\n else:\n raise ValueError(f\"Backend {self.backend} is not supported\")\n\n\n@dataclass\nclass ChatModelArgs(BaseModelArgs):\n \"\"\"Object added for backward compatibility with the old ChatModelArgs.\"\"\"\n\n model_path: str = None\n model_url: str = 
None\n model_size: str = None\n training_total_tokens: int = None\n hf_hosted: bool = False\n is_model_operational: str = False\n sliding_window: bool = False\n n_retry_server: int = 4\n infer_tokens_length: bool = False\n vision_support: bool = False\n shard_support: bool = True\n extra_tgi_args: dict = None\n tgi_image: str = None\n info: dict = None\n\n def __post_init__(self):\n import warnings\n\n warnings.simplefilter(\"always\", DeprecationWarning)\n warnings.warn(\n \"ChatModelArgs is deprecated and used only for xray. Use one of the specific model args classes instead.\",\n DeprecationWarning,\n )\n warnings.simplefilter(\"default\", DeprecationWarning)\n\n def make_model(self):\n pass\n\n\ndef _extract_wait_time(error_message, min_retry_wait_time=60):\n \"\"\"Extract the wait time from an OpenAI RateLimitError message.\"\"\"\n match = re.search(r\"try again in (\\d+(\\.\\d+)?)s\", error_message)\n if match:\n return max(min_retry_wait_time, float(match.group(1)))\n return min_retry_wait_time\n\n\nclass RetryError(Exception):\n pass\n\n\ndef handle_error(error, itr, min_retry_wait_time, max_retry):\n if not isinstance(error, openai.OpenAIError):\n raise error\n logging.warning(\n f\"Failed to get a response from the API: \\n{error}\\n\" f\"Retrying... ({itr+1}/{max_retry})\"\n )\n wait_time = _extract_wait_time(\n error.args[0],\n min_retry_wait_time=min_retry_wait_time,\n )\n logging.info(f\"Waiting for {wait_time} seconds\")\n time.sleep(wait_time)\n error_type = error.args[0]\n return error_type\n\n\nclass OpenRouterError(openai.OpenAIError):\n pass\n\n\nclass ChatModel(AbstractChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n api_key_env_var=None,\n client_class=OpenAI,\n client_args=None,\n pricing_func=None,\n log_probs=False,\n ):\n assert max_retry > 0, \"max_retry should be greater than 0\"\n\n self.model_name = model_name\n self.temperature = temperature\n self.max_tokens = max_tokens\n self.max_retry = max_retry\n self.min_retry_wait_time = min_retry_wait_time\n self.log_probs = log_probs\n\n # Get the API key from the environment variable if not provided\n if api_key_env_var:\n api_key = api_key or os.getenv(api_key_env_var)\n self.api_key = api_key\n\n # Get pricing information\n if pricing_func:\n pricings = pricing_func()\n try:\n self.input_cost = float(pricings[model_name][\"prompt\"])\n self.output_cost = float(pricings[model_name][\"completion\"])\n except KeyError:\n logging.warning(\n f\"Model {model_name} not found in the pricing information, prices are set to 0. 
Maybe try upgrading langchain_community.\"\n )\n self.input_cost = 0.0\n self.output_cost = 0.0\n else:\n self.input_cost = 0.0\n self.output_cost = 0.0\n\n client_args = client_args or {}\n self.client = client_class(\n api_key=api_key,\n **client_args,\n )\n\n def __call__(self, messages: list[dict], n_samples: int = 1, temperature: float = None) -> dict:\n # Initialize retry tracking attributes\n self.retries = 0\n self.success = False\n self.error_types = []\n\n completion = None\n e = None\n for itr in range(self.max_retry):\n self.retries += 1\n temperature = temperature if temperature is not None else self.temperature\n try:\n completion = self.client.chat.completions.create(\n model=self.model_name,\n messages=messages,\n n=n_samples,\n temperature=temperature,\n max_completion_tokens=self.max_tokens,\n logprobs=self.log_probs,\n )\n\n if completion.usage is None:\n raise OpenRouterError(\n \"The completion object does not contain usage information. This is likely a bug in the OpenRouter API.\"\n )\n\n self.success = True\n break\n except openai.OpenAIError as e:\n error_type = handle_error(e, itr, self.min_retry_wait_time, self.max_retry)\n self.error_types.append(error_type)\n\n if not completion:\n raise RetryError(\n f\"Failed to get a response from the API after {self.max_retry} retries\\n\"\n f\"Last error: {error_type}\"\n )\n\n input_tokens = completion.usage.prompt_tokens\n output_tokens = completion.usage.completion_tokens\n cost = input_tokens * self.input_cost + output_tokens * self.output_cost\n\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(input_tokens, output_tokens, cost)\n\n if n_samples == 1:\n res = AIMessage(completion.choices[0].message.content)\n if self.log_probs:\n res[\"log_probs\"] = completion.choices[0].log_probs\n return res\n else:\n return [AIMessage(c.message.content) for c in completion.choices]\n\n def get_stats(self):\n return {\n \"n_retry_llm\": self.retries,\n # \"busted_retry_llm\": int(not self.success), # not logged if it occurs anyways\n }\n\n\nclass OpenAIChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n if max_tokens is None:\n max_tokens = NOT_GIVEN\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=max_retry,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"OPENAI_API_KEY\",\n client_class=OpenAI,\n pricing_func=partial(tracking.get_pricing_litellm, model_name=model_name),\n log_probs=log_probs,\n )\n\n\nclass OpenRouterChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n client_args = {\n \"base_url\": \"https://openrouter.ai/api/v1\",\n }\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=max_retry,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"OPENROUTER_API_KEY\",\n client_class=OpenAI,\n client_args=client_args,\n pricing_func=tracking.get_pricing_openrouter,\n log_probs=log_probs,\n )\n\n\nclass AzureChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n deployment_name=None,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n 
api_key = api_key or os.getenv(\"AZURE_OPENAI_API_KEY\")\n assert (\n api_key\n ), \"AZURE_OPENAI_API_KEY has to be defined in the environment when using AzureChatModel\"\n endpoint = os.getenv(\"AZURE_OPENAI_ENDPOINT\")\n assert (\n endpoint\n ), \"AZURE_OPENAI_ENDPOINT has to be defined in the environment when using AzureChatModel\"\n\n if deployment_name is not None:\n logging.info(\n f\"Deployment name is deprecated for Azure OpenAI and won't be used. Using model name: {model_name}.\"\n )\n\n client_args = {\n \"base_url\": endpoint,\n \"default_query\": {\"api-version\": \"preview\"},\n }\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=max_retry,\n min_retry_wait_time=min_retry_wait_time,\n client_class=OpenAI,\n client_args=client_args,\n pricing_func=tracking.get_pricing_openai,\n log_probs=log_probs,\n )\n\n\ndef __getattr__(name: str):\n \"\"\"Lazy re-export of optional classes to keep imports light.\n\n This lets users import HuggingFaceURLChatModel from agentlab.llm.chat_api\n without importing heavy dependencies unless actually used.\n\n Args:\n name: The name of the attribute to retrieve.\n\n Returns:\n The requested class or raises AttributeError if not found.\n\n Raises:\n AttributeError: If the requested attribute is not available.\n \"\"\"\n if name == \"HuggingFaceURLChatModel\":\n from agentlab.llm.huggingface_utils import HuggingFaceURLChatModel\n\n return HuggingFaceURLChatModel\n raise AttributeError(name)\n\n\nclass VLLMChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n n_retry_server=4,\n min_retry_wait_time=60,\n ):\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=n_retry_server,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"VLLM_API_KEY\",\n client_class=OpenAI,\n client_args={\"base_url\": \"http://0.0.0.0:8000/v1\"},\n pricing_func=None,\n )\n\n\nclass AnthropicChatModel(AbstractChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n ):\n self.model_name = model_name\n self.temperature = temperature\n self.max_tokens = max_tokens\n self.max_retry = max_retry\n\n api_key = api_key or os.getenv(\"ANTHROPIC_API_KEY\")\n self.client = anthropic.Anthropic(api_key=api_key)\n\n def __call__(self, messages: list[dict], n_samples: int = 1, temperature: float = None) -> dict:\n # Convert OpenAI format to Anthropic format\n system_message = None\n anthropic_messages = []\n\n for msg in messages:\n if msg[\"role\"] == \"system\":\n system_message = msg[\"content\"]\n else:\n anthropic_messages.append({\"role\": msg[\"role\"], \"content\": msg[\"content\"]})\n\n temperature = temperature if temperature is not None else self.temperature\n\n for attempt in range(self.max_retry):\n try:\n kwargs = {\n \"model\": self.model_name,\n \"messages\": anthropic_messages,\n \"max_tokens\": self.max_tokens,\n \"temperature\": temperature,\n }\n\n if system_message:\n kwargs[\"system\"] = system_message\n\n response = self.client.messages.create(**kwargs)\n\n # Track usage if available\n if hasattr(tracking.TRACKER, \"instance\"):\n tracking.TRACKER.instance(\n response.usage.input_tokens,\n response.usage.output_tokens,\n 0, # cost calculation would need pricing info\n )\n\n return AIMessage(response.content[0].text)\n\n except Exception as e:\n if attempt == self.max_retry - 1:\n raise 
e\n logging.warning(f\"Anthropic API error (attempt {attempt + 1}): {e}\")\n time.sleep(60) # Simple retry delay\n\n\n@dataclass\nclass AnthropicModelArgs(BaseModelArgs):\n def make_model(self):\n return AnthropicChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.make_system_message","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.make_system_message#L18-L19","kind":"function","name":"make_system_message","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":18,"end_line":19,"context_start_line":1,"context_end_line":39,"code":"import logging\nimport os\nimport re\nimport time\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Optional\n\nimport anthropic\nimport openai\nfrom openai import NOT_GIVEN, OpenAI\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.base_api import AbstractChatModel, BaseModelArgs\nfrom agentlab.llm.llm_utils import AIMessage, Discussion\n\n\ndef make_system_message(content: str) -> dict:\n return dict(role=\"system\", content=content)\n\n\ndef make_user_message(content: str) -> dict:\n return dict(role=\"user\", content=content)\n\n\ndef make_assistant_message(content: str) -> dict:\n return dict(role=\"assistant\", content=content)\n\n\nclass CheatMiniWoBLLM(AbstractChatModel):\n \"\"\"For unit-testing purposes only. It only work with miniwob.click-test task.\"\"\"\n\n def __init__(self, wait_time=0) -> None:\n self.wait_time = wait_time\n\n def __call__(self, messages) -> str:\n if self.wait_time > 0:\n print(f\"Waiting for {self.wait_time} seconds\")\n time.sleep(self.wait_time)","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.make_user_message","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.make_user_message#L22-L23","kind":"function","name":"make_user_message","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":22,"end_line":23,"context_start_line":2,"context_end_line":43,"code":"import os\nimport re\nimport time\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Optional\n\nimport anthropic\nimport openai\nfrom openai import NOT_GIVEN, OpenAI\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.base_api import AbstractChatModel, BaseModelArgs\nfrom agentlab.llm.llm_utils import AIMessage, Discussion\n\n\ndef make_system_message(content: str) -> dict:\n return dict(role=\"system\", content=content)\n\n\ndef make_user_message(content: str) -> dict:\n return dict(role=\"user\", content=content)\n\n\ndef make_assistant_message(content: str) -> dict:\n return dict(role=\"assistant\", content=content)\n\n\nclass CheatMiniWoBLLM(AbstractChatModel):\n \"\"\"For unit-testing purposes only. 
It only works with miniwob.click-test task.\"\"\"\n\n def __init__(self, wait_time=0) -> None:\n self.wait_time = wait_time\n\n def __call__(self, messages) -> str:\n if self.wait_time > 0:\n print(f\"Waiting for {self.wait_time} seconds\")\n time.sleep(self.wait_time)\n\n if isinstance(messages, Discussion):\n prompt = messages.to_string()\n else:","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.make_assistant_message","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.make_assistant_message#L26-L27","kind":"function","name":"make_assistant_message","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":26,"end_line":27,"context_start_line":6,"context_end_line":47,"code":"from functools import partial\nfrom typing import Optional\n\nimport anthropic\nimport openai\nfrom openai import NOT_GIVEN, OpenAI\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.base_api import AbstractChatModel, BaseModelArgs\nfrom agentlab.llm.llm_utils import AIMessage, Discussion\n\n\ndef make_system_message(content: str) -> dict:\n return dict(role=\"system\", content=content)\n\n\ndef make_user_message(content: str) -> dict:\n return dict(role=\"user\", content=content)\n\n\ndef make_assistant_message(content: str) -> dict:\n return dict(role=\"assistant\", content=content)\n\n\nclass CheatMiniWoBLLM(AbstractChatModel):\n \"\"\"For unit-testing purposes only. It only works with miniwob.click-test task.\"\"\"\n\n def __init__(self, wait_time=0) -> None:\n self.wait_time = wait_time\n\n def __call__(self, messages) -> str:\n if self.wait_time > 0:\n print(f\"Waiting for {self.wait_time} seconds\")\n time.sleep(self.wait_time)\n\n if isinstance(messages, Discussion):\n prompt = messages.to_string()\n else:\n prompt = messages[1].get(\"content\", \"\")\n match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.CheatMiniWoBLLM","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.CheatMiniWoBLLM#L30-L58","kind":"class","name":"CheatMiniWoBLLM","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":30,"end_line":58,"context_start_line":10,"context_end_line":78,"code":"import openai\nfrom openai import NOT_GIVEN, OpenAI\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.base_api import AbstractChatModel, BaseModelArgs\nfrom agentlab.llm.llm_utils import AIMessage, Discussion\n\n\ndef make_system_message(content: str) -> dict:\n return dict(role=\"system\", content=content)\n\n\ndef make_user_message(content: str) -> dict:\n return dict(role=\"user\", content=content)\n\n\ndef make_assistant_message(content: str) -> dict:\n return dict(role=\"assistant\", content=content)\n\n\nclass CheatMiniWoBLLM(AbstractChatModel):\n \"\"\"For unit-testing purposes only. It only works with miniwob.click-test task.\"\"\"\n\n def __init__(self, wait_time=0) -> None:\n self.wait_time = wait_time\n\n def __call__(self, messages) -> str:\n if self.wait_time > 0:\n print(f\"Waiting for {self.wait_time} seconds\")\n time.sleep(self.wait_time)\n\n if isinstance(messages, Discussion):\n prompt = messages.to_string()\n else:\n prompt = messages[1].get(\"content\", \"\")\n match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'\n else:\n raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n\n{action}\n\n\"\"\"\n return make_assistant_message(answer)\n\n\n@dataclass\nclass CheatMiniWoBLLMArgs:\n model_name = \"test/cheat_miniwob_click_test\"\n max_total_tokens = 10240\n max_input_tokens = 8000\n max_new_tokens = 128\n wait_time: int = 0\n\n def make_model(self):\n return CheatMiniWoBLLM(self.wait_time)\n\n def prepare_server(self):\n pass\n\n def close_server(self):\n pass\n\n","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.CheatMiniWoBLLMArgs","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.CheatMiniWoBLLMArgs#L62-L76","kind":"class","name":"CheatMiniWoBLLMArgs","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":62,"end_line":76,"context_start_line":42,"context_end_line":96,"code":" prompt = messages.to_string()\n else:\n prompt = messages[1].get(\"content\", \"\")\n match = re.search(r\"^\\s*\\[(\\d+)\\].*button\", prompt, re.MULTILINE | re.IGNORECASE)\n\n if match:\n bid = match.group(1)\n action = f'click(\"{bid}\")'\n else:\n raise Exception(\"Can't find the button's bid\")\n\n answer = f\"\"\"I'm clicking the button as requested.\n\n{action}\n\n\"\"\"\n return make_assistant_message(answer)\n\n\n@dataclass\nclass CheatMiniWoBLLMArgs:\n model_name = \"test/cheat_miniwob_click_test\"\n max_total_tokens = 10240\n max_input_tokens = 8000\n max_new_tokens = 128\n wait_time: int = 0\n\n def make_model(self):\n return CheatMiniWoBLLM(self.wait_time)\n\n def prepare_server(self):\n pass\n\n def close_server(self):\n 
pass\n\n\n@dataclass\nclass OpenRouterModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n def make_model(self):\n return OpenRouterChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass OpenAIModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n def make_model(self):\n return OpenAIChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass AzureModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an Azure model.\"\"\"\n","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.OpenAIModelArgs","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.OpenAIModelArgs#L94-L104","kind":"class","name":"OpenAIModelArgs","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":94,"end_line":104,"context_start_line":74,"context_end_line":124,"code":"\n def close_server(self):\n pass\n\n\n@dataclass\nclass OpenRouterModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n def make_model(self):\n return OpenRouterChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass OpenAIModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n def make_model(self):\n return OpenAIChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass AzureModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an Azure model.\"\"\"\n\n deployment_name: str = (\n None # NOTE: deployment_name is deprecated for Azure OpenAI and won't be used.\n )\n\n def make_model(self):\n return AzureChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.AzureModelArgs","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.AzureModelArgs#L108-L121","kind":"class","name":"AzureModelArgs","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":108,"end_line":121,"context_start_line":88,"context_end_line":141,"code":" max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass OpenAIModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"\n\n def make_model(self):\n return OpenAIChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass AzureModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an Azure model.\"\"\"\n\n deployment_name: str = (\n None # NOTE: deployment_name is deprecated for Azure OpenAI and won't be used.\n )\n\n 
def make_model(self):\n return AzureChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass SelfHostedModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with a self-hosted model.\"\"\"\n\n model_url: str = None\n token: str = None\n backend: str = \"huggingface\"\n n_retry_server: int = 4\n\n def make_model(self):\n if self.backend == \"huggingface\":\n # currently only huggingface tgi servers are supported\n if self.model_url is None:\n self.model_url = os.environ[\"AGENTLAB_MODEL_URL\"]\n if self.token is None:\n self.token = os.environ[\"AGENTLAB_MODEL_TOKEN\"]\n # Lazy import to avoid importing HF utilities on non-HF paths\n from agentlab.llm.huggingface_utils import HuggingFaceURLChatModel","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.SelfHostedModelArgs","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.SelfHostedModelArgs#L125-L160","kind":"class","name":"SelfHostedModelArgs","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":125,"end_line":160,"context_start_line":105,"context_end_line":180,"code":"\n\n@dataclass\nclass AzureModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an Azure model.\"\"\"\n\n deployment_name: str = (\n None # NOTE: deployment_name is deprecated for Azure OpenAI and won't be used.\n )\n\n def make_model(self):\n return AzureChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass SelfHostedModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with a self-hosted model.\"\"\"\n\n model_url: str = None\n token: str = None\n backend: str = \"huggingface\"\n n_retry_server: int = 4\n\n def make_model(self):\n if self.backend == \"huggingface\":\n # currently only huggingface tgi servers are supported\n if self.model_url is None:\n self.model_url = os.environ[\"AGENTLAB_MODEL_URL\"]\n if self.token is None:\n self.token = os.environ[\"AGENTLAB_MODEL_TOKEN\"]\n # Lazy import to avoid importing HF utilities on non-HF paths\n from agentlab.llm.huggingface_utils import HuggingFaceURLChatModel\n\n return HuggingFaceURLChatModel(\n model_name=self.model_name,\n model_url=self.model_url,\n token=self.token,\n temperature=self.temperature,\n max_new_tokens=self.max_new_tokens,\n n_retry_server=self.n_retry_server,\n log_probs=self.log_probs,\n )\n elif self.backend == \"vllm\":\n return VLLMChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n n_retry_server=self.n_retry_server,\n )\n else:\n raise ValueError(f\"Backend {self.backend} is not supported\")\n\n\n@dataclass\nclass ChatModelArgs(BaseModelArgs):\n \"\"\"Object added for backward compatibility with the old ChatModelArgs.\"\"\"\n\n model_path: str = None\n model_url: str = None\n model_size: str = None\n training_total_tokens: int = None\n hf_hosted: bool = False\n is_model_operational: str = False\n sliding_window: bool = False\n n_retry_server: int = 4\n infer_tokens_length: bool = False\n vision_support: bool = False\n shard_support: bool = True\n extra_tgi_args: dict = None\n tgi_image: str = None\n info: dict = 
None","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.ChatModelArgs","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.ChatModelArgs#L164-L193","kind":"class","name":"ChatModelArgs","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":164,"end_line":193,"context_start_line":144,"context_end_line":213,"code":" model_name=self.model_name,\n model_url=self.model_url,\n token=self.token,\n temperature=self.temperature,\n max_new_tokens=self.max_new_tokens,\n n_retry_server=self.n_retry_server,\n log_probs=self.log_probs,\n )\n elif self.backend == \"vllm\":\n return VLLMChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n n_retry_server=self.n_retry_server,\n )\n else:\n raise ValueError(f\"Backend {self.backend} is not supported\")\n\n\n@dataclass\nclass ChatModelArgs(BaseModelArgs):\n \"\"\"Object added for backward compatibility with the old ChatModelArgs.\"\"\"\n\n model_path: str = None\n model_url: str = None\n model_size: str = None\n training_total_tokens: int = None\n hf_hosted: bool = False\n is_model_operational: str = False\n sliding_window: bool = False\n n_retry_server: int = 4\n infer_tokens_length: bool = False\n vision_support: bool = False\n shard_support: bool = True\n extra_tgi_args: dict = None\n tgi_image: str = None\n info: dict = None\n\n def __post_init__(self):\n import warnings\n\n warnings.simplefilter(\"always\", DeprecationWarning)\n warnings.warn(\n \"ChatModelArgs is deprecated and used only for xray. Use one of the specific model args classes instead.\",\n DeprecationWarning,\n )\n warnings.simplefilter(\"default\", DeprecationWarning)\n\n def make_model(self):\n pass\n\n\ndef _extract_wait_time(error_message, min_retry_wait_time=60):\n \"\"\"Extract the wait time from an OpenAI RateLimitError message.\"\"\"\n match = re.search(r\"try again in (\\d+(\\.\\d+)?)s\", error_message)\n if match:\n return max(min_retry_wait_time, float(match.group(1)))\n return min_retry_wait_time\n\n\nclass RetryError(Exception):\n pass\n\n\ndef handle_error(error, itr, min_retry_wait_time, max_retry):\n if not isinstance(error, openai.OpenAIError):\n raise error\n logging.warning(\n f\"Failed to get a response from the API: \\n{error}\\n\" f\"Retrying... ({itr+1}/{max_retry})\"\n )","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api._extract_wait_time","uri":"program://AgentLab/function/src.agentlab.llm.chat_api._extract_wait_time#L196-L201","kind":"function","name":"_extract_wait_time","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":196,"end_line":201,"context_start_line":176,"context_end_line":221,"code":" vision_support: bool = False\n shard_support: bool = True\n extra_tgi_args: dict = None\n tgi_image: str = None\n info: dict = None\n\n def __post_init__(self):\n import warnings\n\n warnings.simplefilter(\"always\", DeprecationWarning)\n warnings.warn(\n \"ChatModelArgs is deprecated and used only for xray. 
Use one of the specific model args classes instead.\",\n DeprecationWarning,\n )\n warnings.simplefilter(\"default\", DeprecationWarning)\n\n def make_model(self):\n pass\n\n\ndef _extract_wait_time(error_message, min_retry_wait_time=60):\n \"\"\"Extract the wait time from an OpenAI RateLimitError message.\"\"\"\n match = re.search(r\"try again in (\\d+(\\.\\d+)?)s\", error_message)\n if match:\n return max(min_retry_wait_time, float(match.group(1)))\n return min_retry_wait_time\n\n\nclass RetryError(Exception):\n pass\n\n\ndef handle_error(error, itr, min_retry_wait_time, max_retry):\n if not isinstance(error, openai.OpenAIError):\n raise error\n logging.warning(\n f\"Failed to get a response from the API: \\n{error}\\n\" f\"Retrying... ({itr+1}/{max_retry})\"\n )\n wait_time = _extract_wait_time(\n error.args[0],\n min_retry_wait_time=min_retry_wait_time,\n )\n logging.info(f\"Waiting for {wait_time} seconds\")\n time.sleep(wait_time)\n error_type = error.args[0]\n return error_type","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.RetryError","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.RetryError#L204-L205","kind":"class","name":"RetryError","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":204,"end_line":205,"context_start_line":184,"context_end_line":225,"code":"\n warnings.simplefilter(\"always\", DeprecationWarning)\n warnings.warn(\n \"ChatModelArgs is deprecated and used only for xray. Use one of the specific model args classes instead.\",\n DeprecationWarning,\n )\n warnings.simplefilter(\"default\", DeprecationWarning)\n\n def make_model(self):\n pass\n\n\ndef _extract_wait_time(error_message, min_retry_wait_time=60):\n \"\"\"Extract the wait time from an OpenAI RateLimitError message.\"\"\"\n match = re.search(r\"try again in (\\d+(\\.\\d+)?)s\", error_message)\n if match:\n return max(min_retry_wait_time, float(match.group(1)))\n return min_retry_wait_time\n\n\nclass RetryError(Exception):\n pass\n\n\ndef handle_error(error, itr, min_retry_wait_time, max_retry):\n if not isinstance(error, openai.OpenAIError):\n raise error\n logging.warning(\n f\"Failed to get a response from the API: \\n{error}\\n\" f\"Retrying... 
({itr+1}/{max_retry})\"\n )\n wait_time = _extract_wait_time(\n error.args[0],\n min_retry_wait_time=min_retry_wait_time,\n )\n logging.info(f\"Waiting for {wait_time} seconds\")\n time.sleep(wait_time)\n error_type = error.args[0]\n return error_type\n\n\nclass OpenRouterError(openai.OpenAIError):\n pass","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.handle_error","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.handle_error#L208-L221","kind":"function","name":"handle_error","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":208,"end_line":221,"context_start_line":188,"context_end_line":241,"code":" DeprecationWarning,\n )\n warnings.simplefilter(\"default\", DeprecationWarning)\n\n def make_model(self):\n pass\n\n\ndef _extract_wait_time(error_message, min_retry_wait_time=60):\n \"\"\"Extract the wait time from an OpenAI RateLimitError message.\"\"\"\n match = re.search(r\"try again in (\\d+(\\.\\d+)?)s\", error_message)\n if match:\n return max(min_retry_wait_time, float(match.group(1)))\n return min_retry_wait_time\n\n\nclass RetryError(Exception):\n pass\n\n\ndef handle_error(error, itr, min_retry_wait_time, max_retry):\n if not isinstance(error, openai.OpenAIError):\n raise error\n logging.warning(\n f\"Failed to get a response from the API: \\n{error}\\n\" f\"Retrying... ({itr+1}/{max_retry})\"\n )\n wait_time = _extract_wait_time(\n error.args[0],\n min_retry_wait_time=min_retry_wait_time,\n )\n logging.info(f\"Waiting for {wait_time} seconds\")\n time.sleep(wait_time)\n error_type = error.args[0]\n return error_type\n\n\nclass OpenRouterError(openai.OpenAIError):\n pass\n\n\nclass ChatModel(AbstractChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n api_key_env_var=None,\n client_class=OpenAI,\n client_args=None,\n pricing_func=None,\n log_probs=False,","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.OpenRouterError","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.OpenRouterError#L224-L225","kind":"class","name":"OpenRouterError","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":224,"end_line":225,"context_start_line":204,"context_end_line":245,"code":"class RetryError(Exception):\n pass\n\n\ndef handle_error(error, itr, min_retry_wait_time, max_retry):\n if not isinstance(error, openai.OpenAIError):\n raise error\n logging.warning(\n f\"Failed to get a response from the API: \\n{error}\\n\" f\"Retrying... 
({itr+1}/{max_retry})\"\n )\n wait_time = _extract_wait_time(\n error.args[0],\n min_retry_wait_time=min_retry_wait_time,\n )\n logging.info(f\"Waiting for {wait_time} seconds\")\n time.sleep(wait_time)\n error_type = error.args[0]\n return error_type\n\n\nclass OpenRouterError(openai.OpenAIError):\n pass\n\n\nclass ChatModel(AbstractChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n api_key_env_var=None,\n client_class=OpenAI,\n client_args=None,\n pricing_func=None,\n log_probs=False,\n ):\n assert max_retry > 0, \"max_retry should be greater than 0\"\n\n self.model_name = model_name","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.ChatModel","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.ChatModel#L228-L338","kind":"class","name":"ChatModel","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":228,"end_line":338,"context_start_line":208,"context_end_line":358,"code":"def handle_error(error, itr, min_retry_wait_time, max_retry):\n if not isinstance(error, openai.OpenAIError):\n raise error\n logging.warning(\n f\"Failed to get a response from the API: \\n{error}\\n\" f\"Retrying... ({itr+1}/{max_retry})\"\n )\n wait_time = _extract_wait_time(\n error.args[0],\n min_retry_wait_time=min_retry_wait_time,\n )\n logging.info(f\"Waiting for {wait_time} seconds\")\n time.sleep(wait_time)\n error_type = error.args[0]\n return error_type\n\n\nclass OpenRouterError(openai.OpenAIError):\n pass\n\n\nclass ChatModel(AbstractChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n api_key_env_var=None,\n client_class=OpenAI,\n client_args=None,\n pricing_func=None,\n log_probs=False,\n ):\n assert max_retry > 0, \"max_retry should be greater than 0\"\n\n self.model_name = model_name\n self.temperature = temperature\n self.max_tokens = max_tokens\n self.max_retry = max_retry\n self.min_retry_wait_time = min_retry_wait_time\n self.log_probs = log_probs\n\n # Get the API key from the environment variable if not provided\n if api_key_env_var:\n api_key = api_key or os.getenv(api_key_env_var)\n self.api_key = api_key\n\n # Get pricing information\n if pricing_func:\n pricings = pricing_func()\n try:\n self.input_cost = float(pricings[model_name][\"prompt\"])\n self.output_cost = float(pricings[model_name][\"completion\"])\n except KeyError:\n logging.warning(\n f\"Model {model_name} not found in the pricing information, prices are set to 0. 
Maybe try upgrading langchain_community.\"\n )\n self.input_cost = 0.0\n self.output_cost = 0.0\n else:\n self.input_cost = 0.0\n self.output_cost = 0.0\n\n client_args = client_args or {}\n self.client = client_class(\n api_key=api_key,\n **client_args,\n )\n\n def __call__(self, messages: list[dict], n_samples: int = 1, temperature: float = None) -> dict:\n # Initialize retry tracking attributes\n self.retries = 0\n self.success = False\n self.error_types = []\n\n completion = None\n e = None\n for itr in range(self.max_retry):\n self.retries += 1\n temperature = temperature if temperature is not None else self.temperature\n try:\n completion = self.client.chat.completions.create(\n model=self.model_name,\n messages=messages,\n n=n_samples,\n temperature=temperature,\n max_completion_tokens=self.max_tokens,\n logprobs=self.log_probs,\n )\n\n if completion.usage is None:\n raise OpenRouterError(\n \"The completion object does not contain usage information. This is likely a bug in the OpenRouter API.\"\n )\n\n self.success = True\n break\n except openai.OpenAIError as e:\n error_type = handle_error(e, itr, self.min_retry_wait_time, self.max_retry)\n self.error_types.append(error_type)\n\n if not completion:\n raise RetryError(\n f\"Failed to get a response from the API after {self.max_retry} retries\\n\"\n f\"Last error: {error_type}\"\n )\n\n input_tokens = completion.usage.prompt_tokens\n output_tokens = completion.usage.completion_tokens\n cost = input_tokens * self.input_cost + output_tokens * self.output_cost\n\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(input_tokens, output_tokens, cost)\n\n if n_samples == 1:\n res = AIMessage(completion.choices[0].message.content)\n if self.log_probs:\n res[\"log_probs\"] = completion.choices[0].log_probs\n return res\n else:\n return [AIMessage(c.message.content) for c in completion.choices]\n\n def get_stats(self):\n return {\n \"n_retry_llm\": self.retries,\n # \"busted_retry_llm\": int(not self.success), # not logged if it occurs anyways\n }\n\n\nclass OpenAIChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n if max_tokens is None:\n max_tokens = NOT_GIVEN\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.OpenAIChatModel","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.OpenAIChatModel#L341-L365","kind":"class","name":"OpenAIChatModel","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":341,"end_line":365,"context_start_line":321,"context_end_line":385,"code":" if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(input_tokens, output_tokens, cost)\n\n if n_samples == 1:\n res = AIMessage(completion.choices[0].message.content)\n if self.log_probs:\n res[\"log_probs\"] = completion.choices[0].log_probs\n return res\n else:\n return [AIMessage(c.message.content) for c in completion.choices]\n\n def get_stats(self):\n return {\n \"n_retry_llm\": self.retries,\n # \"busted_retry_llm\": int(not self.success), # not logged if it occurs anyways\n }\n\n\nclass OpenAIChatModel(ChatModel):\n 
def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n if max_tokens is None:\n max_tokens = NOT_GIVEN\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=max_retry,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"OPENAI_API_KEY\",\n client_class=OpenAI,\n pricing_func=partial(tracking.get_pricing_litellm, model_name=model_name),\n log_probs=log_probs,\n )\n\n\nclass OpenRouterChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n client_args = {\n \"base_url\": \"https://openrouter.ai/api/v1\",\n }\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.OpenRouterChatModel","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.OpenRouterChatModel#L368-L394","kind":"class","name":"OpenRouterChatModel","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":368,"end_line":394,"context_start_line":348,"context_end_line":414,"code":" max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n if max_tokens is None:\n max_tokens = NOT_GIVEN\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=max_retry,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"OPENAI_API_KEY\",\n client_class=OpenAI,\n pricing_func=partial(tracking.get_pricing_litellm, model_name=model_name),\n log_probs=log_probs,\n )\n\n\nclass OpenRouterChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n client_args = {\n \"base_url\": \"https://openrouter.ai/api/v1\",\n }\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=max_retry,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"OPENROUTER_API_KEY\",\n client_class=OpenAI,\n client_args=client_args,\n pricing_func=tracking.get_pricing_openrouter,\n log_probs=log_probs,\n )\n\n\nclass AzureChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n deployment_name=None,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n api_key = api_key or os.getenv(\"AZURE_OPENAI_API_KEY\")\n assert (\n api_key\n ), \"AZURE_OPENAI_API_KEY has to be defined in the environment when using AzureChatModel\"\n endpoint = os.getenv(\"AZURE_OPENAI_ENDPOINT\")\n assert (","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.AzureChatModel","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.AzureChatModel#L397-L438","kind":"class","name":"AzureChatModel","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":397,"end_line":438,"context_start_line":377,"context_end_line":458,"code":" log_probs=False,\n ):\n client_args = {\n \"base_url\": \"https://openrouter.ai/api/v1\",\n }\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n 
max_tokens=max_tokens,\n max_retry=max_retry,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"OPENROUTER_API_KEY\",\n client_class=OpenAI,\n client_args=client_args,\n pricing_func=tracking.get_pricing_openrouter,\n log_probs=log_probs,\n )\n\n\nclass AzureChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n deployment_name=None,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n api_key = api_key or os.getenv(\"AZURE_OPENAI_API_KEY\")\n assert (\n api_key\n ), \"AZURE_OPENAI_API_KEY has to be defined in the environment when using AzureChatModel\"\n endpoint = os.getenv(\"AZURE_OPENAI_ENDPOINT\")\n assert (\n endpoint\n ), \"AZURE_OPENAI_ENDPOINT has to be defined in the environment when using AzureChatModel\"\n\n if deployment_name is not None:\n logging.info(\n f\"Deployment name is deprecated for Azure OpenAI and won't be used. Using model name: {model_name}.\"\n )\n\n client_args = {\n \"base_url\": endpoint,\n \"default_query\": {\"api-version\": \"preview\"},\n }\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=max_retry,\n min_retry_wait_time=min_retry_wait_time,\n client_class=OpenAI,\n client_args=client_args,\n pricing_func=tracking.get_pricing_openai,\n log_probs=log_probs,\n )\n\n\ndef __getattr__(name: str):\n \"\"\"Lazy re-export of optional classes to keep imports light.\n\n This lets users import HuggingFaceURLChatModel from agentlab.llm.chat_api\n without importing heavy dependencies unless actually used.\n\n Args:\n name: The name of the attribute to retrieve.\n\n Returns:\n The requested class or raises AttributeError if not found.\n\n Raises:\n AttributeError: If the requested attribute is not available.\n \"\"\"\n if name == \"HuggingFaceURLChatModel\":\n from agentlab.llm.huggingface_utils import HuggingFaceURLChatModel\n","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.__getattr__","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.__getattr__#L441-L460","kind":"function","name":"__getattr__","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":441,"end_line":460,"context_start_line":421,"context_end_line":480,"code":" )\n\n client_args = {\n \"base_url\": endpoint,\n \"default_query\": {\"api-version\": \"preview\"},\n }\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=max_retry,\n min_retry_wait_time=min_retry_wait_time,\n client_class=OpenAI,\n client_args=client_args,\n pricing_func=tracking.get_pricing_openai,\n log_probs=log_probs,\n )\n\n\ndef __getattr__(name: str):\n \"\"\"Lazy re-export of optional classes to keep imports light.\n\n This lets users import HuggingFaceURLChatModel from agentlab.llm.chat_api\n without importing heavy dependencies unless actually used.\n\n Args:\n name: The name of the attribute to retrieve.\n\n Returns:\n The requested class or raises AttributeError if not found.\n\n Raises:\n AttributeError: If the requested attribute is not available.\n \"\"\"\n if name == \"HuggingFaceURLChatModel\":\n from agentlab.llm.huggingface_utils import HuggingFaceURLChatModel\n\n return HuggingFaceURLChatModel\n raise AttributeError(name)\n\n\nclass VLLMChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n 
max_tokens=100,\n n_retry_server=4,\n min_retry_wait_time=60,\n ):\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=n_retry_server,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"VLLM_API_KEY\",","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.VLLMChatModel","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.VLLMChatModel#L463-L484","kind":"class","name":"VLLMChatModel","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":463,"end_line":484,"context_start_line":443,"context_end_line":504,"code":"\n This lets users import HuggingFaceURLChatModel from agentlab.llm.chat_api\n without importing heavy dependencies unless actually used.\n\n Args:\n name: The name of the attribute to retrieve.\n\n Returns:\n The requested class or raises AttributeError if not found.\n\n Raises:\n AttributeError: If the requested attribute is not available.\n \"\"\"\n if name == \"HuggingFaceURLChatModel\":\n from agentlab.llm.huggingface_utils import HuggingFaceURLChatModel\n\n return HuggingFaceURLChatModel\n raise AttributeError(name)\n\n\nclass VLLMChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n n_retry_server=4,\n min_retry_wait_time=60,\n ):\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=n_retry_server,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"VLLM_API_KEY\",\n client_class=OpenAI,\n client_args={\"base_url\": \"http://0.0.0.0:8000/v1\"},\n pricing_func=None,\n )\n\n\nclass AnthropicChatModel(AbstractChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n ):\n self.model_name = model_name\n self.temperature = temperature\n self.max_tokens = max_tokens\n self.max_retry = max_retry\n\n api_key = api_key or os.getenv(\"ANTHROPIC_API_KEY\")\n self.client = anthropic.Anthropic(api_key=api_key)\n\n def __call__(self, messages: list[dict], n_samples: int = 1, temperature: float = None) -> dict:","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.AnthropicChatModel","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.AnthropicChatModel#L487-L545","kind":"class","name":"AnthropicChatModel","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":487,"end_line":545,"context_start_line":467,"context_end_line":555,"code":" api_key=None,\n temperature=0.5,\n max_tokens=100,\n n_retry_server=4,\n min_retry_wait_time=60,\n ):\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=n_retry_server,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"VLLM_API_KEY\",\n client_class=OpenAI,\n client_args={\"base_url\": \"http://0.0.0.0:8000/v1\"},\n pricing_func=None,\n )\n\n\nclass AnthropicChatModel(AbstractChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n ):\n self.model_name = model_name\n self.temperature = temperature\n self.max_tokens = max_tokens\n self.max_retry = max_retry\n\n api_key = api_key or os.getenv(\"ANTHROPIC_API_KEY\")\n self.client = 
anthropic.Anthropic(api_key=api_key)\n\n def __call__(self, messages: list[dict], n_samples: int = 1, temperature: float = None) -> dict:\n # Convert OpenAI format to Anthropic format\n system_message = None\n anthropic_messages = []\n\n for msg in messages:\n if msg[\"role\"] == \"system\":\n system_message = msg[\"content\"]\n else:\n anthropic_messages.append({\"role\": msg[\"role\"], \"content\": msg[\"content\"]})\n\n temperature = temperature if temperature is not None else self.temperature\n\n for attempt in range(self.max_retry):\n try:\n kwargs = {\n \"model\": self.model_name,\n \"messages\": anthropic_messages,\n \"max_tokens\": self.max_tokens,\n \"temperature\": temperature,\n }\n\n if system_message:\n kwargs[\"system\"] = system_message\n\n response = self.client.messages.create(**kwargs)\n\n # Track usage if available\n if hasattr(tracking.TRACKER, \"instance\"):\n tracking.TRACKER.instance(\n response.usage.input_tokens,\n response.usage.output_tokens,\n 0, # cost calculation would need pricing info\n )\n\n return AIMessage(response.content[0].text)\n\n except Exception as e:\n if attempt == self.max_retry - 1:\n raise e\n logging.warning(f\"Anthropic API error (attempt {attempt + 1}): {e}\")\n time.sleep(60) # Simple retry delay\n\n\n@dataclass\nclass AnthropicModelArgs(BaseModelArgs):\n def make_model(self):\n return AnthropicChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.AnthropicModelArgs","uri":"program://AgentLab/class/src.agentlab.llm.chat_api.AnthropicModelArgs#L549-L555","kind":"class","name":"AnthropicModelArgs","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":549,"end_line":555,"context_start_line":529,"context_end_line":555,"code":" response = self.client.messages.create(**kwargs)\n\n # Track usage if available\n if hasattr(tracking.TRACKER, \"instance\"):\n tracking.TRACKER.instance(\n response.usage.input_tokens,\n response.usage.output_tokens,\n 0, # cost calculation would need pricing info\n )\n\n return AIMessage(response.content[0].text)\n\n except Exception as e:\n if attempt == self.max_retry - 1:\n raise e\n logging.warning(f\"Anthropic API error (attempt {attempt + 1}): {e}\")\n time.sleep(60) # Simple retry delay\n\n\n@dataclass\nclass AnthropicModelArgs(BaseModelArgs):\n def make_model(self):\n return AnthropicChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.__init__","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.__init__#L488-L502","kind":"function","name":"__init__","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":488,"end_line":502,"context_start_line":468,"context_end_line":522,"code":" temperature=0.5,\n max_tokens=100,\n n_retry_server=4,\n min_retry_wait_time=60,\n ):\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,\n max_retry=n_retry_server,\n min_retry_wait_time=min_retry_wait_time,\n api_key_env_var=\"VLLM_API_KEY\",\n client_class=OpenAI,\n client_args={\"base_url\": \"http://0.0.0.0:8000/v1\"},\n pricing_func=None,\n )\n\n\nclass 
AnthropicChatModel(AbstractChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n ):\n self.model_name = model_name\n self.temperature = temperature\n self.max_tokens = max_tokens\n self.max_retry = max_retry\n\n api_key = api_key or os.getenv(\"ANTHROPIC_API_KEY\")\n self.client = anthropic.Anthropic(api_key=api_key)\n\n def __call__(self, messages: list[dict], n_samples: int = 1, temperature: float = None) -> dict:\n # Convert OpenAI format to Anthropic format\n system_message = None\n anthropic_messages = []\n\n for msg in messages:\n if msg[\"role\"] == \"system\":\n system_message = msg[\"content\"]\n else:\n anthropic_messages.append({\"role\": msg[\"role\"], \"content\": msg[\"content\"]})\n\n temperature = temperature if temperature is not None else self.temperature\n\n for attempt in range(self.max_retry):\n try:\n kwargs = {\n \"model\": self.model_name,\n \"messages\": anthropic_messages,\n \"max_tokens\": self.max_tokens,","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.__call__","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.__call__#L504-L545","kind":"function","name":"__call__","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":504,"end_line":545,"context_start_line":484,"context_end_line":555,"code":" )\n\n\nclass AnthropicChatModel(AbstractChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n ):\n self.model_name = model_name\n self.temperature = temperature\n self.max_tokens = max_tokens\n self.max_retry = max_retry\n\n api_key = api_key or os.getenv(\"ANTHROPIC_API_KEY\")\n self.client = anthropic.Anthropic(api_key=api_key)\n\n def __call__(self, messages: list[dict], n_samples: int = 1, temperature: float = None) -> dict:\n # Convert OpenAI format to Anthropic format\n system_message = None\n anthropic_messages = []\n\n for msg in messages:\n if msg[\"role\"] == \"system\":\n system_message = msg[\"content\"]\n else:\n anthropic_messages.append({\"role\": msg[\"role\"], \"content\": msg[\"content\"]})\n\n temperature = temperature if temperature is not None else self.temperature\n\n for attempt in range(self.max_retry):\n try:\n kwargs = {\n \"model\": self.model_name,\n \"messages\": anthropic_messages,\n \"max_tokens\": self.max_tokens,\n \"temperature\": temperature,\n }\n\n if system_message:\n kwargs[\"system\"] = system_message\n\n response = self.client.messages.create(**kwargs)\n\n # Track usage if available\n if hasattr(tracking.TRACKER, \"instance\"):\n tracking.TRACKER.instance(\n response.usage.input_tokens,\n response.usage.output_tokens,\n 0, # cost calculation would need pricing info\n )\n\n return AIMessage(response.content[0].text)\n\n except Exception as e:\n if attempt == self.max_retry - 1:\n raise e\n logging.warning(f\"Anthropic API error (attempt {attempt + 1}): {e}\")\n time.sleep(60) # Simple retry delay\n\n\n@dataclass\nclass AnthropicModelArgs(BaseModelArgs):\n def make_model(self):\n return AnthropicChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} 
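AnthropicChatModel.__call__ above accepts the same OpenAI-format message list as the other backends, but the Anthropic Messages API takes the system prompt as a separate parameter rather than as a role="system" entry. The sketch below, with hypothetical message contents, replays the split the method performs before calling client.messages.create(...):

# Hypothetical inputs; this mirrors the conversion loop in AnthropicChatModel.__call__.
messages = [
    {"role": "system", "content": "You are a careful web agent."},
    {"role": "user", "content": "Click the submit button."},
]

system_message = None
anthropic_messages = []
for msg in messages:
    if msg["role"] == "system":
        system_message = msg["content"]  # forwarded as the "system" kwarg
    else:
        anthropic_messages.append({"role": msg["role"], "content": msg["content"]})

# system_message == "You are a careful web agent."
# anthropic_messages == [{"role": "user", "content": "Click the submit button."}]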
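Taken together, the *ModelArgs dataclasses and ChatModel give a two-step workflow: a serializable args object describes the model, and make_model() builds the callable client. A minimal sketch, assuming OPENAI_API_KEY is set in the environment and using illustrative token budgets in the style of CHAT_MODEL_ARGS_DICT (defined in llm_configs below):

from agentlab.llm.chat_api import OpenAIModelArgs

# Illustrative configuration, not an official preset.
model_args = OpenAIModelArgs(
    model_name="gpt-4o-mini-2024-07-18",
    max_total_tokens=128_000,
    max_input_tokens=128_000,
    max_new_tokens=16_384,
    temperature=0.1,
)

chat = model_args.make_model()  # returns an OpenAIChatModel
answer = chat([{"role": "user", "content": "Say hello."}])
print(answer["content"])  # AIMessage is dict-like; retries and backoff happen inside __call__

The same pattern applies to OpenRouterModelArgs, AzureModelArgs, and AnthropicModelArgs; only the required environment variables differ (OPENROUTER_API_KEY, AZURE_OPENAI_API_KEY plus AZURE_OPENAI_ENDPOINT, ANTHROPIC_API_KEY).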
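The retry loop in ChatModel.__call__ defers to handle_error, which uses _extract_wait_time to honor any "try again in Ns" hint in an OpenAI rate-limit message, floored at min_retry_wait_time. A small behavioral sketch (the error string is made up):

from agentlab.llm.chat_api import _extract_wait_time

msg = "Rate limit reached for gpt-4o-mini: please try again in 2.5s."
_extract_wait_time(msg)                         # 60: the parsed hint is below the default 60 s floor
_extract_wait_time(msg, min_retry_wait_time=1)  # 2.5: the parsed hint wins over a lower floor
_extract_wait_time("no hint in this message")   # 60: no match, falls back to min_retry_wait_time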
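SelfHostedModelArgs dispatches on its backend field: "huggingface" targets a TGI server (resolving model_url and token from AGENTLAB_MODEL_URL / AGENTLAB_MODEL_TOKEN when not provided), while "vllm" builds a VLLMChatModel that talks to an OpenAI-compatible endpoint at http://0.0.0.0:8000/v1 using VLLM_API_KEY. A sketch with illustrative values, assuming such a vLLM server is already running:

from agentlab.llm.chat_api import SelfHostedModelArgs

# Illustrative args mirroring the OSS entries in CHAT_MODEL_ARGS_DICT.
args = SelfHostedModelArgs(
    model_name="meta-llama/Meta-Llama-3-8B-Instruct",
    max_total_tokens=16_384,
    max_input_tokens=16_384 - 512,
    max_new_tokens=512,
    temperature=0.01,
    backend="vllm",  # "huggingface" would lazily import HuggingFaceURLChatModel instead
)
chat = args.make_model()  # VLLMChatModel, i.e. the OpenAI client pointed at the local server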
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.make_model","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.make_model#L550-L555","kind":"function","name":"make_model","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":550,"end_line":555,"context_start_line":530,"context_end_line":555,"code":"\n # Track usage if available\n if hasattr(tracking.TRACKER, \"instance\"):\n tracking.TRACKER.instance(\n response.usage.input_tokens,\n response.usage.output_tokens,\n 0, # cost calculation would need pricing info\n )\n\n return AIMessage(response.content[0].text)\n\n except Exception as e:\n if attempt == self.max_retry - 1:\n raise e\n logging.warning(f\"Anthropic API error (attempt {attempt + 1}): {e}\")\n time.sleep(60) # Simple retry delay\n\n\n@dataclass\nclass AnthropicModelArgs(BaseModelArgs):\n def make_model(self):\n return AnthropicChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n )","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.prepare_server","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.prepare_server#L72-L73","kind":"function","name":"prepare_server","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":72,"end_line":73,"context_start_line":52,"context_end_line":93,"code":"\n answer = f\"\"\"I'm clicking the button as requested.\n\n{action}\n\n\"\"\"\n return make_assistant_message(answer)\n\n\n@dataclass\nclass CheatMiniWoBLLMArgs:\n model_name = \"test/cheat_miniwob_click_test\"\n max_total_tokens = 10240\n max_input_tokens = 8000\n max_new_tokens = 128\n wait_time: int = 0\n\n def make_model(self):\n return CheatMiniWoBLLM(self.wait_time)\n\n def prepare_server(self):\n pass\n\n def close_server(self):\n pass\n\n\n@dataclass\nclass OpenRouterModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenRouter\n model.\"\"\"\n\n def make_model(self):\n return OpenRouterChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.close_server","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.close_server#L75-L76","kind":"function","name":"close_server","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":75,"end_line":76,"context_start_line":55,"context_end_line":96,"code":"{action}\n
    \n\"\"\"\n return make_assistant_message(answer)\n\n\n@dataclass\nclass CheatMiniWoBLLMArgs:\n model_name = \"test/cheat_miniwob_click_test\"\n max_total_tokens = 10240\n max_input_tokens = 8000\n max_new_tokens = 128\n wait_time: int = 0\n\n def make_model(self):\n return CheatMiniWoBLLM(self.wait_time)\n\n def prepare_server(self):\n pass\n\n def close_server(self):\n pass\n\n\n@dataclass\nclass OpenRouterModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenRouter\n model.\"\"\"\n\n def make_model(self):\n return OpenRouterChatModel(\n model_name=self.model_name,\n temperature=self.temperature,\n max_tokens=self.max_new_tokens,\n log_probs=self.log_probs,\n )\n\n\n@dataclass\nclass OpenAIModelArgs(BaseModelArgs):\n \"\"\"Serializable object for instantiating a generic chat model with an OpenAI\n model.\"\"\"","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.__post_init__","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.__post_init__#L182-L190","kind":"function","name":"__post_init__","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":182,"end_line":190,"context_start_line":162,"context_end_line":210,"code":"\n@dataclass\nclass ChatModelArgs(BaseModelArgs):\n \"\"\"Object added for backward compatibility with the old ChatModelArgs.\"\"\"\n\n model_path: str = None\n model_url: str = None\n model_size: str = None\n training_total_tokens: int = None\n hf_hosted: bool = False\n is_model_operational: str = False\n sliding_window: bool = False\n n_retry_server: int = 4\n infer_tokens_length: bool = False\n vision_support: bool = False\n shard_support: bool = True\n extra_tgi_args: dict = None\n tgi_image: str = None\n info: dict = None\n\n def __post_init__(self):\n import warnings\n\n warnings.simplefilter(\"always\", DeprecationWarning)\n warnings.warn(\n \"ChatModelArgs is deprecated and used only for xray. 
Use one of the specific model args classes instead.\",\n DeprecationWarning,\n )\n warnings.simplefilter(\"default\", DeprecationWarning)\n\n def make_model(self):\n pass\n\n\ndef _extract_wait_time(error_message, min_retry_wait_time=60):\n \"\"\"Extract the wait time from an OpenAI RateLimitError message.\"\"\"\n match = re.search(r\"try again in (\\d+(\\.\\d+)?)s\", error_message)\n if match:\n return max(min_retry_wait_time, float(match.group(1)))\n return min_retry_wait_time\n\n\nclass RetryError(Exception):\n pass\n\n\ndef handle_error(error, itr, min_retry_wait_time, max_retry):\n if not isinstance(error, openai.OpenAIError):\n raise error","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.chat_api.get_stats","uri":"program://AgentLab/function/src.agentlab.llm.chat_api.get_stats#L334-L338","kind":"function","name":"get_stats","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":334,"end_line":338,"context_start_line":314,"context_end_line":358,"code":" f\"Last error: {error_type}\"\n )\n\n input_tokens = completion.usage.prompt_tokens\n output_tokens = completion.usage.completion_tokens\n cost = input_tokens * self.input_cost + output_tokens * self.output_cost\n\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(input_tokens, output_tokens, cost)\n\n if n_samples == 1:\n res = AIMessage(completion.choices[0].message.content)\n if self.log_probs:\n res[\"log_probs\"] = completion.choices[0].log_probs\n return res\n else:\n return [AIMessage(c.message.content) for c in completion.choices]\n\n def get_stats(self):\n return {\n \"n_retry_llm\": self.retries,\n # \"busted_retry_llm\": int(not self.success), # not logged if it occurs anyways\n }\n\n\nclass OpenAIChatModel(ChatModel):\n def __init__(\n self,\n model_name,\n api_key=None,\n temperature=0.5,\n max_tokens=100,\n max_retry=4,\n min_retry_wait_time=60,\n log_probs=False,\n ):\n if max_tokens is None:\n max_tokens = NOT_GIVEN\n super().__init__(\n model_name=model_name,\n api_key=api_key,\n temperature=temperature,\n max_tokens=max_tokens,","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_configs","uri":"program://AgentLab/module/src.agentlab.llm.llm_configs#L1-L360","kind":"module","name":"src.agentlab.llm.llm_configs","path":"src/agentlab/llm/llm_configs.py","language":"python","start_line":1,"end_line":360,"context_start_line":1,"context_end_line":360,"code":"from openai import NOT_GIVEN\n\nfrom agentlab.llm.chat_api import (\n AnthropicModelArgs,\n AzureModelArgs,\n OpenAIModelArgs,\n OpenRouterModelArgs,\n SelfHostedModelArgs,\n)\n\ndefault_oss_llms_args = {\n \"n_retry_server\": 4,\n \"temperature\": 0.01,\n}\n\nCLOSED_SOURCE_APIS = [\n \"openai\",\n \"reka\",\n \"test\",\n]\n\nCHAT_MODEL_ARGS_DICT = {\n \"openai/gpt-5-2025-08-07\": OpenAIModelArgs(\n model_name=\"gpt-5-2025-08-07\",\n max_total_tokens=400_000,\n max_input_tokens=256_000,\n max_new_tokens=128_000,\n temperature=1, # gpt-5 supports temperature of 1 only\n vision_support=True,\n ),\n \"openai/gpt-5-nano-2025-08-07\": OpenAIModelArgs(\n model_name=\"gpt-5-nano-2025-08-07\",\n max_total_tokens=400_000,\n max_input_tokens=256_000,\n max_new_tokens=128_000,\n temperature=1, # gpt-5 supports temperature of 1 only\n vision_support=True,\n ),\n 
\"openai/gpt-5-mini-2025-08-07\": OpenAIModelArgs(\n model_name=\"gpt-5-mini-2025-08-07\",\n max_total_tokens=400_000,\n max_input_tokens=256_000,\n max_new_tokens=128_000,\n temperature=1, # gpt-5 supports temperature of 1 only\n vision_support=True,\n ),\n \"openai/gpt-4.1-nano-2025-04-14\": OpenAIModelArgs(\n model_name=\"gpt-4.1-nano-2025-04-14\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=16_384,\n vision_support=True,\n ),\n \"openai/gpt-4.1-mini-2025-04-14\": OpenAIModelArgs(\n model_name=\"gpt-4.1-mini-2025-04-14\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=16_384,\n vision_support=True,\n ),\n \"openai/gpt-4.1-2025-04-14\": OpenAIModelArgs(\n model_name=\"gpt-4.1-2025-04-14\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=16_384,\n vision_support=True,\n ),\n \"openai/o3-mini-2025-01-31\": OpenAIModelArgs(\n model_name=\"o3-mini-2025-01-31\",\n max_total_tokens=200_000,\n max_input_tokens=200_000,\n max_new_tokens=100_000,\n vision_support=False,\n ),\n \"openai/o3-2025-04-16\": OpenAIModelArgs(\n model_name=\"o3-2025-04-16\",\n max_total_tokens=200_000,\n max_input_tokens=200_000,\n max_new_tokens=None,\n temperature=1,\n vision_support=True,\n ),\n \"openai/gpt-4o-mini-2024-07-18\": OpenAIModelArgs(\n model_name=\"gpt-4o-mini-2024-07-18\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=16_384,\n vision_support=True,\n ),\n \"openai/gpt-4-1106-preview\": OpenAIModelArgs(\n model_name=\"gpt-4-1106-preview\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=4_096,\n ),\n \"openai/gpt-4-vision-preview\": OpenAIModelArgs(\n model_name=\"gpt-4-vision-preview\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=16_384, # I think this model has very small default value if we don't set max_new_tokens\n vision_support=True,\n ),\n \"openai/gpt-4o-2024-05-13\": OpenAIModelArgs(\n model_name=\"gpt-4o-2024-05-13\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=4_096, # I think this model has very small default value if we don't set max_new_tokens\n vision_support=True,\n ),\n \"openai/gpt-3.5-turbo-0125\": OpenAIModelArgs(\n model_name=\"gpt-3.5-turbo-0125\",\n max_total_tokens=16_384,\n max_input_tokens=16_384,\n max_new_tokens=4096,\n ),\n \"openai/gpt-3.5-turbo-1106\": OpenAIModelArgs(\n model_name=\"gpt-3.5-turbo-1106\",\n max_total_tokens=16_384,\n max_input_tokens=16_384,\n max_new_tokens=4096,\n ),\n \"openai/o1-mini\": OpenAIModelArgs(\n model_name=\"openai/o1-mini\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=64_000,\n temperature=1e-1,\n ),\n \"azure/gpt-35-turbo/gpt-35-turbo\": AzureModelArgs(\n model_name=\"gpt-35-turbo\",\n max_total_tokens=8_192,\n max_input_tokens=7500,\n max_new_tokens=500,\n ),\n \"azure/gpt-4o-2024-05-13\": AzureModelArgs(\n model_name=\"gpt-4o\",\n max_total_tokens=128_000,\n max_input_tokens=100_000,\n max_new_tokens=16_384,\n vision_support=True,\n ),\n \"azure/gpt-4o-2024-08-06\": AzureModelArgs(\n model_name=\"gpt-4o\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=16_384,\n vision_support=True,\n ),\n \"azure/gpt-4o-mini-2024-07-18\": AzureModelArgs(\n model_name=\"gpt-4o-mini\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=16_384,\n vision_support=True,\n ),\n \"azure/gpt-4.1-2025-04-14\": AzureModelArgs(\n model_name=\"gpt-4.1\",\n max_total_tokens=128_000,\n 
max_input_tokens=128_000,\n max_new_tokens=16_384,\n vision_support=True,\n ),\n \"azure/gpt-4.1-mini-2025-04-14\": AzureModelArgs(\n model_name=\"gpt-4.1-mini\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=16_384,\n vision_support=True,\n ),\n \"azure/gpt-4.1-nano-2025-04-14\": AzureModelArgs(\n model_name=\"gpt-4.1-nano\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=16_384,\n vision_support=True,\n ),\n \"azure/gpt-5-2025-08-07\": AzureModelArgs(\n model_name=\"gpt-5\",\n max_total_tokens=400_000,\n max_input_tokens=256_000,\n max_new_tokens=128_000,\n temperature=1, # temperature param not supported by gpt-5\n vision_support=True,\n ),\n \"azure/gpt-5-mini-2025-08-07\": AzureModelArgs(\n model_name=\"gpt-5-mini\",\n max_total_tokens=400_000,\n max_input_tokens=256_000,\n max_new_tokens=128_000,\n temperature=1, # temperature param not supported by gpt-5\n vision_support=True,\n ),\n \"azure/gpt-5-nano-2025-08-07\": AzureModelArgs(\n model_name=\"gpt-5-nano\",\n max_total_tokens=400_000,\n max_input_tokens=256_000,\n max_new_tokens=128_000,\n temperature=1, # temperature param not supported by gpt-5\n vision_support=True,\n ),\n # ---------------- Anthropic ----------------#\n \"anthropic/claude-3-7-sonnet-20250219\": AnthropicModelArgs(\n model_name=\"claude-3-7-sonnet-20250219\",\n max_new_tokens=16_384,\n temperature=1e-1,\n ),\n \"anthropic/claude-sonnet-4-20250514\": AnthropicModelArgs(\n model_name=\"claude-sonnet-4-20250514\",\n max_new_tokens=16_384,\n temperature=1e-1,\n ),\n # ---------------- OSS LLMs ----------------#\n \"meta-llama/Meta-Llama-3-70B-Instruct\": SelfHostedModelArgs(\n model_name=\"meta-llama/Meta-Llama-3-70B-Instruct\",\n max_total_tokens=8_192,\n max_input_tokens=8_192 - 512,\n max_new_tokens=512,\n backend=\"huggingface\",\n **default_oss_llms_args,\n ),\n \"meta-llama/Meta-Llama-3-8B-Instruct\": SelfHostedModelArgs(\n model_name=\"meta-llama/Meta-Llama-3-8B-Instruct\",\n max_total_tokens=16_384,\n max_input_tokens=16_384 - 512,\n max_new_tokens=512,\n backend=\"huggingface\",\n **default_oss_llms_args,\n ),\n \"mistralai/Mixtral-8x22B-Instruct-v0.1\": SelfHostedModelArgs(\n model_name=\"mistralai/Mixtral-8x22B-Instruct-v0.1\",\n max_total_tokens=32_000,\n max_input_tokens=30_000,\n max_new_tokens=2_000,\n backend=\"huggingface\",\n **default_oss_llms_args,\n ),\n # ---------------- OPENROUTER ----------------#\n \"openrouter/deepseek/deepseek-r1\": OpenRouterModelArgs(\n model_name=\"deepseek/deepseek-r1\",\n max_total_tokens=128_000,\n max_input_tokens=100_000,\n max_new_tokens=128_000,\n temperature=1e-1,\n ),\n \"openrouter/meta-llama/llama-3.1-405b-instruct\": OpenRouterModelArgs(\n model_name=\"meta-llama/llama-3.1-405b-instruct\",\n max_total_tokens=128_000,\n max_input_tokens=100_000,\n max_new_tokens=28_000,\n temperature=1e-1,\n ),\n \"openrouter/meta-llama/llama-3.1-70b-instruct\": OpenRouterModelArgs(\n model_name=\"meta-llama/llama-3.1-70b-instruct\",\n max_total_tokens=128_000,\n max_input_tokens=100_000,\n max_new_tokens=28_000,\n temperature=1e-1,\n ),\n \"openrouter/meta-llama/llama-3-70b-instruct\": OpenRouterModelArgs(\n model_name=\"meta-llama/llama-3-70b-instruct\",\n max_total_tokens=128_000,\n max_input_tokens=100_000,\n max_new_tokens=28_000,\n temperature=1e-1,\n ),\n \"openrouter/meta-llama/llama-4-maverick\": OpenRouterModelArgs(\n model_name=\"meta-llama/llama-4-maverick\",\n max_total_tokens=128_000,\n max_input_tokens=100_000,\n max_new_tokens=28_000,\n 
temperature=1e-1,\n vision_support=True,\n ),\n \"openrouter/meta-llama/llama-3.1-8b-instruct:free\": OpenRouterModelArgs(\n model_name=\"meta-llama/llama-3.1-8b-instruct:free\",\n max_total_tokens=128_000,\n max_input_tokens=100_000,\n max_new_tokens=28_000,\n temperature=1e-1,\n ),\n \"openrouter/meta-llama/llama-3.1-8b-instruct\": OpenRouterModelArgs(\n model_name=\"meta-llama/llama-3.1-8b-instruct\",\n max_total_tokens=128_000,\n max_input_tokens=100_000,\n max_new_tokens=28_000,\n temperature=1e-1,\n ),\n \"openrouter/anthropic/claude-3.5-sonnet:beta\": OpenRouterModelArgs(\n model_name=\"anthropic/claude-3.5-sonnet:beta\",\n max_total_tokens=200_000,\n max_input_tokens=200_000,\n max_new_tokens=8_192,\n temperature=1e-1,\n vision_support=True,\n ),\n \"openrouter/qwen/qwen-2-72b-instruct\": OpenRouterModelArgs(\n model_name=\"qwen/qwen-2-72b-instruct\",\n max_total_tokens=32_000,\n max_input_tokens=30_000,\n max_new_tokens=2_000,\n temperature=1e-1,\n ),\n \"openrouter/anthropic/claude-3.7-sonnet\": OpenRouterModelArgs(\n model_name=\"anthropic/claude-3.7-sonnet\",\n max_total_tokens=200_000,\n max_input_tokens=200_000,\n max_new_tokens=8_192,\n temperature=1e-1,\n vision_support=True,\n ),\n \"openrouter/openai/o1-mini-2024-09-12\": OpenRouterModelArgs(\n model_name=\"openai/o1-mini-2024-09-12\",\n max_total_tokens=128_000,\n max_input_tokens=128_000,\n max_new_tokens=64_000,\n temperature=1e-1,\n ),\n \"openrouter/openai/gpt-oss-120b\": OpenRouterModelArgs(\n model_name=\"openai/gpt-oss-120b\",\n max_total_tokens=131_072,\n max_input_tokens=131_072 - 2000,\n max_new_tokens=2000,\n temperature=1e-1,\n ),\n \"openrouter/openai/gpt-oss-20b\": OpenRouterModelArgs(\n model_name=\"openai/gpt-oss-20b\",\n max_total_tokens=131_072,\n max_input_tokens=131_072 - 2000,\n max_new_tokens=2000,\n temperature=1e-1,\n ),\n \"openrouter/openai/gpt-5-nano\": OpenRouterModelArgs(\n model_name=\"openai/gpt-5-nano\",\n max_total_tokens=400_000,\n max_input_tokens=400_000 - 4_000,\n max_new_tokens=4_000,\n temperature=1e-1,\n ),\n \"openrouter/openai/gpt-5-mini\": OpenRouterModelArgs(\n model_name=\"openai/gpt-5-mini\",\n max_total_tokens=400_000,\n max_input_tokens=400_000 - 4_000,\n max_new_tokens=4_000,\n temperature=1e-1,\n ),\n \"openrouter/openai/gpt-5-chat\": OpenRouterModelArgs(\n model_name=\"openai/gpt-5-chat\",\n max_total_tokens=400_000,\n max_input_tokens=400_000 - 4_000,\n max_new_tokens=4_000,\n temperature=1e-1,\n ),\n \"openrouter/openai/o3-mini\": OpenRouterModelArgs(\n model_name=\"openai/o3-mini\",\n max_total_tokens=200_000,\n max_input_tokens=100_000 - 4_000,\n max_new_tokens=4_000,\n temperature=1e-1,\n ),\n}","source_hash":"31b18587dcd9b3994a48302d8d832f7691cca607352131d2b5405f03f19606d8","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils","uri":"program://AgentLab/module/src.agentlab.llm.llm_utils#L1-L928","kind":"module","name":"src.agentlab.llm.llm_utils","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":1,"end_line":928,"context_start_line":1,"context_end_line":928,"code":"import base64\nimport collections\nimport importlib\nimport io\nimport json\nimport logging\nimport os\nimport re\nimport time\nfrom copy import deepcopy\nfrom functools import cache\nfrom typing import TYPE_CHECKING, Any, Union\nfrom warnings import warn\n\nimport anthropic\nimport numpy as np\nimport openai\nimport tiktoken\nimport yaml\nfrom PIL import Image\n\nlangchain_community = importlib.util.find_spec(\"langchain_community\")\nif 
langchain_community is not None:\n from langchain.schema import BaseMessage as LangchainBaseMessage\n from langchain_community.adapters.openai import convert_message_to_dict\nelse:\n LangchainBaseMessage = None\n convert_message_to_dict = None\n\nif TYPE_CHECKING:\n from agentlab.llm.chat_api import ChatModel\n\n\ndef messages_to_dict(messages: list[dict] | list[LangchainBaseMessage]) -> dict:\n new_messages = Discussion()\n for m in messages:\n if isinstance(m, dict):\n new_messages.add_message(m)\n elif isinstance(m, str):\n new_messages.add_message({\"role\": \"\", \"content\": m})\n elif LangchainBaseMessage is not None and isinstance(m, LangchainBaseMessage):\n new_messages.add_message(convert_message_to_dict(m))\n else:\n raise ValueError(f\"Unknown message type: {type(m)}\")\n return new_messages\n\n\nclass RetryError(ValueError):\n pass\n\n\ndef retry(\n chat: \"ChatModel\",\n messages: \"Discussion\",\n n_retry: int,\n parser: callable,\n log: bool = True,\n):\n \"\"\"Retry querying the chat models with the response from the parser until it\n returns a valid value.\n\n If the answer is not valid, it will retry and append to the chat the retry\n message. It will stop after `n_retry`.\n\n Note, each retry has to resend the whole prompt to the API. This can be slow\n and expensive.\n\n Args:\n chat (ChatModel): a ChatModel object taking a list of messages and\n returning a list of answers, all in OpenAI format.\n messages (list): the list of messages so far. This list will be modified with\n the new messages and the retry messages.\n n_retry (int): the maximum number of sequential retries.\n parser (callable): a function taking a message and returning a parsed value,\n or raising a ParseError\n log (bool): whether to log the retry messages.\n\n Returns:\n dict: the parsed value, with a string at key \"action\".\n\n Raises:\n ParseError: if the parser could not parse the response after n_retry retries.\n \"\"\"\n tries = 0\n while tries < n_retry:\n answer = chat(messages)\n # TODO: could we change this to not use inplace modifications ?\n messages.append(answer)\n try:\n return parser(answer[\"content\"])\n except ParseError as parsing_error:\n tries += 1\n if log:\n msg = f\"Query failed. 
Retrying {tries}/{n_retry}.\\n[LLM]:\\n{answer['content']}\\n[User]:\\n{str(parsing_error)}\"\n logging.info(msg)\n messages.append(dict(role=\"user\", content=str(parsing_error)))\n\n raise ParseError(f\"Could not parse a valid value after {n_retry} retries.\")\n\n\ndef generic_call_api_with_retries(\n client_function,\n api_params,\n is_response_valid_fn,\n rate_limit_exceptions,\n api_error_exceptions,\n get_status_code_fn=None,\n max_retries=10,\n initial_retry_delay_seconds=20,\n max_retry_delay_seconds=60 * 5,\n):\n \"\"\"\n Makes an API call with retries for transient failures, rate limiting,\n and responses deemed invalid by a custom validation function.\n (Refactored for improved readability with helper functions)\n\n Args:\n client_function: The API client function to call.\n api_params: Parameters to pass to the client function.\n is_response_valid_fn: Function to validate if the response is valid.\n rate_limit_exceptions: Tuple of exception types for rate limiting.\n api_error_exceptions: Tuple of exception types for API errors.\n get_status_code_fn: Optional function to extract status code from exceptions.\n max_retries: Maximum number of retry attempts.\n initial_retry_delay_seconds: Initial delay between retries in seconds.\n max_retry_delay_seconds: Maximum delay between retries in seconds.\n\n Returns:\n The API response if successful.\n\n Raises:\n Exception: For unexpected errors that are immediately re-raised.\n RuntimeError: If API call fails after maximum retries.\n \"\"\"\n\n def _calculate_delay(\n current_attempt, initial_delay, max_delay, is_first_attempt_for_type=False\n ):\n \"\"\"Calculates exponential backoff delay.\"\"\"\n # For invalid response content (not an exception), the first \"attempt\" at retrying this specific issue\n # might use a slightly different delay calculation if desired (e.g. attempt-1 for the exponent).\n # For exceptions, the attempt number directly applies.\n # Here, we use 'current_attempt' for exception-driven retries,\n # and 'current_attempt -1' for the first retry due to invalid content (is_first_attempt_for_type).\n if is_first_attempt_for_type: # First retry due to invalid content\n # The first retry after an invalid response (attempt 1 for this *type* of failure)\n effective_attempt = current_attempt - 1 # Use 0 for the first exponent\n else: # Retries due to exceptions or subsequent invalid content retries\n effective_attempt = current_attempt # Use current_attempt for exponent\n\n # Ensure effective_attempt for exponent is at least 0\n exponent_attempt = max(\n 0, effective_attempt if not is_first_attempt_for_type else current_attempt - 1\n )\n\n return min(initial_delay * (2**exponent_attempt), max_delay)\n\n def _handle_invalid_response_content(attempt):\n logging.warning(\n f\"[Attempt {attempt}/{max_retries}] API response deemed invalid by validation function. Retrying after delay...\"\n )\n if attempt < max_retries:\n # For the first retry due to invalid content, use attempt-1 for exponent\n delay = _calculate_delay(\n attempt,\n initial_retry_delay_seconds,\n max_retry_delay_seconds,\n is_first_attempt_for_type=True,\n )\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to invalid response content.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_rate_limit_error(e, attempt):\n logging.warning(\n f\"[Attempt {attempt}/{max_retries}] Rate limit error: {e}. 
Retrying after delay...\"\n )\n if attempt < max_retries:\n delay = _calculate_delay(attempt, initial_retry_delay_seconds, max_retry_delay_seconds)\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to rate limit.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_api_error(e, attempt):\n logging.error(f\"[Attempt {attempt}/{max_retries}] APIError: {e}\")\n status_code = None\n if get_status_code_fn:\n try:\n status_code = get_status_code_fn(e)\n except Exception as ex_status_fn:\n logging.warning(\n f\"Could not get status code from exception {type(e)} using get_status_code_fn: {ex_status_fn}\"\n )\n\n if status_code == 429 or (status_code and status_code >= 500):\n log_msg = \"Rate limit (429)\" if status_code == 429 else f\"Server error ({status_code})\"\n logging.warning(f\"{log_msg} indicated by status code. Retrying after delay...\")\n if attempt < max_retries:\n delay = _calculate_delay(\n attempt, initial_retry_delay_seconds, max_retry_delay_seconds\n )\n logging.debug(\n f\"Sleeping for {delay:.2f} seconds due to API error status {status_code}.\"\n )\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n else:\n logging.error(\n f\"Non-retriable or unrecognized API error occurred (status: {status_code}). Raising.\"\n )\n raise e # Re-raise non-retriable error\n\n # Main retry loop\n for attempt in range(1, max_retries + 1):\n try:\n response = client_function(**api_params)\n\n if is_response_valid_fn(response):\n logging.info(f\"[Attempt {attempt}/{max_retries}] API call succeeded.\")\n return response\n else:\n if _handle_invalid_response_content(attempt):\n continue\n else: # Max retries reached after invalid content\n break\n\n except rate_limit_exceptions as e:\n if _handle_rate_limit_error(e, attempt):\n continue\n else: # Max retries reached after rate limit\n break\n\n except api_error_exceptions as e:\n # _handle_api_error will raise if non-retriable, or return True to continue\n if _handle_api_error(e, attempt):\n continue\n else: # Max retries reached for retriable API error\n break\n\n except Exception as e: # Catch-all for truly unexpected errors\n logging.exception(\n f\"[Attempt {attempt}/{max_retries}] Unexpected exception: {e}. Raising.\"\n )\n raise e # Re-raise unexpected errors immediately\n\n logging.error(f\"Exceeded maximum {max_retries} retry attempts. 
API call failed.\")\n raise RuntimeError(f\"API call failed after {max_retries} retries.\")\n\n\ndef call_openai_api_with_retries(client_function, api_params, max_retries=10):\n \"\"\"\n Makes an OpenAI API call with retries for transient failures,\n rate limiting, and invalid or error-containing responses.\n (This is now a wrapper around generic_call_api_with_retries for OpenAI)\n\n Args:\n client_function: The OpenAI API client function to call.\n api_params: Parameters to pass to the client function.\n max_retries: Maximum number of retry attempts.\n\n Returns:\n The OpenAI API response if successful.\n \"\"\"\n\n def is_openai_response_valid(response):\n # Check for explicit error field in response object first\n if getattr(response, \"error\", None):\n logging.warning(f\"OpenAI API response contains an error attribute: {response.error}\")\n return False # Treat as invalid for retry purposes\n if hasattr(response, \"choices\") and response.choices: # Chat Completion API\n return True\n if hasattr(response, \"output\") and response.output: # Response API\n return True\n logging.warning(\"OpenAI API response is missing 'choices' or 'output' is empty.\")\n return False\n\n def get_openai_status_code(exception):\n return getattr(exception, \"http_status\", None)\n\n return generic_call_api_with_retries(\n client_function=client_function,\n api_params=api_params,\n is_response_valid_fn=is_openai_response_valid,\n rate_limit_exceptions=(openai.RateLimitError,),\n api_error_exceptions=(openai.APIError,), # openai.RateLimitError is caught first\n get_status_code_fn=get_openai_status_code,\n max_retries=max_retries,\n # You can also pass initial_retry_delay_seconds and max_retry_delay_seconds\n # if you want to customize them from their defaults in the generic function.\n )\n\n\ndef call_anthropic_api_with_retries(client_function, api_params, max_retries=10):\n \"\"\"\n Makes an Anthropic API call with retries for transient failures,\n rate limiting, and invalid responses.\n (This is a wrapper around generic_call_api_with_retries for Anthropic)\n\n Args:\n client_function: The Anthropic API client function to call.\n api_params: Parameters to pass to the client function.\n max_retries: Maximum number of retry attempts.\n\n Returns:\n The Anthropic API response if successful.\n \"\"\"\n\n def is_anthropic_response_valid(response):\n \"\"\"Checks if the Anthropic response is valid.\"\"\"\n # A successful Anthropic message response typically has:\n # - a 'type' attribute equal to 'message' (for message creation)\n # - a 'content' attribute which is a list of blocks\n # - no 'error' attribute at the top level of the response object itself\n # (errors are usually raised as exceptions by the client)\n\n if not response:\n logging.warning(\"Anthropic API response is None or empty.\")\n return False\n\n # Check for explicit error type if the API might return it in a 200 OK\n # For anthropic.types.Message, an error would typically be an exception.\n # However, if the client_function could return a dict with an 'error' key:\n if isinstance(response, dict) and response.get(\"type\") == \"error\":\n logging.warning(f\"Anthropic API response indicates an error: {response.get('error')}\")\n return False\n\n # For anthropic.types.Message objects from client.messages.create\n if hasattr(response, \"type\") and response.type == \"message\":\n if hasattr(response, \"content\") and isinstance(response.content, list):\n # Optionally, check if content is not empty, though an empty content list\n # might be valid 
for some assistant stop reasons.\n return True\n else:\n logging.warning(\n \"Anthropic API response is of type 'message' but missing valid 'content'.\"\n )\n return False\n\n logging.warning(\n f\"Anthropic API response does not appear to be a valid message object. Type: {getattr(response, 'type', 'N/A')}\"\n )\n return False\n\n def get_anthropic_status_code(exception):\n \"\"\"Extracts HTTP status code from an Anthropic exception.\"\"\"\n # anthropic.APIStatusError has a 'status_code' attribute\n return getattr(exception, \"status_code\", None)\n\n # Define Anthropic specific exceptions.\n # anthropic.RateLimitError for specific rate limit errors.\n # anthropic.APIError is a base class for many errors.\n # anthropic.APIStatusError provides status_code.\n # anthropic.APIConnectionError for network issues.\n # Order can matter if there's inheritance; specific ones first.\n\n # Ensure these are the correct exception types from your installed anthropic library version.\n anthropic_rate_limit_exception = anthropic.RateLimitError\n # Broader API errors, APIStatusError is more specific for HTTP status related issues.\n # APIConnectionError for network problems. APIError as a general catch-all.\n anthropic_api_error_exceptions = (\n anthropic.APIStatusError, # Catches errors with a status_code\n anthropic.APIConnectionError, # Catches network-related issues\n anthropic.APIError, # General base class for other Anthropic API errors\n )\n\n return generic_call_api_with_retries(\n client_function=client_function,\n api_params=api_params,\n is_response_valid_fn=is_anthropic_response_valid,\n rate_limit_exceptions=(anthropic_rate_limit_exception,),\n api_error_exceptions=anthropic_api_error_exceptions,\n get_status_code_fn=get_anthropic_status_code,\n max_retries=max_retries,\n # You can also pass initial_retry_delay_seconds and max_retry_delay_seconds\n # if you want to customize them from their defaults in the generic function.\n )\n\n\ndef supports_tool_calling_for_openrouter(\n model_name: str,\n) -> bool:\n \"\"\"\n Check if the openrouter model supports tool calling.\n\n Args:\n model_name (str): The name of the model.\n\n Returns:\n bool: True if the model supports tool calling, False otherwise.\n \"\"\"\n import os\n\n import openai\n\n client = openai.Client(\n api_key=os.getenv(\"OPENROUTER_API_KEY\"), base_url=\"https://openrouter.ai/api/v1\"\n )\n try:\n response = client.chat.completions.create(\n model=model_name,\n messages=[{\"role\": \"user\", \"content\": \"Call the test tool\"}],\n tools=[\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"dummy_tool\",\n \"description\": \"Just a test tool\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {},\n },\n },\n }\n ],\n tool_choice=\"required\",\n )\n response = response.to_dict()\n return \"tool_calls\" in response[\"choices\"][0][\"message\"]\n except Exception as e:\n print(f\"Skipping tool calling support check in openrouter for {model_name}: {e}\")\n return True\n\n\ndef retry_multiple(\n chat: \"ChatModel\",\n messages: \"Discussion\",\n n_retry: int,\n parser: callable,\n log: bool = True,\n num_samples: int = 1,\n):\n \"\"\"Retry querying the chat models with the response from the parser until it\n returns a valid value.\n\n If the answer is not valid, it will retry and append to the chat the retry\n message. It will stop after `n_retry`.\n\n Note, each retry has to resend the whole prompt to the API. 
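A short usage sketch (not from the source) for the OpenRouter capability probe above; the model slug is illustrative and OPENROUTER_API_KEY is assumed to be set:

model_name = "openai/gpt-4o-mini"  # illustrative OpenRouter slug
if supports_tool_calling_for_openrouter(model_name):
    print(f"{model_name} should work with tool-calling agents")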
This can be slow\n and expensive.\n\n Args:\n chat (ChatModel): a ChatModel object taking a list of messages and\n returning a list of answers, all in OpenAI format.\n messages (list): the list of messages so far. This list will be modified with\n the new messages and the retry messages.\n n_retry (int): the maximum number of sequential retries.\n parser (callable): a function taking a message and returning a parsed value,\n or raising a ParseError\n log (bool): whether to log the retry messages.\n num_samples (int): the number of samples to generate from the model.\n\n Returns:\n tuple[list, int]: the list of parsed values and the number of retries used.\n\n Raises:\n ParseError: if the parser could not parse the response after n_retry retries.\n \"\"\"\n tries = 0\n while tries < n_retry:\n answer_list = chat(messages, n_samples=num_samples)\n # TODO: could we change this to not use inplace modifications ?\n if not isinstance(answer_list, list):\n answer_list = [answer_list]\n\n # TODO taking the 1st hides the other generated answers in AgentXRay\n messages.append(answer_list[0])\n parsed_answers = []\n errors = []\n for answer in answer_list:\n try:\n parsed_answers.append(parser(answer[\"content\"]))\n except ParseError as parsing_error:\n errors.append(str(parsing_error))\n # if we have a valid answer, return it\n if parsed_answers:\n return parsed_answers, tries\n else:\n tries += 1\n if log:\n msg = f\"Query failed. Retrying {tries}/{n_retry}.\\n[LLM]:\\n{answer['content']}\\n[User]:\\n{str(errors)}\"\n logging.info(msg)\n messages.append(dict(role=\"user\", content=str(errors)))\n\n raise ParseError(f\"Could not parse a valid value after {n_retry} retries.\")\n\n\ndef truncate_tokens(text, max_tokens=8000, start=0, model_name=\"gpt-4\"):\n \"\"\"Use tiktoken to truncate a text to a maximum number of tokens.\"\"\"\n enc = tiktoken.encoding_for_model(model_name)\n tokens = enc.encode(text)\n if len(tokens) - start > max_tokens:\n return enc.decode(tokens[start : (start + max_tokens)])\n else:\n return text\n# ... 
truncated ...","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":true} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.messages_to_dict","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.messages_to_dict#L34-L45","kind":"function","name":"messages_to_dict","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":34,"end_line":45,"context_start_line":14,"context_end_line":65,"code":"\nimport anthropic\nimport numpy as np\nimport openai\nimport tiktoken\nimport yaml\nfrom PIL import Image\n\nlangchain_community = importlib.util.find_spec(\"langchain_community\")\nif langchain_community is not None:\n from langchain.schema import BaseMessage as LangchainBaseMessage\n from langchain_community.adapters.openai import convert_message_to_dict\nelse:\n LangchainBaseMessage = None\n convert_message_to_dict = None\n\nif TYPE_CHECKING:\n from agentlab.llm.chat_api import ChatModel\n\n\ndef messages_to_dict(messages: list[dict] | list[LangchainBaseMessage]) -> dict:\n new_messages = Discussion()\n for m in messages:\n if isinstance(m, dict):\n new_messages.add_message(m)\n elif isinstance(m, str):\n new_messages.add_message({\"role\": \"\", \"content\": m})\n elif LangchainBaseMessage is not None and isinstance(m, LangchainBaseMessage):\n new_messages.add_message(convert_message_to_dict(m))\n else:\n raise ValueError(f\"Unknown message type: {type(m)}\")\n return new_messages\n\n\nclass RetryError(ValueError):\n pass\n\n\ndef retry(\n chat: \"ChatModel\",\n messages: \"Discussion\",\n n_retry: int,\n parser: callable,\n log: bool = True,\n):\n \"\"\"Retry querying the chat models with the response from the parser until it\n returns a valid value.\n\n If the answer is not valid, it will retry and append to the chat the retry\n message. It will stop after `n_retry`.\n\n Note, each retry has to resend the whole prompt to the API. This can be slow","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.RetryError","uri":"program://AgentLab/class/src.agentlab.llm.llm_utils.RetryError#L48-L49","kind":"class","name":"RetryError","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":48,"end_line":49,"context_start_line":28,"context_end_line":69,"code":" convert_message_to_dict = None\n\nif TYPE_CHECKING:\n from agentlab.llm.chat_api import ChatModel\n\n\ndef messages_to_dict(messages: list[dict] | list[LangchainBaseMessage]) -> dict:\n new_messages = Discussion()\n for m in messages:\n if isinstance(m, dict):\n new_messages.add_message(m)\n elif isinstance(m, str):\n new_messages.add_message({\"role\": \"\", \"content\": m})\n elif LangchainBaseMessage is not None and isinstance(m, LangchainBaseMessage):\n new_messages.add_message(convert_message_to_dict(m))\n else:\n raise ValueError(f\"Unknown message type: {type(m)}\")\n return new_messages\n\n\nclass RetryError(ValueError):\n pass\n\n\ndef retry(\n chat: \"ChatModel\",\n messages: \"Discussion\",\n n_retry: int,\n parser: callable,\n log: bool = True,\n):\n \"\"\"Retry querying the chat models with the response from the parser until it\n returns a valid value.\n\n If the answer is not valid, it will retry and append to the chat the retry\n message. It will stop after `n_retry`.\n\n Note, each retry has to resend the whole prompt to the API. 
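A small usage sketch (not from the source) for messages_to_dict as defined above: plain dicts pass through, bare strings get an empty role, and LangChain messages are converted when langchain_community is installed:

mixed = [
    {"role": "system", "content": "You are a helpful assistant."},
    "a bare string note",  # becomes {"role": "", "content": "a bare string note"}
]
discussion = messages_to_dict(mixed)  # returns a Discussion of normalized messages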
This can be slow\n and expensive.\n\n Args:\n chat (ChatModel): a ChatModel object taking a list of messages and","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.retry","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.retry#L52-L98","kind":"function","name":"retry","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":52,"end_line":98,"context_start_line":32,"context_end_line":118,"code":"\n\ndef messages_to_dict(messages: list[dict] | list[LangchainBaseMessage]) -> dict:\n new_messages = Discussion()\n for m in messages:\n if isinstance(m, dict):\n new_messages.add_message(m)\n elif isinstance(m, str):\n new_messages.add_message({\"role\": \"\", \"content\": m})\n elif LangchainBaseMessage is not None and isinstance(m, LangchainBaseMessage):\n new_messages.add_message(convert_message_to_dict(m))\n else:\n raise ValueError(f\"Unknown message type: {type(m)}\")\n return new_messages\n\n\nclass RetryError(ValueError):\n pass\n\n\ndef retry(\n chat: \"ChatModel\",\n messages: \"Discussion\",\n n_retry: int,\n parser: callable,\n log: bool = True,\n):\n \"\"\"Retry querying the chat models with the response from the parser until it\n returns a valid value.\n\n If the answer is not valid, it will retry and append to the chat the retry\n message. It will stop after `n_retry`.\n\n Note, each retry has to resend the whole prompt to the API. This can be slow\n and expensive.\n\n Args:\n chat (ChatModel): a ChatModel object taking a list of messages and\n returning a list of answers, all in OpenAI format.\n messages (list): the list of messages so far. This list will be modified with\n the new messages and the retry messages.\n n_retry (int): the maximum number of sequential retries.\n parser (callable): a function taking a message and returning a parsed value,\n or raising a ParseError\n log (bool): whether to log the retry messages.\n\n Returns:\n dict: the parsed value, with a string at key \"action\".\n\n Raises:\n ParseError: if the parser could not parse the response after n_retry retries.\n \"\"\"\n tries = 0\n while tries < n_retry:\n answer = chat(messages)\n # TODO: could we change this to not use inplace modifications ?\n messages.append(answer)\n try:\n return parser(answer[\"content\"])\n except ParseError as parsing_error:\n tries += 1\n if log:\n msg = f\"Query failed. 
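A usage sketch (not from the source) pairing retry() with a strict parser; parse_html_tags_raise, defined later in this module, raises ParseError on failure, which feeds the retry message back to the model. Here chat_model and messages stand in for a configured ChatModel and Discussion:

from functools import partial

action_parser = partial(parse_html_tags_raise, keys=["action"])
parsed = retry(chat_model, messages, n_retry=3, parser=action_parser)  # placeholders
print(parsed["action"])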
Retrying {tries}/{n_retry}.\\n[LLM]:\\n{answer['content']}\\n[User]:\\n{str(parsing_error)}\"\n logging.info(msg)\n messages.append(dict(role=\"user\", content=str(parsing_error)))\n\n raise ParseError(f\"Could not parse a valid value after {n_retry} retries.\")\n\n\ndef generic_call_api_with_retries(\n client_function,\n api_params,\n is_response_valid_fn,\n rate_limit_exceptions,\n api_error_exceptions,\n get_status_code_fn=None,\n max_retries=10,\n initial_retry_delay_seconds=20,\n max_retry_delay_seconds=60 * 5,\n):\n \"\"\"\n Makes an API call with retries for transient failures, rate limiting,\n and responses deemed invalid by a custom validation function.\n (Refactored for improved readability with helper functions)\n\n Args:\n client_function: The API client function to call.","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.generic_call_api_with_retries","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.generic_call_api_with_retries#L101-L250","kind":"function","name":"generic_call_api_with_retries","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":101,"end_line":250,"context_start_line":81,"context_end_line":270,"code":" Raises:\n ParseError: if the parser could not parse the response after n_retry retries.\n \"\"\"\n tries = 0\n while tries < n_retry:\n answer = chat(messages)\n # TODO: could we change this to not use inplace modifications ?\n messages.append(answer)\n try:\n return parser(answer[\"content\"])\n except ParseError as parsing_error:\n tries += 1\n if log:\n msg = f\"Query failed. Retrying {tries}/{n_retry}.\\n[LLM]:\\n{answer['content']}\\n[User]:\\n{str(parsing_error)}\"\n logging.info(msg)\n messages.append(dict(role=\"user\", content=str(parsing_error)))\n\n raise ParseError(f\"Could not parse a valid value after {n_retry} retries.\")\n\n\ndef generic_call_api_with_retries(\n client_function,\n api_params,\n is_response_valid_fn,\n rate_limit_exceptions,\n api_error_exceptions,\n get_status_code_fn=None,\n max_retries=10,\n initial_retry_delay_seconds=20,\n max_retry_delay_seconds=60 * 5,\n):\n \"\"\"\n Makes an API call with retries for transient failures, rate limiting,\n and responses deemed invalid by a custom validation function.\n (Refactored for improved readability with helper functions)\n\n Args:\n client_function: The API client function to call.\n api_params: Parameters to pass to the client function.\n is_response_valid_fn: Function to validate if the response is valid.\n rate_limit_exceptions: Tuple of exception types for rate limiting.\n api_error_exceptions: Tuple of exception types for API errors.\n get_status_code_fn: Optional function to extract status code from exceptions.\n max_retries: Maximum number of retry attempts.\n initial_retry_delay_seconds: Initial delay between retries in seconds.\n max_retry_delay_seconds: Maximum delay between retries in seconds.\n\n Returns:\n The API response if successful.\n\n Raises:\n Exception: For unexpected errors that are immediately re-raised.\n RuntimeError: If API call fails after maximum retries.\n \"\"\"\n\n def _calculate_delay(\n current_attempt, initial_delay, max_delay, is_first_attempt_for_type=False\n ):\n \"\"\"Calculates exponential backoff delay.\"\"\"\n # For invalid response content (not an exception), the first \"attempt\" at retrying this specific issue\n # might use a slightly different delay calculation if desired (e.g. 
attempt-1 for the exponent).\n # For exceptions, the attempt number directly applies.\n # Here, we use 'current_attempt' for exception-driven retries,\n # and 'current_attempt -1' for the first retry due to invalid content (is_first_attempt_for_type).\n if is_first_attempt_for_type: # First retry due to invalid content\n # The first retry after an invalid response (attempt 1 for this *type* of failure)\n effective_attempt = current_attempt - 1 # Use 0 for the first exponent\n else: # Retries due to exceptions or subsequent invalid content retries\n effective_attempt = current_attempt # Use current_attempt for exponent\n\n # Ensure effective_attempt for exponent is at least 0\n exponent_attempt = max(\n 0, effective_attempt if not is_first_attempt_for_type else current_attempt - 1\n )\n\n return min(initial_delay * (2**exponent_attempt), max_delay)\n\n def _handle_invalid_response_content(attempt):\n logging.warning(\n f\"[Attempt {attempt}/{max_retries}] API response deemed invalid by validation function. Retrying after delay...\"\n )\n if attempt < max_retries:\n # For the first retry due to invalid content, use attempt-1 for exponent\n delay = _calculate_delay(\n attempt,\n initial_retry_delay_seconds,\n max_retry_delay_seconds,\n is_first_attempt_for_type=True,\n )\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to invalid response content.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_rate_limit_error(e, attempt):\n logging.warning(\n f\"[Attempt {attempt}/{max_retries}] Rate limit error: {e}. Retrying after delay...\"\n )\n if attempt < max_retries:\n delay = _calculate_delay(attempt, initial_retry_delay_seconds, max_retry_delay_seconds)\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to rate limit.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_api_error(e, attempt):\n logging.error(f\"[Attempt {attempt}/{max_retries}] APIError: {e}\")\n status_code = None\n if get_status_code_fn:\n try:\n status_code = get_status_code_fn(e)\n except Exception as ex_status_fn:\n logging.warning(\n f\"Could not get status code from exception {type(e)} using get_status_code_fn: {ex_status_fn}\"\n )\n\n if status_code == 429 or (status_code and status_code >= 500):\n log_msg = \"Rate limit (429)\" if status_code == 429 else f\"Server error ({status_code})\"\n logging.warning(f\"{log_msg} indicated by status code. Retrying after delay...\")\n if attempt < max_retries:\n delay = _calculate_delay(\n attempt, initial_retry_delay_seconds, max_retry_delay_seconds\n )\n logging.debug(\n f\"Sleeping for {delay:.2f} seconds due to API error status {status_code}.\"\n )\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n else:\n logging.error(\n f\"Non-retriable or unrecognized API error occurred (status: {status_code}). 
Raising.\"\n )\n raise e # Re-raise non-retriable error\n\n # Main retry loop\n for attempt in range(1, max_retries + 1):\n try:\n response = client_function(**api_params)\n\n if is_response_valid_fn(response):\n logging.info(f\"[Attempt {attempt}/{max_retries}] API call succeeded.\")\n return response\n else:\n if _handle_invalid_response_content(attempt):\n continue\n else: # Max retries reached after invalid content\n break\n\n except rate_limit_exceptions as e:\n if _handle_rate_limit_error(e, attempt):\n continue\n else: # Max retries reached after rate limit\n break\n\n except api_error_exceptions as e:\n # _handle_api_error will raise if non-retriable, or return True to continue\n if _handle_api_error(e, attempt):\n continue\n else: # Max retries reached for retriable API error\n break\n\n except Exception as e: # Catch-all for truly unexpected errors\n logging.exception(\n f\"[Attempt {attempt}/{max_retries}] Unexpected exception: {e}. Raising.\"\n )\n raise e # Re-raise unexpected errors immediately\n\n logging.error(f\"Exceeded maximum {max_retries} retry attempts. API call failed.\")\n raise RuntimeError(f\"API call failed after {max_retries} retries.\")\n\n\ndef call_openai_api_with_retries(client_function, api_params, max_retries=10):\n \"\"\"\n Makes an OpenAI API call with retries for transient failures,\n rate limiting, and invalid or error-containing responses.\n (This is now a wrapper around generic_call_api_with_retries for OpenAI)\n\n Args:\n client_function: The OpenAI API client function to call.\n api_params: Parameters to pass to the client function.\n max_retries: Maximum number of retry attempts.\n\n Returns:\n The OpenAI API response if successful.\n \"\"\"\n\n def is_openai_response_valid(response):\n # Check for explicit error field in response object first\n if getattr(response, \"error\", None):","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.call_openai_api_with_retries","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.call_openai_api_with_retries#L253-L293","kind":"function","name":"call_openai_api_with_retries","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":253,"end_line":293,"context_start_line":233,"context_end_line":313,"code":" else: # Max retries reached after rate limit\n break\n\n except api_error_exceptions as e:\n # _handle_api_error will raise if non-retriable, or return True to continue\n if _handle_api_error(e, attempt):\n continue\n else: # Max retries reached for retriable API error\n break\n\n except Exception as e: # Catch-all for truly unexpected errors\n logging.exception(\n f\"[Attempt {attempt}/{max_retries}] Unexpected exception: {e}. Raising.\"\n )\n raise e # Re-raise unexpected errors immediately\n\n logging.error(f\"Exceeded maximum {max_retries} retry attempts. 
API call failed.\")\n raise RuntimeError(f\"API call failed after {max_retries} retries.\")\n\n\ndef call_openai_api_with_retries(client_function, api_params, max_retries=10):\n \"\"\"\n Makes an OpenAI API call with retries for transient failures,\n rate limiting, and invalid or error-containing responses.\n (This is now a wrapper around generic_call_api_with_retries for OpenAI)\n\n Args:\n client_function: The OpenAI API client function to call.\n api_params: Parameters to pass to the client function.\n max_retries: Maximum number of retry attempts.\n\n Returns:\n The OpenAI API response if successful.\n \"\"\"\n\n def is_openai_response_valid(response):\n # Check for explicit error field in response object first\n if getattr(response, \"error\", None):\n logging.warning(f\"OpenAI API response contains an error attribute: {response.error}\")\n return False # Treat as invalid for retry purposes\n if hasattr(response, \"choices\") and response.choices: # Chat Completion API\n return True\n if hasattr(response, \"output\") and response.output: # Response API\n return True\n logging.warning(\"OpenAI API response is missing 'choices' or 'output' is empty.\")\n return False\n\n def get_openai_status_code(exception):\n return getattr(exception, \"http_status\", None)\n\n return generic_call_api_with_retries(\n client_function=client_function,\n api_params=api_params,\n is_response_valid_fn=is_openai_response_valid,\n rate_limit_exceptions=(openai.RateLimitError,),\n api_error_exceptions=(openai.APIError,), # openai.RateLimitError is caught first\n get_status_code_fn=get_openai_status_code,\n max_retries=max_retries,\n # You can also pass initial_retry_delay_seconds and max_retry_delay_seconds\n # if you want to customize them from their defaults in the generic function.\n )\n\n\ndef call_anthropic_api_with_retries(client_function, api_params, max_retries=10):\n \"\"\"\n Makes an Anthropic API call with retries for transient failures,\n rate limiting, and invalid responses.\n (This is a wrapper around generic_call_api_with_retries for Anthropic)\n\n Args:\n client_function: The Anthropic API client function to call.\n api_params: Parameters to pass to the client function.\n max_retries: Maximum number of retry attempts.\n\n Returns:\n The Anthropic API response if successful.\n \"\"\"\n\n def is_anthropic_response_valid(response):\n \"\"\"Checks if the Anthropic response is valid.\"\"\"\n # A successful Anthropic message response typically has:","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.call_anthropic_api_with_retries","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.call_anthropic_api_with_retries#L296-L379","kind":"function","name":"call_anthropic_api_with_retries","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":296,"end_line":379,"context_start_line":276,"context_end_line":399,"code":" return True\n logging.warning(\"OpenAI API response is missing 'choices' or 'output' is empty.\")\n return False\n\n def get_openai_status_code(exception):\n return getattr(exception, \"http_status\", None)\n\n return generic_call_api_with_retries(\n client_function=client_function,\n api_params=api_params,\n is_response_valid_fn=is_openai_response_valid,\n rate_limit_exceptions=(openai.RateLimitError,),\n api_error_exceptions=(openai.APIError,), # openai.RateLimitError is caught first\n get_status_code_fn=get_openai_status_code,\n 
max_retries=max_retries,\n # You can also pass initial_retry_delay_seconds and max_retry_delay_seconds\n # if you want to customize them from their defaults in the generic function.\n )\n\n\ndef call_anthropic_api_with_retries(client_function, api_params, max_retries=10):\n \"\"\"\n Makes an Anthropic API call with retries for transient failures,\n rate limiting, and invalid responses.\n (This is a wrapper around generic_call_api_with_retries for Anthropic)\n\n Args:\n client_function: The Anthropic API client function to call.\n api_params: Parameters to pass to the client function.\n max_retries: Maximum number of retry attempts.\n\n Returns:\n The Anthropic API response if successful.\n \"\"\"\n\n def is_anthropic_response_valid(response):\n \"\"\"Checks if the Anthropic response is valid.\"\"\"\n # A successful Anthropic message response typically has:\n # - a 'type' attribute equal to 'message' (for message creation)\n # - a 'content' attribute which is a list of blocks\n # - no 'error' attribute at the top level of the response object itself\n # (errors are usually raised as exceptions by the client)\n\n if not response:\n logging.warning(\"Anthropic API response is None or empty.\")\n return False\n\n # Check for explicit error type if the API might return it in a 200 OK\n # For anthropic.types.Message, an error would typically be an exception.\n # However, if the client_function could return a dict with an 'error' key:\n if isinstance(response, dict) and response.get(\"type\") == \"error\":\n logging.warning(f\"Anthropic API response indicates an error: {response.get('error')}\")\n return False\n\n # For anthropic.types.Message objects from client.messages.create\n if hasattr(response, \"type\") and response.type == \"message\":\n if hasattr(response, \"content\") and isinstance(response.content, list):\n # Optionally, check if content is not empty, though an empty content list\n # might be valid for some assistant stop reasons.\n return True\n else:\n logging.warning(\n \"Anthropic API response is of type 'message' but missing valid 'content'.\"\n )\n return False\n\n logging.warning(\n f\"Anthropic API response does not appear to be a valid message object. Type: {getattr(response, 'type', 'N/A')}\"\n )\n return False\n\n def get_anthropic_status_code(exception):\n \"\"\"Extracts HTTP status code from an Anthropic exception.\"\"\"\n # anthropic.APIStatusError has a 'status_code' attribute\n return getattr(exception, \"status_code\", None)\n\n # Define Anthropic specific exceptions.\n # anthropic.RateLimitError for specific rate limit errors.\n # anthropic.APIError is a base class for many errors.\n # anthropic.APIStatusError provides status_code.\n # anthropic.APIConnectionError for network issues.\n # Order can matter if there's inheritance; specific ones first.\n\n # Ensure these are the correct exception types from your installed anthropic library version.\n anthropic_rate_limit_exception = anthropic.RateLimitError\n # Broader API errors, APIStatusError is more specific for HTTP status related issues.\n # APIConnectionError for network problems. 
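A usage sketch (not from the source) for the Anthropic wrapper; the model name is illustrative and ANTHROPIC_API_KEY is assumed to be set:

import anthropic

client = anthropic.Anthropic()
response = call_anthropic_api_with_retries(
    client_function=client.messages.create,
    api_params={
        "model": "claude-3-5-sonnet-latest",  # illustrative model name
        "max_tokens": 256,
        "messages": [{"role": "user", "content": "Hello"}],
    },
)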
APIError as a general catch-all.\n anthropic_api_error_exceptions = (\n anthropic.APIStatusError, # Catches errors with a status_code\n anthropic.APIConnectionError, # Catches network-related issues\n anthropic.APIError, # General base class for other Anthropic API errors\n )\n\n return generic_call_api_with_retries(\n client_function=client_function,\n api_params=api_params,\n is_response_valid_fn=is_anthropic_response_valid,\n rate_limit_exceptions=(anthropic_rate_limit_exception,),\n api_error_exceptions=anthropic_api_error_exceptions,\n get_status_code_fn=get_anthropic_status_code,\n max_retries=max_retries,\n # You can also pass initial_retry_delay_seconds and max_retry_delay_seconds\n # if you want to customize them from their defaults in the generic function.\n )\n\n\ndef supports_tool_calling_for_openrouter(\n model_name: str,\n) -> bool:\n \"\"\"\n Check if the openrouter model supports tool calling.\n\n Args:\n model_name (str): The name of the model.\n\n Returns:\n bool: True if the model supports tool calling, False otherwise.\n \"\"\"\n import os\n\n import openai\n\n client = openai.Client(\n api_key=os.getenv(\"OPENROUTER_API_KEY\"), base_url=\"https://openrouter.ai/api/v1\"","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.supports_tool_calling_for_openrouter","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.supports_tool_calling_for_openrouter#L382-L424","kind":"function","name":"supports_tool_calling_for_openrouter","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":382,"end_line":424,"context_start_line":362,"context_end_line":444,"code":" # APIConnectionError for network problems. APIError as a general catch-all.\n anthropic_api_error_exceptions = (\n anthropic.APIStatusError, # Catches errors with a status_code\n anthropic.APIConnectionError, # Catches network-related issues\n anthropic.APIError, # General base class for other Anthropic API errors\n )\n\n return generic_call_api_with_retries(\n client_function=client_function,\n api_params=api_params,\n is_response_valid_fn=is_anthropic_response_valid,\n rate_limit_exceptions=(anthropic_rate_limit_exception,),\n api_error_exceptions=anthropic_api_error_exceptions,\n get_status_code_fn=get_anthropic_status_code,\n max_retries=max_retries,\n # You can also pass initial_retry_delay_seconds and max_retry_delay_seconds\n # if you want to customize them from their defaults in the generic function.\n )\n\n\ndef supports_tool_calling_for_openrouter(\n model_name: str,\n) -> bool:\n \"\"\"\n Check if the openrouter model supports tool calling.\n\n Args:\n model_name (str): The name of the model.\n\n Returns:\n bool: True if the model supports tool calling, False otherwise.\n \"\"\"\n import os\n\n import openai\n\n client = openai.Client(\n api_key=os.getenv(\"OPENROUTER_API_KEY\"), base_url=\"https://openrouter.ai/api/v1\"\n )\n try:\n response = client.chat.completions.create(\n model=model_name,\n messages=[{\"role\": \"user\", \"content\": \"Call the test tool\"}],\n tools=[\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"dummy_tool\",\n \"description\": \"Just a test tool\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {},\n },\n },\n }\n ],\n tool_choice=\"required\",\n )\n response = response.to_dict()\n return \"tool_calls\" in response[\"choices\"][0][\"message\"]\n except Exception as e:\n print(f\"Skipping tool calling support check in 
openrouter for {model_name}: {e}\")\n return True\n\n\ndef retry_multiple(\n chat: \"ChatModel\",\n messages: \"Discussion\",\n n_retry: int,\n parser: callable,\n log: bool = True,\n num_samples: int = 1,\n):\n \"\"\"Retry querying the chat models with the response from the parser until it\n returns a valid value.\n\n If the answer is not valid, it will retry and append to the chat the retry\n message. It will stop after `n_retry`.\n\n Note, each retry has to resend the whole prompt to the API. This can be slow\n and expensive.\n\n Args:","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.retry_multiple","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.retry_multiple#L427-L487","kind":"function","name":"retry_multiple","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":427,"end_line":487,"context_start_line":407,"context_end_line":507,"code":" \"type\": \"function\",\n \"function\": {\n \"name\": \"dummy_tool\",\n \"description\": \"Just a test tool\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {},\n },\n },\n }\n ],\n tool_choice=\"required\",\n )\n response = response.to_dict()\n return \"tool_calls\" in response[\"choices\"][0][\"message\"]\n except Exception as e:\n print(f\"Skipping tool calling support check in openrouter for {model_name}: {e}\")\n return True\n\n\ndef retry_multiple(\n chat: \"ChatModel\",\n messages: \"Discussion\",\n n_retry: int,\n parser: callable,\n log: bool = True,\n num_samples: int = 1,\n):\n \"\"\"Retry querying the chat models with the response from the parser until it\n returns a valid value.\n\n If the answer is not valid, it will retry and append to the chat the retry\n message. It will stop after `n_retry`.\n\n Note, each retry has to resend the whole prompt to the API. This can be slow\n and expensive.\n\n Args:\n chat (ChatModel): a ChatModel object taking a list of messages and\n returning a list of answers, all in OpenAI format.\n messages (list): the list of messages so far. This list will be modified with\n the new messages and the retry messages.\n n_retry (int): the maximum number of sequential retries.\n parser (callable): a function taking a message and returning a parsed value,\n or raising a ParseError\n log (bool): whether to log the retry messages.\n num_samples (int): the number of samples to generate from the model.\n\n Returns:\n tuple[list, int]: the list of parsed values and the number of retries used.\n\n Raises:\n ParseError: if the parser could not parse the response after n_retry retries.\n \"\"\"\n tries = 0\n while tries < n_retry:\n answer_list = chat(messages, n_samples=num_samples)\n # TODO: could we change this to not use inplace modifications ?\n if not isinstance(answer_list, list):\n answer_list = [answer_list]\n\n # TODO taking the 1st hides the other generated answers in AgentXRay\n messages.append(answer_list[0])\n parsed_answers = []\n errors = []\n for answer in answer_list:\n try:\n parsed_answers.append(parser(answer[\"content\"]))\n except ParseError as parsing_error:\n errors.append(str(parsing_error))\n # if we have a valid answer, return it\n if parsed_answers:\n return parsed_answers, tries\n else:\n tries += 1\n if log:\n msg = f\"Query failed. 
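retry_multiple behaves like retry() but draws num_samples completions per query and returns every answer that parsed, together with the number of retries used. A sketch (not from the source), with chat_model and messages as placeholders:

from functools import partial

parsed_answers, n_tries = retry_multiple(
    chat_model,  # placeholder ChatModel
    messages,    # placeholder Discussion
    n_retry=3,
    parser=partial(parse_html_tags_raise, keys=["action"]),
    num_samples=4,
)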
Retrying {tries}/{n_retry}.\\n[LLM]:\\n{answer['content']}\\n[User]:\\n{str(errors)}\"\n logging.info(msg)\n messages.append(dict(role=\"user\", content=str(errors)))\n\n raise ParseError(f\"Could not parse a valid value after {n_retry} retries.\")\n\n\ndef truncate_tokens(text, max_tokens=8000, start=0, model_name=\"gpt-4\"):\n \"\"\"Use tiktoken to truncate a text to a maximum number of tokens.\"\"\"\n enc = tiktoken.encoding_for_model(model_name)\n tokens = enc.encode(text)\n if len(tokens) - start > max_tokens:\n return enc.decode(tokens[start : (start + max_tokens)])\n else:\n return text\n\n\n@cache\ndef get_tokenizer_old(model_name=\"openai/gpt-4\"):\n if model_name.startswith(\"test\"):\n return tiktoken.encoding_for_model(\"gpt-4\")\n if model_name.startswith(\"openai\"):\n return tiktoken.encoding_for_model(model_name.split(\"/\")[-1])\n if model_name.startswith(\"azure\"):\n return tiktoken.encoding_for_model(model_name.split(\"/\")[1])","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.truncate_tokens","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.truncate_tokens#L490-L497","kind":"function","name":"truncate_tokens","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":490,"end_line":497,"context_start_line":470,"context_end_line":517,"code":" parsed_answers = []\n errors = []\n for answer in answer_list:\n try:\n parsed_answers.append(parser(answer[\"content\"]))\n except ParseError as parsing_error:\n errors.append(str(parsing_error))\n # if we have a valid answer, return it\n if parsed_answers:\n return parsed_answers, tries\n else:\n tries += 1\n if log:\n msg = f\"Query failed. Retrying {tries}/{n_retry}.\\n[LLM]:\\n{answer['content']}\\n[User]:\\n{str(errors)}\"\n logging.info(msg)\n messages.append(dict(role=\"user\", content=str(errors)))\n\n raise ParseError(f\"Could not parse a valid value after {n_retry} retries.\")\n\n\ndef truncate_tokens(text, max_tokens=8000, start=0, model_name=\"gpt-4\"):\n \"\"\"Use tiktoken to truncate a text to a maximum number of tokens.\"\"\"\n enc = tiktoken.encoding_for_model(model_name)\n tokens = enc.encode(text)\n if len(tokens) - start > max_tokens:\n return enc.decode(tokens[start : (start + max_tokens)])\n else:\n return text\n\n\n@cache\ndef get_tokenizer_old(model_name=\"openai/gpt-4\"):\n if model_name.startswith(\"test\"):\n return tiktoken.encoding_for_model(\"gpt-4\")\n if model_name.startswith(\"openai\"):\n return tiktoken.encoding_for_model(model_name.split(\"/\")[-1])\n if model_name.startswith(\"azure\"):\n return tiktoken.encoding_for_model(model_name.split(\"/\")[1])\n if model_name.startswith(\"reka\"):\n logging.warning(\n \"Reka models don't have a tokenizer implemented yet. 
Using the default one.\"\n )\n return tiktoken.encoding_for_model(\"gpt-4\")\n else:\n # Lazy import of transformers only when needed\n try:\n from transformers import AutoTokenizer # type: ignore\n except Exception as e:","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.get_tokenizer_old","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.get_tokenizer_old#L501-L521","kind":"function","name":"get_tokenizer_old","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":501,"end_line":521,"context_start_line":481,"context_end_line":541,"code":" tries += 1\n if log:\n msg = f\"Query failed. Retrying {tries}/{n_retry}.\\n[LLM]:\\n{answer['content']}\\n[User]:\\n{str(errors)}\"\n logging.info(msg)\n messages.append(dict(role=\"user\", content=str(errors)))\n\n raise ParseError(f\"Could not parse a valid value after {n_retry} retries.\")\n\n\ndef truncate_tokens(text, max_tokens=8000, start=0, model_name=\"gpt-4\"):\n \"\"\"Use tiktoken to truncate a text to a maximum number of tokens.\"\"\"\n enc = tiktoken.encoding_for_model(model_name)\n tokens = enc.encode(text)\n if len(tokens) - start > max_tokens:\n return enc.decode(tokens[start : (start + max_tokens)])\n else:\n return text\n\n\n@cache\ndef get_tokenizer_old(model_name=\"openai/gpt-4\"):\n if model_name.startswith(\"test\"):\n return tiktoken.encoding_for_model(\"gpt-4\")\n if model_name.startswith(\"openai\"):\n return tiktoken.encoding_for_model(model_name.split(\"/\")[-1])\n if model_name.startswith(\"azure\"):\n return tiktoken.encoding_for_model(model_name.split(\"/\")[1])\n if model_name.startswith(\"reka\"):\n logging.warning(\n \"Reka models don't have a tokenizer implemented yet. Using the default one.\"\n )\n return tiktoken.encoding_for_model(\"gpt-4\")\n else:\n # Lazy import of transformers only when needed\n try:\n from transformers import AutoTokenizer # type: ignore\n except Exception as e:\n raise ImportError(\n \"The 'transformers' package is required to use non-OpenAI/Azure tokenizers.\"\n ) from e\n return AutoTokenizer.from_pretrained(model_name)\n\n\n@cache\ndef get_tokenizer(model_name=\"gpt-4\"):\n try:\n return tiktoken.encoding_for_model(model_name)\n except KeyError:\n logging.info(f\"Could not find a tokenizer for model {model_name}. 
Trying HuggingFace.\")\n try:\n from transformers import AutoTokenizer # type: ignore\n\n return AutoTokenizer.from_pretrained(model_name)\n except Exception as e:\n logging.info(f\"Could not find a tokenizer for model {model_name}: {e} Defaulting to gpt-4.\")\n return tiktoken.encoding_for_model(\"gpt-4\")\n\n\ndef count_tokens(text, model=\"openai/gpt-4\"):\n enc = get_tokenizer(model)\n return len(enc.encode(text))","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.get_tokenizer","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.get_tokenizer#L525-L536","kind":"function","name":"get_tokenizer","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":525,"end_line":536,"context_start_line":505,"context_end_line":556,"code":" return tiktoken.encoding_for_model(model_name.split(\"/\")[-1])\n if model_name.startswith(\"azure\"):\n return tiktoken.encoding_for_model(model_name.split(\"/\")[1])\n if model_name.startswith(\"reka\"):\n logging.warning(\n \"Reka models don't have a tokenizer implemented yet. Using the default one.\"\n )\n return tiktoken.encoding_for_model(\"gpt-4\")\n else:\n # Lazy import of transformers only when needed\n try:\n from transformers import AutoTokenizer # type: ignore\n except Exception as e:\n raise ImportError(\n \"The 'transformers' package is required to use non-OpenAI/Azure tokenizers.\"\n ) from e\n return AutoTokenizer.from_pretrained(model_name)\n\n\n@cache\ndef get_tokenizer(model_name=\"gpt-4\"):\n try:\n return tiktoken.encoding_for_model(model_name)\n except KeyError:\n logging.info(f\"Could not find a tokenizer for model {model_name}. Trying HuggingFace.\")\n try:\n from transformers import AutoTokenizer # type: ignore\n\n return AutoTokenizer.from_pretrained(model_name)\n except Exception as e:\n logging.info(f\"Could not find a tokenizer for model {model_name}: {e} Defaulting to gpt-4.\")\n return tiktoken.encoding_for_model(\"gpt-4\")\n\n\ndef count_tokens(text, model=\"openai/gpt-4\"):\n enc = get_tokenizer(model)\n return len(enc.encode(text))\n\n\ndef json_parser(message):\n \"\"\"Parse a json message for the retry function.\"\"\"\n\n try:\n value = json.loads(message)\n valid = True\n retry_message = \"\"\n except json.JSONDecodeError as e:\n warn(e)\n value = {}\n valid = False\n retry_message = \"Your response is not a valid json. Please try again and be careful to the format. Don't add any apology or comment, just the answer.\"\n return value, valid, retry_message","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.count_tokens","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.count_tokens#L539-L541","kind":"function","name":"count_tokens","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":539,"end_line":541,"context_start_line":519,"context_end_line":561,"code":" \"The 'transformers' package is required to use non-OpenAI/Azure tokenizers.\"\n ) from e\n return AutoTokenizer.from_pretrained(model_name)\n\n\n@cache\ndef get_tokenizer(model_name=\"gpt-4\"):\n try:\n return tiktoken.encoding_for_model(model_name)\n except KeyError:\n logging.info(f\"Could not find a tokenizer for model {model_name}. 
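A short sketch (not from the source) of the token helpers above; the count is approximate because decoding and re-encoding a token slice is not always exactly stable:

long_text = "lorem ipsum " * 5000
clipped = truncate_tokens(long_text, max_tokens=100)  # keeps the first 100 tokens
print(count_tokens(clipped, model="gpt-4"))  # roughly 100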
Trying HuggingFace.\")\n try:\n from transformers import AutoTokenizer # type: ignore\n\n return AutoTokenizer.from_pretrained(model_name)\n except Exception as e:\n logging.info(f\"Could not find a tokenizer for model {model_name}: {e} Defaulting to gpt-4.\")\n return tiktoken.encoding_for_model(\"gpt-4\")\n\n\ndef count_tokens(text, model=\"openai/gpt-4\"):\n enc = get_tokenizer(model)\n return len(enc.encode(text))\n\n\ndef json_parser(message):\n \"\"\"Parse a json message for the retry function.\"\"\"\n\n try:\n value = json.loads(message)\n valid = True\n retry_message = \"\"\n except json.JSONDecodeError as e:\n warn(e)\n value = {}\n valid = False\n retry_message = \"Your response is not a valid json. Please try again and be careful to the format. Don't add any apology or comment, just the answer.\"\n return value, valid, retry_message\n\n\ndef yaml_parser(message):\n \"\"\"Parse a yaml message for the retry function.\"\"\"\n","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.json_parser","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.json_parser#L544-L556","kind":"function","name":"json_parser","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":544,"end_line":556,"context_start_line":524,"context_end_line":576,"code":"@cache\ndef get_tokenizer(model_name=\"gpt-4\"):\n try:\n return tiktoken.encoding_for_model(model_name)\n except KeyError:\n logging.info(f\"Could not find a tokenizer for model {model_name}. Trying HuggingFace.\")\n try:\n from transformers import AutoTokenizer # type: ignore\n\n return AutoTokenizer.from_pretrained(model_name)\n except Exception as e:\n logging.info(f\"Could not find a tokenizer for model {model_name}: {e} Defaulting to gpt-4.\")\n return tiktoken.encoding_for_model(\"gpt-4\")\n\n\ndef count_tokens(text, model=\"openai/gpt-4\"):\n enc = get_tokenizer(model)\n return len(enc.encode(text))\n\n\ndef json_parser(message):\n \"\"\"Parse a json message for the retry function.\"\"\"\n\n try:\n value = json.loads(message)\n valid = True\n retry_message = \"\"\n except json.JSONDecodeError as e:\n warn(e)\n value = {}\n valid = False\n retry_message = \"Your response is not a valid json. Please try again and be careful to the format. Don't add any apology or comment, just the answer.\"\n return value, valid, retry_message\n\n\ndef yaml_parser(message):\n \"\"\"Parse a yaml message for the retry function.\"\"\"\n\n # saves gpt-3.5 from some yaml parsing errors\n message = re.sub(r\":\\s*\\n(?=\\S|\\n)\", \": \", message)\n\n try:\n value = yaml.safe_load(message)\n valid = True\n retry_message = \"\"\n except yaml.YAMLError as e:\n warn(str(e))\n value = {}\n valid = False\n retry_message = \"Your response is not a valid yaml. Please try again and be careful to the format. 
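json_parser (and yaml_parser below) follow the (value, valid, retry_message) protocol expected by the retry helpers: on failure, valid is False and retry_message asks the model to resend a well-formed answer. A quick sketch (not from the source):

value, valid, retry_message = json_parser('{"action": "click(42)"}')
assert valid and value["action"] == "click(42)"

value, valid, retry_message = json_parser("not { valid json")
assert not valid and retry_message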
Don't add any apology or comment, just the answer.\"\n return value, valid, retry_message\n\n","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.yaml_parser","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.yaml_parser#L559-L574","kind":"function","name":"yaml_parser","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":559,"end_line":574,"context_start_line":539,"context_end_line":594,"code":"def count_tokens(text, model=\"openai/gpt-4\"):\n enc = get_tokenizer(model)\n return len(enc.encode(text))\n\n\ndef json_parser(message):\n \"\"\"Parse a json message for the retry function.\"\"\"\n\n try:\n value = json.loads(message)\n valid = True\n retry_message = \"\"\n except json.JSONDecodeError as e:\n warn(e)\n value = {}\n valid = False\n retry_message = \"Your response is not a valid json. Please try again and be careful to the format. Don't add any apology or comment, just the answer.\"\n return value, valid, retry_message\n\n\ndef yaml_parser(message):\n \"\"\"Parse a yaml message for the retry function.\"\"\"\n\n # saves gpt-3.5 from some yaml parsing errors\n message = re.sub(r\":\\s*\\n(?=\\S|\\n)\", \": \", message)\n\n try:\n value = yaml.safe_load(message)\n valid = True\n retry_message = \"\"\n except yaml.YAMLError as e:\n warn(str(e))\n value = {}\n valid = False\n retry_message = \"Your response is not a valid yaml. Please try again and be careful to the format. Don't add any apology or comment, just the answer.\"\n return value, valid, retry_message\n\n\ndef _compress_chunks(text, identifier, skip_list, split_regex=\"\\n\\n+\"):\n \"\"\"Compress a string by replacing redundant chunks by identifiers. Chunks are defined by the split_regex.\"\"\"\n text_list = re.split(split_regex, text)\n text_list = [chunk.strip() for chunk in text_list]\n counter = collections.Counter(text_list)\n def_dict = {}\n id = 0\n\n # Store items that occur more than once in a dictionary\n for item, count in counter.items():\n if count > 1 and item not in skip_list and len(item) > 10:\n def_dict[f\"{identifier}-{id}\"] = item\n id += 1\n\n # Replace redundant items with their identifiers in the text\n compressed_text = \"\\n\".join(text_list)\n for key, value in def_dict.items():\n compressed_text = compressed_text.replace(value, key)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils._compress_chunks","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils._compress_chunks#L577-L596","kind":"function","name":"_compress_chunks","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":577,"end_line":596,"context_start_line":557,"context_end_line":616,"code":"\n\ndef yaml_parser(message):\n \"\"\"Parse a yaml message for the retry function.\"\"\"\n\n # saves gpt-3.5 from some yaml parsing errors\n message = re.sub(r\":\\s*\\n(?=\\S|\\n)\", \": \", message)\n\n try:\n value = yaml.safe_load(message)\n valid = True\n retry_message = \"\"\n except yaml.YAMLError as e:\n warn(str(e))\n value = {}\n valid = False\n retry_message = \"Your response is not a valid yaml. Please try again and be careful to the format. 
Don't add any apology or comment, just the answer.\"\n return value, valid, retry_message\n\n\ndef _compress_chunks(text, identifier, skip_list, split_regex=\"\\n\\n+\"):\n \"\"\"Compress a string by replacing redundant chunks by identifiers. Chunks are defined by the split_regex.\"\"\"\n text_list = re.split(split_regex, text)\n text_list = [chunk.strip() for chunk in text_list]\n counter = collections.Counter(text_list)\n def_dict = {}\n id = 0\n\n # Store items that occur more than once in a dictionary\n for item, count in counter.items():\n if count > 1 and item not in skip_list and len(item) > 10:\n def_dict[f\"{identifier}-{id}\"] = item\n id += 1\n\n # Replace redundant items with their identifiers in the text\n compressed_text = \"\\n\".join(text_list)\n for key, value in def_dict.items():\n compressed_text = compressed_text.replace(value, key)\n\n return def_dict, compressed_text\n\n\ndef compress_string(text):\n \"\"\"Compress a string by replacing redundant paragraphs and lines with identifiers.\"\"\"\n\n # Perform paragraph-level compression\n def_dict, compressed_text = _compress_chunks(\n text, identifier=\"§\", skip_list=[], split_regex=\"\\n\\n+\"\n )\n\n # Perform line-level compression, skipping any paragraph identifiers\n line_dict, compressed_text = _compress_chunks(\n compressed_text, \"¶\", list(def_dict.keys()), split_regex=\"\\n+\"\n )\n def_dict.update(line_dict)\n\n # Create a definitions section\n def_lines = [\"\"]\n for key, value in def_dict.items():\n def_lines.append(f\"{key}:\\n{value}\")","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.compress_string","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.compress_string#L599-L620","kind":"function","name":"compress_string","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":599,"end_line":620,"context_start_line":579,"context_end_line":640,"code":" text_list = re.split(split_regex, text)\n text_list = [chunk.strip() for chunk in text_list]\n counter = collections.Counter(text_list)\n def_dict = {}\n id = 0\n\n # Store items that occur more than once in a dictionary\n for item, count in counter.items():\n if count > 1 and item not in skip_list and len(item) > 10:\n def_dict[f\"{identifier}-{id}\"] = item\n id += 1\n\n # Replace redundant items with their identifiers in the text\n compressed_text = \"\\n\".join(text_list)\n for key, value in def_dict.items():\n compressed_text = compressed_text.replace(value, key)\n\n return def_dict, compressed_text\n\n\ndef compress_string(text):\n \"\"\"Compress a string by replacing redundant paragraphs and lines with identifiers.\"\"\"\n\n # Perform paragraph-level compression\n def_dict, compressed_text = _compress_chunks(\n text, identifier=\"§\", skip_list=[], split_regex=\"\\n\\n+\"\n )\n\n # Perform line-level compression, skipping any paragraph identifiers\n line_dict, compressed_text = _compress_chunks(\n compressed_text, \"¶\", list(def_dict.keys()), split_regex=\"\\n+\"\n )\n def_dict.update(line_dict)\n\n # Create a definitions section\n def_lines = [\"\"]\n for key, value in def_dict.items():\n def_lines.append(f\"{key}:\\n{value}\")\n def_lines.append(\"\")\n definitions = \"\\n\".join(def_lines)\n\n return definitions + \"\\n\" + compressed_text\n\n\ndef extract_html_tags(text, keys):\n \"\"\"Extract the content within HTML tags for a list of keys.\n\n All text and keys will be converted to lowercase before 
matching.\n\n Args:\n text (str): The input string containing the HTML tags.\n keys (list[str]): The HTML tags to extract the content from.\n\n Returns:\n dict: A dictionary mapping each key to a list of substrings in `text` that match the key.\n \"\"\"\n content_dict = {}\n # text = text.lower()\n # keys = set([k.lower() for k in keys])\n for key in keys:\n pattern = f\"<{key}>(.*?)</{key}>\"\n matches = re.findall(pattern, text, re.DOTALL)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.extract_html_tags","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.extract_html_tags#L623-L643","kind":"function","name":"extract_html_tags","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":623,"end_line":643,"context_start_line":603,"context_end_line":663,"code":" def_dict, compressed_text = _compress_chunks(\n text, identifier=\"§\", skip_list=[], split_regex=\"\\n\\n+\"\n )\n\n # Perform line-level compression, skipping any paragraph identifiers\n line_dict, compressed_text = _compress_chunks(\n compressed_text, \"¶\", list(def_dict.keys()), split_regex=\"\\n+\"\n )\n def_dict.update(line_dict)\n\n # Create a definitions section\n def_lines = [\"\"]\n for key, value in def_dict.items():\n def_lines.append(f\"{key}:\\n{value}\")\n def_lines.append(\"\")\n definitions = \"\\n\".join(def_lines)\n\n return definitions + \"\\n\" + compressed_text\n\n\ndef extract_html_tags(text, keys):\n \"\"\"Extract the content within HTML tags for a list of keys.\n\n All text and keys will be converted to lowercase before matching.\n\n Args:\n text (str): The input string containing the HTML tags.\n keys (list[str]): The HTML tags to extract the content from.\n\n Returns:\n dict: A dictionary mapping each key to a list of substrings in `text` that match the key.\n \"\"\"\n content_dict = {}\n # text = text.lower()\n # keys = set([k.lower() for k in keys])\n for key in keys:\n pattern = f\"<{key}>(.*?)</{key}>\"\n matches = re.findall(pattern, text, re.DOTALL)\n if matches:\n content_dict[key] = [match.strip() for match in matches]\n return content_dict\n\n\nclass ParseError(Exception):\n pass\n\n\ndef extract_code_blocks(text) -> list[tuple[str, str]]:\n pattern = re.compile(r\"```(\\w*\\n)?(.*?)```\", re.DOTALL)\n\n matches = pattern.findall(text)\n return [(match[0].strip(), match[1].strip()) for match in matches]\n\n\ndef parse_html_tags_raise(text, keys=(), optional_keys=(), merge_multiple=False):\n \"\"\"A version of parse_html_tags that raises an exception if the parsing is not successful.\"\"\"\n content_dict, valid, retry_message = parse_html_tags(\n text, keys, optional_keys, merge_multiple=merge_multiple\n )\n if not valid:\n raise ParseError(retry_message)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.ParseError","uri":"program://AgentLab/class/src.agentlab.llm.llm_utils.ParseError#L646-L647","kind":"class","name":"ParseError","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":646,"end_line":647,"context_start_line":626,"context_end_line":667,"code":" All text and keys will be converted to lowercase before matching.\n\n Args:\n text (str): The input string containing the HTML tags.\n keys (list[str]): The HTML tags to extract the content from.\n\n Returns:\n dict: A dictionary mapping each key to a list of substrings in `text` that match the 
key.\n \"\"\"\n content_dict = {}\n # text = text.lower()\n # keys = set([k.lower() for k in keys])\n for key in keys:\n pattern = f\"<{key}>(.*?)</{key}>\"\n matches = re.findall(pattern, text, re.DOTALL)\n if matches:\n content_dict[key] = [match.strip() for match in matches]\n return content_dict\n\n\nclass ParseError(Exception):\n pass\n\n\ndef extract_code_blocks(text) -> list[tuple[str, str]]:\n pattern = re.compile(r\"```(\\w*\\n)?(.*?)```\", re.DOTALL)\n\n matches = pattern.findall(text)\n return [(match[0].strip(), match[1].strip()) for match in matches]\n\n\ndef parse_html_tags_raise(text, keys=(), optional_keys=(), merge_multiple=False):\n \"\"\"A version of parse_html_tags that raises an exception if the parsing is not successful.\"\"\"\n content_dict, valid, retry_message = parse_html_tags(\n text, keys, optional_keys, merge_multiple=merge_multiple\n )\n if not valid:\n raise ParseError(retry_message)\n return content_dict\n\n\ndef parse_html_tags(text, keys=(), optional_keys=(), merge_multiple=False):","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.extract_code_blocks","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.extract_code_blocks#L650-L654","kind":"function","name":"extract_code_blocks","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":650,"end_line":654,"context_start_line":630,"context_end_line":674,"code":" keys (list[str]): The HTML tags to extract the content from.\n\n Returns:\n dict: A dictionary mapping each key to a list of substrings of `text` that match the key.\n \"\"\"\n content_dict = {}\n # text = text.lower()\n # keys = set([k.lower() for k in keys])\n for key in keys:\n pattern = f\"<{key}>(.*?)</{key}>\"\n matches = re.findall(pattern, text, re.DOTALL)\n if matches:\n content_dict[key] = [match.strip() for match in matches]\n return content_dict\n\n\nclass ParseError(Exception):\n pass\n\n\ndef extract_code_blocks(text) -> list[tuple[str, str]]:\n pattern = re.compile(r\"```(\\w*\\n)?(.*?)```\", re.DOTALL)\n\n matches = pattern.findall(text)\n return [(match[0].strip(), match[1].strip()) for match in matches]\n\n\ndef parse_html_tags_raise(text, keys=(), optional_keys=(), merge_multiple=False):\n \"\"\"A version of parse_html_tags that raises an exception if the parsing is not successful.\"\"\"\n content_dict, valid, retry_message = parse_html_tags(\n text, keys, optional_keys, merge_multiple=merge_multiple\n )\n if not valid:\n raise ParseError(retry_message)\n return content_dict\n\n\ndef parse_html_tags(text, keys=(), optional_keys=(), merge_multiple=False):\n \"\"\"Satisfy the parse API: extract one match per key and validate that all required keys are present\n\n Args:\n text (str): The input string containing the HTML tags.\n keys (list[str]): The HTML tags to extract the content from.\n optional_keys (list[str]): The HTML tags to extract the content from, but are optional.\n merge_multiple (bool): Whether to merge multiple instances of the same key.","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.parse_html_tags_raise","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.parse_html_tags_raise#L657-L664","kind":"function","name":"parse_html_tags_raise","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":657,"end_line":664,"context_start_line":637,"context_end_line":684,"code":" # 
keys = set([k.lower() for k in keys])\n for key in keys:\n pattern = f\"<{key}>(.*?)</{key}>\"\n matches = re.findall(pattern, text, re.DOTALL)\n if matches:\n content_dict[key] = [match.strip() for match in matches]\n return content_dict\n\n\nclass ParseError(Exception):\n pass\n\n\ndef extract_code_blocks(text) -> list[tuple[str, str]]:\n pattern = re.compile(r\"```(\\w*\\n)?(.*?)```\", re.DOTALL)\n\n matches = pattern.findall(text)\n return [(match[0].strip(), match[1].strip()) for match in matches]\n\n\ndef parse_html_tags_raise(text, keys=(), optional_keys=(), merge_multiple=False):\n \"\"\"A version of parse_html_tags that raises an exception if the parsing is not successful.\"\"\"\n content_dict, valid, retry_message = parse_html_tags(\n text, keys, optional_keys, merge_multiple=merge_multiple\n )\n if not valid:\n raise ParseError(retry_message)\n return content_dict\n\n\ndef parse_html_tags(text, keys=(), optional_keys=(), merge_multiple=False):\n \"\"\"Satisfy the parse API: extract one match per key and validate that all required keys are present\n\n Args:\n text (str): The input string containing the HTML tags.\n keys (list[str]): The HTML tags to extract the content from.\n optional_keys (list[str]): The HTML tags to extract the content from, but are optional.\n merge_multiple (bool): Whether to merge multiple instances of the same key.\n\n Returns:\n dict: A dictionary mapping each key to a subset of `text` that matches the key.\n bool: Whether the parsing was successful.\n str: A message to be displayed to the agent if the parsing was not successful.\n\n \"\"\"\n all_keys = tuple(keys) + tuple(optional_keys)\n content_dict = extract_html_tags(text, all_keys)\n retry_messages = []","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.parse_html_tags","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.parse_html_tags#L667-L704","kind":"function","name":"parse_html_tags","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":667,"end_line":704,"context_start_line":647,"context_end_line":724,"code":" pass\n\n\ndef extract_code_blocks(text) -> list[tuple[str, str]]:\n pattern = re.compile(r\"```(\\w*\\n)?(.*?)```\", re.DOTALL)\n\n matches = pattern.findall(text)\n return [(match[0].strip(), match[1].strip()) for match in matches]\n\n\ndef parse_html_tags_raise(text, keys=(), optional_keys=(), merge_multiple=False):\n \"\"\"A version of parse_html_tags that raises an exception if the parsing is not successful.\"\"\"\n content_dict, valid, retry_message = parse_html_tags(\n text, keys, optional_keys, merge_multiple=merge_multiple\n )\n if not valid:\n raise ParseError(retry_message)\n return content_dict\n\n\ndef parse_html_tags(text, keys=(), optional_keys=(), merge_multiple=False):\n \"\"\"Satisfy the parse API: extract one match per key and validate that all required keys are present\n\n Args:\n text (str): The input string containing the HTML tags.\n keys (list[str]): The HTML tags to extract the content from.\n optional_keys (list[str]): The HTML tags to extract the content from, but are optional.\n merge_multiple (bool): Whether to merge multiple instances of the same key.\n\n Returns:\n dict: A dictionary mapping each key to a subset of `text` that matches the key.\n bool: Whether the parsing was successful.\n str: A message to be displayed to the agent if the parsing was not successful.\n\n \"\"\"\n all_keys = tuple(keys) + tuple(optional_keys)\n content_dict = 
extract_html_tags(text, all_keys)\n retry_messages = []\n\n for key in all_keys:\n if key not in content_dict:\n if key not in optional_keys:\n retry_messages.append(f\"Missing the key <{key}> in the answer.\")\n else:\n val = content_dict[key]\n content_dict[key] = val[0]\n if len(val) > 1:\n if not merge_multiple:\n retry_messages.append(\n f\"Found multiple instances of the key {key}. You should have only one of them.\"\n )\n else:\n # merge the multiple instances\n content_dict[key] = \"\\n\".join(val)\n\n valid = len(retry_messages) == 0\n retry_message = \"\\n\".join(retry_messages)\n return content_dict, valid, retry_message\n\n\ndef download_and_save_model(model_name: str, save_dir: str = \".\"):\n # Lazy import of transformers only when explicitly downloading a model\n try:\n from transformers import AutoModel # type: ignore\n except Exception as e:\n raise ImportError(\n \"The 'transformers' package is required to download and save models.\"\n ) from e\n model = AutoModel.from_pretrained(model_name)\n model.save_pretrained(save_dir)\n print(f\"Model downloaded and saved to {save_dir}\")\n\n\ndef image_to_jpg_base64_url(image: np.ndarray | Image.Image):\n \"\"\"Convert a numpy array to a base64 encoded image url.\"\"\"\n\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.download_and_save_model","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.download_and_save_model#L707-L717","kind":"function","name":"download_and_save_model","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":707,"end_line":717,"context_start_line":687,"context_end_line":737,"code":" if key not in content_dict:\n if key not in optional_keys:\n retry_messages.append(f\"Missing the key <{key}> in the answer.\")\n else:\n val = content_dict[key]\n content_dict[key] = val[0]\n if len(val) > 1:\n if not merge_multiple:\n retry_messages.append(\n f\"Found multiple instances of the key {key}. 
You should have only one of them.\"\n )\n else:\n # merge the multiple instances\n content_dict[key] = \"\\n\".join(val)\n\n valid = len(retry_messages) == 0\n retry_message = \"\\n\".join(retry_messages)\n return content_dict, valid, retry_message\n\n\ndef download_and_save_model(model_name: str, save_dir: str = \".\"):\n # Lazy import of transformers only when explicitly downloading a model\n try:\n from transformers import AutoModel # type: ignore\n except Exception as e:\n raise ImportError(\n \"The 'transformers' package is required to download and save models.\"\n ) from e\n model = AutoModel.from_pretrained(model_name)\n model.save_pretrained(save_dir)\n print(f\"Model downloaded and saved to {save_dir}\")\n\n\ndef image_to_jpg_base64_url(image: np.ndarray | Image.Image):\n \"\"\"Convert a numpy array to a base64 encoded image url.\"\"\"\n\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n buffered = io.BytesIO()\n image.save(buffered, format=\"JPEG\")\n\n image_base64 = base64.b64encode(buffered.getvalue()).decode()\n return f\"data:image/jpeg;base64,{image_base64}\"\n\n\ndef image_to_png_base64_url(image: np.ndarray | Image.Image):\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n if image.mode in (\"RGBA\", \"LA\"):","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.image_to_jpg_base64_url","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.image_to_jpg_base64_url#L720-L731","kind":"function","name":"image_to_jpg_base64_url","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":720,"end_line":731,"context_start_line":700,"context_end_line":751,"code":" content_dict[key] = \"\\n\".join(val)\n\n valid = len(retry_messages) == 0\n retry_message = \"\\n\".join(retry_messages)\n return content_dict, valid, retry_message\n\n\ndef download_and_save_model(model_name: str, save_dir: str = \".\"):\n # Lazy import of transformers only when explicitly downloading a model\n try:\n from transformers import AutoModel # type: ignore\n except Exception as e:\n raise ImportError(\n \"The 'transformers' package is required to download and save models.\"\n ) from e\n model = AutoModel.from_pretrained(model_name)\n model.save_pretrained(save_dir)\n print(f\"Model downloaded and saved to {save_dir}\")\n\n\ndef image_to_jpg_base64_url(image: np.ndarray | Image.Image):\n \"\"\"Convert a numpy array to a base64 encoded image url.\"\"\"\n\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n buffered = io.BytesIO()\n image.save(buffered, format=\"JPEG\")\n\n image_base64 = base64.b64encode(buffered.getvalue()).decode()\n return f\"data:image/jpeg;base64,{image_base64}\"\n\n\ndef image_to_png_base64_url(image: np.ndarray | Image.Image):\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n buffered = io.BytesIO()\n image.save(buffered, \"PNG\")\n image_base64 = base64.b64encode(buffered.getvalue()).decode()\n return f\"data:image/png;base64,{image_base64}\"\n\n\ndef img_to_base_64(image: Image.Image | np.ndarray) -> str:\n \"\"\"Converts a PIL Image or NumPy array to a base64-encoded string.\"\"\"\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n buffer = 
io.BytesIO()\n image.save(buffer, format=\"PNG\")\n b64_str = base64.b64encode(buffer.getvalue()).decode(\"utf-8\")","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.image_to_png_base64_url","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.image_to_png_base64_url#L734-L742","kind":"function","name":"image_to_png_base64_url","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":734,"end_line":742,"context_start_line":714,"context_end_line":762,"code":" ) from e\n model = AutoModel.from_pretrained(model_name)\n model.save_pretrained(save_dir)\n print(f\"Model downloaded and saved to {save_dir}\")\n\n\ndef image_to_jpg_base64_url(image: np.ndarray | Image.Image):\n \"\"\"Convert a numpy array to a base64 encoded image url.\"\"\"\n\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n buffered = io.BytesIO()\n image.save(buffered, format=\"JPEG\")\n\n image_base64 = base64.b64encode(buffered.getvalue()).decode()\n return f\"data:image/jpeg;base64,{image_base64}\"\n\n\ndef image_to_png_base64_url(image: np.ndarray | Image.Image):\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n buffered = io.BytesIO()\n image.save(buffered, \"PNG\")\n image_base64 = base64.b64encode(buffered.getvalue()).decode()\n return f\"data:image/png;base64,{image_base64}\"\n\n\ndef img_to_base_64(image: Image.Image | np.ndarray) -> str:\n \"\"\"Converts a PIL Image or NumPy array to a base64-encoded string.\"\"\"\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n buffer = io.BytesIO()\n image.save(buffer, format=\"PNG\")\n b64_str = base64.b64encode(buffer.getvalue()).decode(\"utf-8\")\n return b64_str\n\n\nclass BaseMessage(dict):\n def __init__(self, role: str, content: Union[str, list[dict]], **kwargs):\n allowed_attrs = {\"log_probs\"}\n invalid_attrs = set(kwargs.keys()) - allowed_attrs\n if invalid_attrs:\n raise ValueError(f\"Invalid attributes: {invalid_attrs}\")\n self[\"role\"] = role\n self[\"content\"] = deepcopy(content)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.img_to_base_64","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.img_to_base_64#L745-L752","kind":"function","name":"img_to_base_64","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":745,"end_line":752,"context_start_line":725,"context_end_line":772,"code":" if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n buffered = io.BytesIO()\n image.save(buffered, format=\"JPEG\")\n\n image_base64 = base64.b64encode(buffered.getvalue()).decode()\n return f\"data:image/jpeg;base64,{image_base64}\"\n\n\ndef image_to_png_base64_url(image: np.ndarray | Image.Image):\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n buffered = io.BytesIO()\n image.save(buffered, \"PNG\")\n image_base64 = base64.b64encode(buffered.getvalue()).decode()\n return f\"data:image/png;base64,{image_base64}\"\n\n\ndef img_to_base_64(image: Image.Image | np.ndarray) -> str:\n \"\"\"Converts a PIL Image or NumPy array to a base64-encoded string.\"\"\"\n if isinstance(image, 
np.ndarray):\n image = Image.fromarray(image)\n buffer = io.BytesIO()\n image.save(buffer, format=\"PNG\")\n b64_str = base64.b64encode(buffer.getvalue()).decode(\"utf-8\")\n return b64_str\n\n\nclass BaseMessage(dict):\n def __init__(self, role: str, content: Union[str, list[dict]], **kwargs):\n allowed_attrs = {\"log_probs\"}\n invalid_attrs = set(kwargs.keys()) - allowed_attrs\n if invalid_attrs:\n raise ValueError(f\"Invalid attributes: {invalid_attrs}\")\n self[\"role\"] = role\n self[\"content\"] = deepcopy(content)\n self.update(kwargs)\n\n def __str__(self, warn_if_image=False) -> str:\n if isinstance(self[\"content\"], str):\n return self[\"content\"]\n if not all(elem[\"type\"] == \"text\" for elem in self[\"content\"]):\n msg = \"The content of the message has images, which are not displayed in the string representation.\"\n if warn_if_image:\n logging.warning(msg)\n else:","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.BaseMessage","uri":"program://AgentLab/class/src.agentlab.llm.llm_utils.BaseMessage#L755-L835","kind":"class","name":"BaseMessage","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":755,"end_line":835,"context_start_line":735,"context_end_line":855,"code":" if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n buffered = io.BytesIO()\n image.save(buffered, \"PNG\")\n image_base64 = base64.b64encode(buffered.getvalue()).decode()\n return f\"data:image/png;base64,{image_base64}\"\n\n\ndef img_to_base_64(image: Image.Image | np.ndarray) -> str:\n \"\"\"Converts a PIL Image or NumPy array to a base64-encoded string.\"\"\"\n if isinstance(image, np.ndarray):\n image = Image.fromarray(image)\n buffer = io.BytesIO()\n image.save(buffer, format=\"PNG\")\n b64_str = base64.b64encode(buffer.getvalue()).decode(\"utf-8\")\n return b64_str\n\n\nclass BaseMessage(dict):\n def __init__(self, role: str, content: Union[str, list[dict]], **kwargs):\n allowed_attrs = {\"log_probs\"}\n invalid_attrs = set(kwargs.keys()) - allowed_attrs\n if invalid_attrs:\n raise ValueError(f\"Invalid attributes: {invalid_attrs}\")\n self[\"role\"] = role\n self[\"content\"] = deepcopy(content)\n self.update(kwargs)\n\n def __str__(self, warn_if_image=False) -> str:\n if isinstance(self[\"content\"], str):\n return self[\"content\"]\n if not all(elem[\"type\"] == \"text\" for elem in self[\"content\"]):\n msg = \"The content of the message has images, which are not displayed in the string representation.\"\n if warn_if_image:\n logging.warning(msg)\n else:\n logging.info(msg)\n\n return \"\\n\".join(\n [\n elem[\"text\"]\n for elem in self[\"content\"]\n if elem[\"type\"] == \"text\" or elem[\"type\"] == \"input_text\"\n ]\n )\n\n def add_content(self, type: str, content: Any):\n if isinstance(self[\"content\"], str):\n text = self[\"content\"]\n self[\"content\"] = []\n self[\"content\"].append({\"type\": \"text\", \"text\": text})\n self[\"content\"].append({\"type\": type, type: content})\n\n def add_text(self, text: str):\n self.add_content(\"text\", text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = None):\n if not isinstance(image, str):\n image_url = image_to_jpg_base64_url(image)\n else:\n image_url = image\n if detail:\n self.add_content(\"image_url\", {\"url\": image_url, \"detail\": detail})\n else:\n self.add_content(\"image_url\", 
{\"url\": image_url})\n\n def to_markdown(self):\n if isinstance(self[\"content\"], str):\n return f\"\\n```\\n{self['content']}\\n```\\n\"\n res = []\n for elem in self[\"content\"]:\n # add texts between ticks and images\n if elem[\"type\"] == \"text\":\n res.append(f\"\\n```\\n{elem['text']}\\n```\\n\")\n elif elem[\"type\"] == \"image_url\":\n img_str = (\n elem[\"image_url\"]\n if isinstance(elem[\"image_url\"], str)\n else elem[\"image_url\"][\"url\"]\n )\n res.append(f\"![image]({img_str})\")\n return \"\\n\".join(res)\n\n def merge(self):\n \"\"\"Merges content elements of type 'text' if they are adjacent.\"\"\"\n if isinstance(self[\"content\"], str):\n return\n new_content = []\n for elem in self[\"content\"]:\n if elem[\"type\"] == \"text\":\n if new_content and new_content[-1][\"type\"] == \"text\":\n new_content[-1][\"text\"] += \"\\n\" + elem[\"text\"]\n else:\n new_content.append(elem)\n else:\n new_content.append(elem)\n self[\"content\"] = new_content\n if len(self[\"content\"]) == 1:\n self[\"content\"] = self[\"content\"][0][\"text\"]\n\n\nclass SystemMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"system\", content)\n\n\nclass HumanMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"user\", content)\n\n\nclass AIMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]], log_probs=None):\n super().__init__(\"assistant\", content, log_probs=log_probs)\n\n\nclass Discussion:\n def __init__(self, messages: Union[list[BaseMessage], BaseMessage] = None):\n if isinstance(messages, BaseMessage):","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.SystemMessage","uri":"program://AgentLab/class/src.agentlab.llm.llm_utils.SystemMessage#L838-L840","kind":"class","name":"SystemMessage","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":838,"end_line":840,"context_start_line":818,"context_end_line":860,"code":" return \"\\n\".join(res)\n\n def merge(self):\n \"\"\"Merges content elements of type 'text' if they are adjacent.\"\"\"\n if isinstance(self[\"content\"], str):\n return\n new_content = []\n for elem in self[\"content\"]:\n if elem[\"type\"] == \"text\":\n if new_content and new_content[-1][\"type\"] == \"text\":\n new_content[-1][\"text\"] += \"\\n\" + elem[\"text\"]\n else:\n new_content.append(elem)\n else:\n new_content.append(elem)\n self[\"content\"] = new_content\n if len(self[\"content\"]) == 1:\n self[\"content\"] = self[\"content\"][0][\"text\"]\n\n\nclass SystemMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"system\", content)\n\n\nclass HumanMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"user\", content)\n\n\nclass AIMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]], log_probs=None):\n super().__init__(\"assistant\", content, log_probs=log_probs)\n\n\nclass Discussion:\n def __init__(self, messages: Union[list[BaseMessage], BaseMessage] = None):\n if isinstance(messages, BaseMessage):\n messages = [messages]\n elif messages is None:\n messages = []\n self.messages = messages\n","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} 
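A minimal usage sketch for the tag-parsing helpers shown earlier (`parse_html_tags_raise` and `ParseError` from `src/agentlab/llm/llm_utils.py`): on failure, the exception carries the retry message that can be fed back to the model on the next attempt. The tag names `think`/`action` and the answer text are illustrative, not part of the library.

```python
# Sketch only: parse_html_tags_raise / ParseError are defined in the module above;
# the <think>/<action> tags are hypothetical keys chosen for this example.
from agentlab.llm.llm_utils import ParseError, parse_html_tags_raise

answer = """<think>The search box is element 12.</think>
<action>click('12')</action>"""

try:
    parsed = parse_html_tags_raise(answer, keys=("think", "action"))
    print(parsed["action"])  # -> click('12')
except ParseError as e:
    # str(e) is the retry message, e.g. "Missing the key <action> in the answer.";
    # appending it to the next prompt lets the model correct its output.
    print(f"retrying with feedback: {e}")
```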
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.HumanMessage","uri":"program://AgentLab/class/src.agentlab.llm.llm_utils.HumanMessage#L843-L845","kind":"class","name":"HumanMessage","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":843,"end_line":845,"context_start_line":823,"context_end_line":865,"code":" return\n new_content = []\n for elem in self[\"content\"]:\n if elem[\"type\"] == \"text\":\n if new_content and new_content[-1][\"type\"] == \"text\":\n new_content[-1][\"text\"] += \"\\n\" + elem[\"text\"]\n else:\n new_content.append(elem)\n else:\n new_content.append(elem)\n self[\"content\"] = new_content\n if len(self[\"content\"]) == 1:\n self[\"content\"] = self[\"content\"][0][\"text\"]\n\n\nclass SystemMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"system\", content)\n\n\nclass HumanMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"user\", content)\n\n\nclass AIMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]], log_probs=None):\n super().__init__(\"assistant\", content, log_probs=log_probs)\n\n\nclass Discussion:\n def __init__(self, messages: Union[list[BaseMessage], BaseMessage] = None):\n if isinstance(messages, BaseMessage):\n messages = [messages]\n elif messages is None:\n messages = []\n self.messages = messages\n\n @property\n def last_message(self):\n return self.messages[-1]\n\n def merge(self):","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.AIMessage","uri":"program://AgentLab/class/src.agentlab.llm.llm_utils.AIMessage#L848-L850","kind":"class","name":"AIMessage","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":848,"end_line":850,"context_start_line":828,"context_end_line":870,"code":" new_content[-1][\"text\"] += \"\\n\" + elem[\"text\"]\n else:\n new_content.append(elem)\n else:\n new_content.append(elem)\n self[\"content\"] = new_content\n if len(self[\"content\"]) == 1:\n self[\"content\"] = self[\"content\"][0][\"text\"]\n\n\nclass SystemMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"system\", content)\n\n\nclass HumanMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"user\", content)\n\n\nclass AIMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]], log_probs=None):\n super().__init__(\"assistant\", content, log_probs=log_probs)\n\n\nclass Discussion:\n def __init__(self, messages: Union[list[BaseMessage], BaseMessage] = None):\n if isinstance(messages, BaseMessage):\n messages = [messages]\n elif messages is None:\n messages = []\n self.messages = messages\n\n @property\n def last_message(self):\n return self.messages[-1]\n\n def merge(self):\n for m in self.messages:\n m.merge()\n\n def __str__(self) -> str:\n return \"\\n\".join(str(m) for m in self.messages)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.Discussion","uri":"program://AgentLab/class/src.agentlab.llm.llm_utils.Discussion#L853-L919","kind":"class","name":"Discussion","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":853,"end_line":919,"context_start_line":833,"context_end_line":928,"code":" self[\"content\"] = new_content\n if 
len(self[\"content\"]) == 1:\n self[\"content\"] = self[\"content\"][0][\"text\"]\n\n\nclass SystemMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"system\", content)\n\n\nclass HumanMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"user\", content)\n\n\nclass AIMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]], log_probs=None):\n super().__init__(\"assistant\", content, log_probs=log_probs)\n\n\nclass Discussion:\n def __init__(self, messages: Union[list[BaseMessage], BaseMessage] = None):\n if isinstance(messages, BaseMessage):\n messages = [messages]\n elif messages is None:\n messages = []\n self.messages = messages\n\n @property\n def last_message(self):\n return self.messages[-1]\n\n def merge(self):\n for m in self.messages:\n m.merge()\n\n def __str__(self) -> str:\n return \"\\n\".join(str(m) for m in self.messages)\n\n def to_string(self):\n self.merge()\n return str(self)\n\n def to_openai(self):\n self.merge()\n return self.messages\n\n def add_message(\n self,\n message: BaseMessage | dict = None,\n role: str = None,\n content: Union[str, list[dict]] = None,\n ):\n if message is None:\n message = BaseMessage(role, content)\n else:\n if isinstance(message, dict):\n message = BaseMessage(**message)\n self.messages.append(message)\n\n def append(self, message: BaseMessage | dict):\n self.add_message(message)\n\n def add_content(self, type: str, content: Any):\n \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)\n\n def add_text(self, text: str):\n \"\"\"Add text to the last message.\"\"\"\n self.last_message.add_text(text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = None):\n \"\"\"Add an image to the last message.\"\"\"\n self.last_message.add_image(image, detail)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __len__(self):\n return len(self.messages)\n\n def __getitem__(self, key):\n return self.messages[key]\n\n def to_markdown(self):\n self.merge()\n return \"\\n\".join([f\"Message {i}\\n{m.to_markdown()}\\n\" for i, m in enumerate(self.messages)])\n\n\nif __name__ == \"__main__\":\n # model_to_download = \"THUDM/agentlm-70b\"\n model_to_download = \"databricks/dbrx-instruct\"\n save_dir = \"/mnt/ui_copilot/data_rw/base_models/\"\n # set the following env variable to enable the transfer of the model\n os.environ[\"HF_HUB_ENABLE_HF_TRANSFER\"] = \"1\"\n download_and_save_model(model_to_download, save_dir=save_dir)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils._calculate_delay","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils._calculate_delay#L136-L156","kind":"function","name":"_calculate_delay","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":136,"end_line":156,"context_start_line":116,"context_end_line":176,"code":"\n Args:\n client_function: The API client function to call.\n api_params: Parameters to pass to the client function.\n is_response_valid_fn: Function to validate if the response is valid.\n rate_limit_exceptions: Tuple of exception types for rate limiting.\n api_error_exceptions: Tuple of exception types for API errors.\n get_status_code_fn: Optional function to extract status code from exceptions.\n max_retries: Maximum number of retry attempts.\n initial_retry_delay_seconds: Initial delay between 
retries in seconds.\n max_retry_delay_seconds: Maximum delay between retries in seconds.\n\n Returns:\n The API response if successful.\n\n Raises:\n Exception: For unexpected errors that are immediately re-raised.\n RuntimeError: If API call fails after maximum retries.\n \"\"\"\n\n def _calculate_delay(\n current_attempt, initial_delay, max_delay, is_first_attempt_for_type=False\n ):\n \"\"\"Calculates exponential backoff delay.\"\"\"\n # For invalid response content (not an exception), the first \"attempt\" at retrying this specific issue\n # might use a slightly different delay calculation if desired (e.g. attempt-1 for the exponent).\n # For exceptions, the attempt number directly applies.\n # Here, we use 'current_attempt' for exception-driven retries,\n # and 'current_attempt -1' for the first retry due to invalid content (is_first_attempt_for_type).\n if is_first_attempt_for_type: # First retry due to invalid content\n # The first retry after an invalid response (attempt 1 for this *type* of failure)\n effective_attempt = current_attempt - 1 # Use 0 for the first exponent\n else: # Retries due to exceptions or subsequent invalid content retries\n effective_attempt = current_attempt # Use current_attempt for exponent\n\n # Ensure effective_attempt for exponent is at least 0\n exponent_attempt = max(\n 0, effective_attempt if not is_first_attempt_for_type else current_attempt - 1\n )\n\n return min(initial_delay * (2**exponent_attempt), max_delay)\n\n def _handle_invalid_response_content(attempt):\n logging.warning(\n f\"[Attempt {attempt}/{max_retries}] API response deemed invalid by validation function. Retrying after delay...\"\n )\n if attempt < max_retries:\n # For the first retry due to invalid content, use attempt-1 for exponent\n delay = _calculate_delay(\n attempt,\n initial_retry_delay_seconds,\n max_retry_delay_seconds,\n is_first_attempt_for_type=True,\n )\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to invalid response content.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_rate_limit_error(e, attempt):\n logging.warning(","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils._handle_invalid_response_content","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils._handle_invalid_response_content#L158-L173","kind":"function","name":"_handle_invalid_response_content","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":158,"end_line":173,"context_start_line":138,"context_end_line":193,"code":" ):\n \"\"\"Calculates exponential backoff delay.\"\"\"\n # For invalid response content (not an exception), the first \"attempt\" at retrying this specific issue\n # might use a slightly different delay calculation if desired (e.g. 
attempt-1 for the exponent).\n # For exceptions, the attempt number directly applies.\n # Here, we use 'current_attempt' for exception-driven retries,\n # and 'current_attempt -1' for the first retry due to invalid content (is_first_attempt_for_type).\n if is_first_attempt_for_type: # First retry due to invalid content\n # The first retry after an invalid response (attempt 1 for this *type* of failure)\n effective_attempt = current_attempt - 1 # Use 0 for the first exponent\n else: # Retries due to exceptions or subsequent invalid content retries\n effective_attempt = current_attempt # Use current_attempt for exponent\n\n # Ensure effective_attempt for exponent is at least 0\n exponent_attempt = max(\n 0, effective_attempt if not is_first_attempt_for_type else current_attempt - 1\n )\n\n return min(initial_delay * (2**exponent_attempt), max_delay)\n\n def _handle_invalid_response_content(attempt):\n logging.warning(\n f\"[Attempt {attempt}/{max_retries}] API response deemed invalid by validation function. Retrying after delay...\"\n )\n if attempt < max_retries:\n # For the first retry due to invalid content, use attempt-1 for exponent\n delay = _calculate_delay(\n attempt,\n initial_retry_delay_seconds,\n max_retry_delay_seconds,\n is_first_attempt_for_type=True,\n )\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to invalid response content.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_rate_limit_error(e, attempt):\n logging.warning(\n f\"[Attempt {attempt}/{max_retries}] Rate limit error: {e}. Retrying after delay...\"\n )\n if attempt < max_retries:\n delay = _calculate_delay(attempt, initial_retry_delay_seconds, max_retry_delay_seconds)\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to rate limit.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_api_error(e, attempt):\n logging.error(f\"[Attempt {attempt}/{max_retries}] APIError: {e}\")\n status_code = None\n if get_status_code_fn:\n try:\n status_code = get_status_code_fn(e)\n except Exception as ex_status_fn:\n logging.warning(","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils._handle_rate_limit_error","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils._handle_rate_limit_error#L175-L184","kind":"function","name":"_handle_rate_limit_error","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":175,"end_line":184,"context_start_line":155,"context_end_line":204,"code":"\n return min(initial_delay * (2**exponent_attempt), max_delay)\n\n def _handle_invalid_response_content(attempt):\n logging.warning(\n f\"[Attempt {attempt}/{max_retries}] API response deemed invalid by validation function. Retrying after delay...\"\n )\n if attempt < max_retries:\n # For the first retry due to invalid content, use attempt-1 for exponent\n delay = _calculate_delay(\n attempt,\n initial_retry_delay_seconds,\n max_retry_delay_seconds,\n is_first_attempt_for_type=True,\n )\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to invalid response content.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_rate_limit_error(e, attempt):\n logging.warning(\n f\"[Attempt {attempt}/{max_retries}] Rate limit error: {e}. 
Retrying after delay...\"\n )\n if attempt < max_retries:\n delay = _calculate_delay(attempt, initial_retry_delay_seconds, max_retry_delay_seconds)\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to rate limit.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_api_error(e, attempt):\n logging.error(f\"[Attempt {attempt}/{max_retries}] APIError: {e}\")\n status_code = None\n if get_status_code_fn:\n try:\n status_code = get_status_code_fn(e)\n except Exception as ex_status_fn:\n logging.warning(\n f\"Could not get status code from exception {type(e)} using get_status_code_fn: {ex_status_fn}\"\n )\n\n if status_code == 429 or (status_code and status_code >= 500):\n log_msg = \"Rate limit (429)\" if status_code == 429 else f\"Server error ({status_code})\"\n logging.warning(f\"{log_msg} indicated by status code. Retrying after delay...\")\n if attempt < max_retries:\n delay = _calculate_delay(\n attempt, initial_retry_delay_seconds, max_retry_delay_seconds\n )\n logging.debug(","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils._handle_api_error","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils._handle_api_error#L186-L214","kind":"function","name":"_handle_api_error","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":186,"end_line":214,"context_start_line":166,"context_end_line":234,"code":" initial_retry_delay_seconds,\n max_retry_delay_seconds,\n is_first_attempt_for_type=True,\n )\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to invalid response content.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_rate_limit_error(e, attempt):\n logging.warning(\n f\"[Attempt {attempt}/{max_retries}] Rate limit error: {e}. Retrying after delay...\"\n )\n if attempt < max_retries:\n delay = _calculate_delay(attempt, initial_retry_delay_seconds, max_retry_delay_seconds)\n logging.debug(f\"Sleeping for {delay:.2f} seconds due to rate limit.\")\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n\n def _handle_api_error(e, attempt):\n logging.error(f\"[Attempt {attempt}/{max_retries}] APIError: {e}\")\n status_code = None\n if get_status_code_fn:\n try:\n status_code = get_status_code_fn(e)\n except Exception as ex_status_fn:\n logging.warning(\n f\"Could not get status code from exception {type(e)} using get_status_code_fn: {ex_status_fn}\"\n )\n\n if status_code == 429 or (status_code and status_code >= 500):\n log_msg = \"Rate limit (429)\" if status_code == 429 else f\"Server error ({status_code})\"\n logging.warning(f\"{log_msg} indicated by status code. Retrying after delay...\")\n if attempt < max_retries:\n delay = _calculate_delay(\n attempt, initial_retry_delay_seconds, max_retry_delay_seconds\n )\n logging.debug(\n f\"Sleeping for {delay:.2f} seconds due to API error status {status_code}.\"\n )\n time.sleep(delay)\n return True # Indicate retry\n return False # Max retries reached for this path\n else:\n logging.error(\n f\"Non-retriable or unrecognized API error occurred (status: {status_code}). 
Raising.\"\n )\n raise e # Re-raise non-retriable error\n\n # Main retry loop\n for attempt in range(1, max_retries + 1):\n try:\n response = client_function(**api_params)\n\n if is_response_valid_fn(response):\n logging.info(f\"[Attempt {attempt}/{max_retries}] API call succeeded.\")\n return response\n else:\n if _handle_invalid_response_content(attempt):\n continue\n else: # Max retries reached after invalid content\n break\n\n except rate_limit_exceptions as e:\n if _handle_rate_limit_error(e, attempt):\n continue\n else: # Max retries reached after rate limit\n break","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.is_openai_response_valid","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.is_openai_response_valid#L268-L278","kind":"function","name":"is_openai_response_valid","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":268,"end_line":278,"context_start_line":248,"context_end_line":298,"code":"\n logging.error(f\"Exceeded maximum {max_retries} retry attempts. API call failed.\")\n raise RuntimeError(f\"API call failed after {max_retries} retries.\")\n\n\ndef call_openai_api_with_retries(client_function, api_params, max_retries=10):\n \"\"\"\n Makes an OpenAI API call with retries for transient failures,\n rate limiting, and invalid or error-containing responses.\n (This is now a wrapper around generic_call_api_with_retries for OpenAI)\n\n Args:\n client_function: The OpenAI API client function to call.\n api_params: Parameters to pass to the client function.\n max_retries: Maximum number of retry attempts.\n\n Returns:\n The OpenAI API response if successful.\n \"\"\"\n\n def is_openai_response_valid(response):\n # Check for explicit error field in response object first\n if getattr(response, \"error\", None):\n logging.warning(f\"OpenAI API response contains an error attribute: {response.error}\")\n return False # Treat as invalid for retry purposes\n if hasattr(response, \"choices\") and response.choices: # Chat Completion API\n return True\n if hasattr(response, \"output\") and response.output: # Response API\n return True\n logging.warning(\"OpenAI API response is missing 'choices' or 'output' is empty.\")\n return False\n\n def get_openai_status_code(exception):\n return getattr(exception, \"http_status\", None)\n\n return generic_call_api_with_retries(\n client_function=client_function,\n api_params=api_params,\n is_response_valid_fn=is_openai_response_valid,\n rate_limit_exceptions=(openai.RateLimitError,),\n api_error_exceptions=(openai.APIError,), # openai.RateLimitError is caught first\n get_status_code_fn=get_openai_status_code,\n max_retries=max_retries,\n # You can also pass initial_retry_delay_seconds and max_retry_delay_seconds\n # if you want to customize them from their defaults in the generic function.\n )\n\n\ndef call_anthropic_api_with_retries(client_function, api_params, max_retries=10):\n \"\"\"\n Makes an Anthropic API call with retries for transient failures,","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} 
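A hedged usage sketch of `call_openai_api_with_retries` as defined above. The model id and prompt are placeholders, and a configured `OPENAI_API_KEY` is assumed; the wrapper retries on rate limits, retriable API errors, and responses rejected by `is_openai_response_valid`, sleeping with the exponential backoff from `_calculate_delay` (min(initial_delay * 2**attempt, max_delay)).

```python
# Sketch only: assumes the openai>=1.x client is installed and OPENAI_API_KEY is set.
import openai

from agentlab.llm.llm_utils import call_openai_api_with_retries

client = openai.OpenAI()  # reads OPENAI_API_KEY from the environment

response = call_openai_api_with_retries(
    client_function=client.chat.completions.create,
    api_params={
        "model": "gpt-4o-mini",  # placeholder model id
        "messages": [{"role": "user", "content": "Reply with the word 'ok'."}],
    },
    max_retries=3,
)
# is_openai_response_valid guarantees a non-empty .choices on success.
print(response.choices[0].message.content)
```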
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.get_openai_status_code","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.get_openai_status_code#L280-L281","kind":"function","name":"get_openai_status_code","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":280,"end_line":281,"context_start_line":260,"context_end_line":301,"code":" client_function: The OpenAI API client function to call.\n api_params: Parameters to pass to the client function.\n max_retries: Maximum number of retry attempts.\n\n Returns:\n The OpenAI API response if successful.\n \"\"\"\n\n def is_openai_response_valid(response):\n # Check for explicit error field in response object first\n if getattr(response, \"error\", None):\n logging.warning(f\"OpenAI API response contains an error attribute: {response.error}\")\n return False # Treat as invalid for retry purposes\n if hasattr(response, \"choices\") and response.choices: # Chat Completion API\n return True\n if hasattr(response, \"output\") and response.output: # Response API\n return True\n logging.warning(\"OpenAI API response is missing 'choices' or 'output' is empty.\")\n return False\n\n def get_openai_status_code(exception):\n return getattr(exception, \"http_status\", None)\n\n return generic_call_api_with_retries(\n client_function=client_function,\n api_params=api_params,\n is_response_valid_fn=is_openai_response_valid,\n rate_limit_exceptions=(openai.RateLimitError,),\n api_error_exceptions=(openai.APIError,), # openai.RateLimitError is caught first\n get_status_code_fn=get_openai_status_code,\n max_retries=max_retries,\n # You can also pass initial_retry_delay_seconds and max_retry_delay_seconds\n # if you want to customize them from their defaults in the generic function.\n )\n\n\ndef call_anthropic_api_with_retries(client_function, api_params, max_retries=10):\n \"\"\"\n Makes an Anthropic API call with retries for transient failures,\n rate limiting, and invalid responses.\n (This is a wrapper around generic_call_api_with_retries for Anthropic)\n","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.is_anthropic_response_valid","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.is_anthropic_response_valid#L311-L345","kind":"function","name":"is_anthropic_response_valid","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":311,"end_line":345,"context_start_line":291,"context_end_line":365,"code":" # You can also pass initial_retry_delay_seconds and max_retry_delay_seconds\n # if you want to customize them from their defaults in the generic function.\n )\n\n\ndef call_anthropic_api_with_retries(client_function, api_params, max_retries=10):\n \"\"\"\n Makes an Anthropic API call with retries for transient failures,\n rate limiting, and invalid responses.\n (This is a wrapper around generic_call_api_with_retries for Anthropic)\n\n Args:\n client_function: The Anthropic API client function to call.\n api_params: Parameters to pass to the client function.\n max_retries: Maximum number of retry attempts.\n\n Returns:\n The Anthropic API response if successful.\n \"\"\"\n\n def is_anthropic_response_valid(response):\n \"\"\"Checks if the Anthropic response is valid.\"\"\"\n # A successful Anthropic message response typically has:\n # - a 'type' attribute equal to 'message' (for message creation)\n # - a 'content' attribute which is a list of blocks\n # - no 'error' attribute 
at the top level of the response object itself\n # (errors are usually raised as exceptions by the client)\n\n if not response:\n logging.warning(\"Anthropic API response is None or empty.\")\n return False\n\n # Check for explicit error type if the API might return it in a 200 OK\n # For anthropic.types.Message, an error would typically be an exception.\n # However, if the client_function could return a dict with an 'error' key:\n if isinstance(response, dict) and response.get(\"type\") == \"error\":\n logging.warning(f\"Anthropic API response indicates an error: {response.get('error')}\")\n return False\n\n # For anthropic.types.Message objects from client.messages.create\n if hasattr(response, \"type\") and response.type == \"message\":\n if hasattr(response, \"content\") and isinstance(response.content, list):\n # Optionally, check if content is not empty, though an empty content list\n # might be valid for some assistant stop reasons.\n return True\n else:\n logging.warning(\n \"Anthropic API response is of type 'message' but missing valid 'content'.\"\n )\n return False\n\n logging.warning(\n f\"Anthropic API response does not appear to be a valid message object. Type: {getattr(response, 'type', 'N/A')}\"\n )\n return False\n\n def get_anthropic_status_code(exception):\n \"\"\"Extracts HTTP status code from an Anthropic exception.\"\"\"\n # anthropic.APIStatusError has a 'status_code' attribute\n return getattr(exception, \"status_code\", None)\n\n # Define Anthropic specific exceptions.\n # anthropic.RateLimitError for specific rate limit errors.\n # anthropic.APIError is a base class for many errors.\n # anthropic.APIStatusError provides status_code.\n # anthropic.APIConnectionError for network issues.\n # Order can matter if there's inheritance; specific ones first.\n\n # Ensure these are the correct exception types from your installed anthropic library version.\n anthropic_rate_limit_exception = anthropic.RateLimitError\n # Broader API errors, APIStatusError is more specific for HTTP status related issues.\n # APIConnectionError for network problems. APIError as a general catch-all.\n anthropic_api_error_exceptions = (\n anthropic.APIStatusError, # Catches errors with a status_code\n anthropic.APIConnectionError, # Catches network-related issues","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.get_anthropic_status_code","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.get_anthropic_status_code#L347-L350","kind":"function","name":"get_anthropic_status_code","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":347,"end_line":350,"context_start_line":327,"context_end_line":370,"code":" logging.warning(f\"Anthropic API response indicates an error: {response.get('error')}\")\n return False\n\n # For anthropic.types.Message objects from client.messages.create\n if hasattr(response, \"type\") and response.type == \"message\":\n if hasattr(response, \"content\") and isinstance(response.content, list):\n # Optionally, check if content is not empty, though an empty content list\n # might be valid for some assistant stop reasons.\n return True\n else:\n logging.warning(\n \"Anthropic API response is of type 'message' but missing valid 'content'.\"\n )\n return False\n\n logging.warning(\n f\"Anthropic API response does not appear to be a valid message object. 
Type: {getattr(response, 'type', 'N/A')}\"\n )\n return False\n\n def get_anthropic_status_code(exception):\n \"\"\"Extracts HTTP status code from an Anthropic exception.\"\"\"\n # anthropic.APIStatusError has a 'status_code' attribute\n return getattr(exception, \"status_code\", None)\n\n # Define Anthropic specific exceptions.\n # anthropic.RateLimitError for specific rate limit errors.\n # anthropic.APIError is a base class for many errors.\n # anthropic.APIStatusError provides status_code.\n # anthropic.APIConnectionError for network issues.\n # Order can matter if there's inheritance; specific ones first.\n\n # Ensure these are the correct exception types from your installed anthropic library version.\n anthropic_rate_limit_exception = anthropic.RateLimitError\n # Broader API errors, APIStatusError is more specific for HTTP status related issues.\n # APIConnectionError for network problems. APIError as a general catch-all.\n anthropic_api_error_exceptions = (\n anthropic.APIStatusError, # Catches errors with a status_code\n anthropic.APIConnectionError, # Catches network-related issues\n anthropic.APIError, # General base class for other Anthropic API errors\n )\n\n return generic_call_api_with_retries(\n client_function=client_function,","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.__init__","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.__init__#L854-L859","kind":"function","name":"__init__","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":854,"end_line":859,"context_start_line":834,"context_end_line":879,"code":" if len(self[\"content\"]) == 1:\n self[\"content\"] = self[\"content\"][0][\"text\"]\n\n\nclass SystemMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"system\", content)\n\n\nclass HumanMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"user\", content)\n\n\nclass AIMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]], log_probs=None):\n super().__init__(\"assistant\", content, log_probs=log_probs)\n\n\nclass Discussion:\n def __init__(self, messages: Union[list[BaseMessage], BaseMessage] = None):\n if isinstance(messages, BaseMessage):\n messages = [messages]\n elif messages is None:\n messages = []\n self.messages = messages\n\n @property\n def last_message(self):\n return self.messages[-1]\n\n def merge(self):\n for m in self.messages:\n m.merge()\n\n def __str__(self) -> str:\n return \"\\n\".join(str(m) for m in self.messages)\n\n def to_string(self):\n self.merge()\n return str(self)\n\n def to_openai(self):\n self.merge()\n return self.messages\n","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.__str__","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.__str__#L869-L870","kind":"function","name":"__str__","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":869,"end_line":870,"context_start_line":849,"context_end_line":890,"code":" def __init__(self, content: Union[str, list[dict]], log_probs=None):\n super().__init__(\"assistant\", content, log_probs=log_probs)\n\n\nclass Discussion:\n def __init__(self, messages: Union[list[BaseMessage], BaseMessage] = None):\n if isinstance(messages, BaseMessage):\n messages = [messages]\n elif messages is None:\n 
messages = []\n self.messages = messages\n\n @property\n def last_message(self):\n return self.messages[-1]\n\n def merge(self):\n for m in self.messages:\n m.merge()\n\n def __str__(self) -> str:\n return \"\\n\".join(str(m) for m in self.messages)\n\n def to_string(self):\n self.merge()\n return str(self)\n\n def to_openai(self):\n self.merge()\n return self.messages\n\n def add_message(\n self,\n message: BaseMessage | dict = None,\n role: str = None,\n content: Union[str, list[dict]] = None,\n ):\n if message is None:\n message = BaseMessage(role, content)\n else:\n if isinstance(message, dict):\n message = BaseMessage(**message)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.add_content","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.add_content#L896-L898","kind":"function","name":"add_content","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":896,"end_line":898,"context_start_line":876,"context_end_line":918,"code":" def to_openai(self):\n self.merge()\n return self.messages\n\n def add_message(\n self,\n message: BaseMessage | dict = None,\n role: str = None,\n content: Union[str, list[dict]] = None,\n ):\n if message is None:\n message = BaseMessage(role, content)\n else:\n if isinstance(message, dict):\n message = BaseMessage(**message)\n self.messages.append(message)\n\n def append(self, message: BaseMessage | dict):\n self.add_message(message)\n\n def add_content(self, type: str, content: Any):\n \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)\n\n def add_text(self, text: str):\n \"\"\"Add text to the last message.\"\"\"\n self.last_message.add_text(text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = None):\n \"\"\"Add an image to the last message.\"\"\"\n self.last_message.add_image(image, detail)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __len__(self):\n return len(self.messages)\n\n def __getitem__(self, key):\n return self.messages[key]\n\n def to_markdown(self):\n self.merge()","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.add_text","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.add_text#L900-L902","kind":"function","name":"add_text","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":900,"end_line":902,"context_start_line":880,"context_end_line":922,"code":" def add_message(\n self,\n message: BaseMessage | dict = None,\n role: str = None,\n content: Union[str, list[dict]] = None,\n ):\n if message is None:\n message = BaseMessage(role, content)\n else:\n if isinstance(message, dict):\n message = BaseMessage(**message)\n self.messages.append(message)\n\n def append(self, message: BaseMessage | dict):\n self.add_message(message)\n\n def add_content(self, type: str, content: Any):\n \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)\n\n def add_text(self, text: str):\n \"\"\"Add text to the last message.\"\"\"\n self.last_message.add_text(text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = None):\n \"\"\"Add an image to the last message.\"\"\"\n self.last_message.add_image(image, detail)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __len__(self):\n return len(self.messages)\n\n def 
__getitem__(self, key):\n return self.messages[key]\n\n def to_markdown(self):\n self.merge()\n return \"\\n\".join([f\"Message {i}\\n{m.to_markdown()}\\n\" for i, m in enumerate(self.messages)])\n\n\nif __name__ == \"__main__\":","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.add_image","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.add_image#L904-L906","kind":"function","name":"add_image","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":904,"end_line":906,"context_start_line":884,"context_end_line":926,"code":" content: Union[str, list[dict]] = None,\n ):\n if message is None:\n message = BaseMessage(role, content)\n else:\n if isinstance(message, dict):\n message = BaseMessage(**message)\n self.messages.append(message)\n\n def append(self, message: BaseMessage | dict):\n self.add_message(message)\n\n def add_content(self, type: str, content: Any):\n \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)\n\n def add_text(self, text: str):\n \"\"\"Add text to the last message.\"\"\"\n self.last_message.add_text(text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = None):\n \"\"\"Add an image to the last message.\"\"\"\n self.last_message.add_image(image, detail)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __len__(self):\n return len(self.messages)\n\n def __getitem__(self, key):\n return self.messages[key]\n\n def to_markdown(self):\n self.merge()\n return \"\\n\".join([f\"Message {i}\\n{m.to_markdown()}\\n\" for i, m in enumerate(self.messages)])\n\n\nif __name__ == \"__main__\":\n # model_to_download = \"THUDM/agentlm-70b\"\n model_to_download = \"databricks/dbrx-instruct\"\n save_dir = \"/mnt/ui_copilot/data_rw/base_models/\"\n # set the following env variable to enable the transfer of the model","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.to_markdown","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.to_markdown#L917-L919","kind":"function","name":"to_markdown","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":917,"end_line":919,"context_start_line":897,"context_end_line":928,"code":" \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)\n\n def add_text(self, text: str):\n \"\"\"Add text to the last message.\"\"\"\n self.last_message.add_text(text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = None):\n \"\"\"Add an image to the last message.\"\"\"\n self.last_message.add_image(image, detail)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __len__(self):\n return len(self.messages)\n\n def __getitem__(self, key):\n return self.messages[key]\n\n def to_markdown(self):\n self.merge()\n return \"\\n\".join([f\"Message {i}\\n{m.to_markdown()}\\n\" for i, m in enumerate(self.messages)])\n\n\nif __name__ == \"__main__\":\n # model_to_download = \"THUDM/agentlm-70b\"\n model_to_download = \"databricks/dbrx-instruct\"\n save_dir = \"/mnt/ui_copilot/data_rw/base_models/\"\n # set the following env variable to enable the transfer of the model\n os.environ[\"HF_HUB_ENABLE_HF_TRANSFER\"] = \"1\"\n download_and_save_model(model_to_download, 
save_dir=save_dir)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.merge","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.merge#L865-L867","kind":"function","name":"merge","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":865,"end_line":867,"context_start_line":845,"context_end_line":887,"code":" super().__init__(\"user\", content)\n\n\nclass AIMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]], log_probs=None):\n super().__init__(\"assistant\", content, log_probs=log_probs)\n\n\nclass Discussion:\n def __init__(self, messages: Union[list[BaseMessage], BaseMessage] = None):\n if isinstance(messages, BaseMessage):\n messages = [messages]\n elif messages is None:\n messages = []\n self.messages = messages\n\n @property\n def last_message(self):\n return self.messages[-1]\n\n def merge(self):\n for m in self.messages:\n m.merge()\n\n def __str__(self) -> str:\n return \"\\n\".join(str(m) for m in self.messages)\n\n def to_string(self):\n self.merge()\n return str(self)\n\n def to_openai(self):\n self.merge()\n return self.messages\n\n def add_message(\n self,\n message: BaseMessage | dict = None,\n role: str = None,\n content: Union[str, list[dict]] = None,\n ):\n if message is None:\n message = BaseMessage(role, content)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.last_message","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.last_message#L862-L863","kind":"function","name":"last_message","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":862,"end_line":863,"context_start_line":842,"context_end_line":883,"code":"\nclass HumanMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]]):\n super().__init__(\"user\", content)\n\n\nclass AIMessage(BaseMessage):\n def __init__(self, content: Union[str, list[dict]], log_probs=None):\n super().__init__(\"assistant\", content, log_probs=log_probs)\n\n\nclass Discussion:\n def __init__(self, messages: Union[list[BaseMessage], BaseMessage] = None):\n if isinstance(messages, BaseMessage):\n messages = [messages]\n elif messages is None:\n messages = []\n self.messages = messages\n\n @property\n def last_message(self):\n return self.messages[-1]\n\n def merge(self):\n for m in self.messages:\n m.merge()\n\n def __str__(self) -> str:\n return \"\\n\".join(str(m) for m in self.messages)\n\n def to_string(self):\n self.merge()\n return str(self)\n\n def to_openai(self):\n self.merge()\n return self.messages\n\n def add_message(\n self,\n message: BaseMessage | dict = None,\n role: str = None,","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.to_string","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.to_string#L872-L874","kind":"function","name":"to_string","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":872,"end_line":874,"context_start_line":852,"context_end_line":894,"code":"\nclass Discussion:\n def __init__(self, messages: Union[list[BaseMessage], BaseMessage] = None):\n if isinstance(messages, BaseMessage):\n messages = [messages]\n elif messages is None:\n messages = []\n self.messages = messages\n\n @property\n def last_message(self):\n return 
self.messages[-1]\n\n def merge(self):\n for m in self.messages:\n m.merge()\n\n def __str__(self) -> str:\n return \"\\n\".join(str(m) for m in self.messages)\n\n def to_string(self):\n self.merge()\n return str(self)\n\n def to_openai(self):\n self.merge()\n return self.messages\n\n def add_message(\n self,\n message: BaseMessage | dict = None,\n role: str = None,\n content: Union[str, list[dict]] = None,\n ):\n if message is None:\n message = BaseMessage(role, content)\n else:\n if isinstance(message, dict):\n message = BaseMessage(**message)\n self.messages.append(message)\n\n def append(self, message: BaseMessage | dict):\n self.add_message(message)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.to_openai","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.to_openai#L876-L878","kind":"function","name":"to_openai","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":876,"end_line":878,"context_start_line":856,"context_end_line":898,"code":" messages = [messages]\n elif messages is None:\n messages = []\n self.messages = messages\n\n @property\n def last_message(self):\n return self.messages[-1]\n\n def merge(self):\n for m in self.messages:\n m.merge()\n\n def __str__(self) -> str:\n return \"\\n\".join(str(m) for m in self.messages)\n\n def to_string(self):\n self.merge()\n return str(self)\n\n def to_openai(self):\n self.merge()\n return self.messages\n\n def add_message(\n self,\n message: BaseMessage | dict = None,\n role: str = None,\n content: Union[str, list[dict]] = None,\n ):\n if message is None:\n message = BaseMessage(role, content)\n else:\n if isinstance(message, dict):\n message = BaseMessage(**message)\n self.messages.append(message)\n\n def append(self, message: BaseMessage | dict):\n self.add_message(message)\n\n def add_content(self, type: str, content: Any):\n \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.add_message","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.add_message#L880-L891","kind":"function","name":"add_message","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":880,"end_line":891,"context_start_line":860,"context_end_line":911,"code":"\n @property\n def last_message(self):\n return self.messages[-1]\n\n def merge(self):\n for m in self.messages:\n m.merge()\n\n def __str__(self) -> str:\n return \"\\n\".join(str(m) for m in self.messages)\n\n def to_string(self):\n self.merge()\n return str(self)\n\n def to_openai(self):\n self.merge()\n return self.messages\n\n def add_message(\n self,\n message: BaseMessage | dict = None,\n role: str = None,\n content: Union[str, list[dict]] = None,\n ):\n if message is None:\n message = BaseMessage(role, content)\n else:\n if isinstance(message, dict):\n message = BaseMessage(**message)\n self.messages.append(message)\n\n def append(self, message: BaseMessage | dict):\n self.add_message(message)\n\n def add_content(self, type: str, content: Any):\n \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)\n\n def add_text(self, text: str):\n \"\"\"Add text to the last message.\"\"\"\n self.last_message.add_text(text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = 
None):\n \"\"\"Add an image to the last message.\"\"\"\n self.last_message.add_image(image, detail)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __len__(self):","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.append","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.append#L893-L894","kind":"function","name":"append","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":893,"end_line":894,"context_start_line":873,"context_end_line":914,"code":" self.merge()\n return str(self)\n\n def to_openai(self):\n self.merge()\n return self.messages\n\n def add_message(\n self,\n message: BaseMessage | dict = None,\n role: str = None,\n content: Union[str, list[dict]] = None,\n ):\n if message is None:\n message = BaseMessage(role, content)\n else:\n if isinstance(message, dict):\n message = BaseMessage(**message)\n self.messages.append(message)\n\n def append(self, message: BaseMessage | dict):\n self.add_message(message)\n\n def add_content(self, type: str, content: Any):\n \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)\n\n def add_text(self, text: str):\n \"\"\"Add text to the last message.\"\"\"\n self.last_message.add_text(text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = None):\n \"\"\"Add an image to the last message.\"\"\"\n self.last_message.add_image(image, detail)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __len__(self):\n return len(self.messages)\n\n def __getitem__(self, key):","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.__iter__","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.__iter__#L908-L909","kind":"function","name":"__iter__","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":908,"end_line":909,"context_start_line":888,"context_end_line":928,"code":" else:\n if isinstance(message, dict):\n message = BaseMessage(**message)\n self.messages.append(message)\n\n def append(self, message: BaseMessage | dict):\n self.add_message(message)\n\n def add_content(self, type: str, content: Any):\n \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)\n\n def add_text(self, text: str):\n \"\"\"Add text to the last message.\"\"\"\n self.last_message.add_text(text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = None):\n \"\"\"Add an image to the last message.\"\"\"\n self.last_message.add_image(image, detail)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __len__(self):\n return len(self.messages)\n\n def __getitem__(self, key):\n return self.messages[key]\n\n def to_markdown(self):\n self.merge()\n return \"\\n\".join([f\"Message {i}\\n{m.to_markdown()}\\n\" for i, m in enumerate(self.messages)])\n\n\nif __name__ == \"__main__\":\n # model_to_download = \"THUDM/agentlm-70b\"\n model_to_download = \"databricks/dbrx-instruct\"\n save_dir = \"/mnt/ui_copilot/data_rw/base_models/\"\n # set the following env variable to enable the transfer of the model\n os.environ[\"HF_HUB_ENABLE_HF_TRANSFER\"] = \"1\"\n download_and_save_model(model_to_download, save_dir=save_dir)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} 
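The records above capture the lightweight chat-message containers in src/agentlab/llm/llm_utils.py (SystemMessage, HumanMessage, AIMessage, and the Discussion list wrapper). As a quick orientation, here is a minimal usage sketch; it is not part of the indexed source, and the import path plus the single-part merge behavior are assumptions read off the quoted snippets:

from agentlab.llm.llm_utils import Discussion, HumanMessage, SystemMessage

# Discussion accepts a single BaseMessage or a list of them.
messages = Discussion(SystemMessage("You are a concise assistant."))
messages.append(HumanMessage("Summarize the page."))
messages.add_text("Focus on the form fields.")  # delegates to the last message
# messages.add_image(screenshot)  # per the signature: np.ndarray, PIL.Image.Image, or str

assert len(messages) == 2            # __len__ counts messages
first = messages[0]                  # __getitem__ indexes into .messages
openai_messages = messages.to_openai()  # merge() appears to collapse single-part content to a plain string
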
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.__len__","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.__len__#L911-L912","kind":"function","name":"__len__","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":911,"end_line":912,"context_start_line":891,"context_end_line":928,"code":" self.messages.append(message)\n\n def append(self, message: BaseMessage | dict):\n self.add_message(message)\n\n def add_content(self, type: str, content: Any):\n \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)\n\n def add_text(self, text: str):\n \"\"\"Add text to the last message.\"\"\"\n self.last_message.add_text(text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = None):\n \"\"\"Add an image to the last message.\"\"\"\n self.last_message.add_image(image, detail)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __len__(self):\n return len(self.messages)\n\n def __getitem__(self, key):\n return self.messages[key]\n\n def to_markdown(self):\n self.merge()\n return \"\\n\".join([f\"Message {i}\\n{m.to_markdown()}\\n\" for i, m in enumerate(self.messages)])\n\n\nif __name__ == \"__main__\":\n # model_to_download = \"THUDM/agentlm-70b\"\n model_to_download = \"databricks/dbrx-instruct\"\n save_dir = \"/mnt/ui_copilot/data_rw/base_models/\"\n # set the following env variable to enable the transfer of the model\n os.environ[\"HF_HUB_ENABLE_HF_TRANSFER\"] = \"1\"\n download_and_save_model(model_to_download, save_dir=save_dir)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.llm_utils.__getitem__","uri":"program://AgentLab/function/src.agentlab.llm.llm_utils.__getitem__#L914-L915","kind":"function","name":"__getitem__","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":914,"end_line":915,"context_start_line":894,"context_end_line":928,"code":" self.add_message(message)\n\n def add_content(self, type: str, content: Any):\n \"\"\"Add content to the last message.\"\"\"\n self.last_message.add_content(type, content)\n\n def add_text(self, text: str):\n \"\"\"Add text to the last message.\"\"\"\n self.last_message.add_text(text)\n\n def add_image(self, image: np.ndarray | Image.Image | str, detail: str = None):\n \"\"\"Add an image to the last message.\"\"\"\n self.last_message.add_image(image, detail)\n\n def __iter__(self):\n return iter(self.messages)\n\n def __len__(self):\n return len(self.messages)\n\n def __getitem__(self, key):\n return self.messages[key]\n\n def to_markdown(self):\n self.merge()\n return \"\\n\".join([f\"Message {i}\\n{m.to_markdown()}\\n\" for i, m in enumerate(self.messages)])\n\n\nif __name__ == \"__main__\":\n # model_to_download = \"THUDM/agentlm-70b\"\n model_to_download = \"databricks/dbrx-instruct\"\n save_dir = \"/mnt/ui_copilot/data_rw/base_models/\"\n # set the following env variable to enable the transfer of the model\n os.environ[\"HF_HUB_ENABLE_HF_TRANSFER\"] = \"1\"\n download_and_save_model(model_to_download, save_dir=save_dir)","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.prompt_templates","uri":"program://AgentLab/module/src.agentlab.llm.prompt_templates#L1-L89","kind":"module","name":"src.agentlab.llm.prompt_templates","path":"src/agentlab/llm/prompt_templates.py","language":"python","start_line":1,"end_line":89,"context_start_line":1,"context_end_line":89,"code":"from dataclasses import dataclass\nfrom typing import List\n\n\"\"\"\nTo use this class, you should have the ``openai`` python package installed, and the\nenvironment variable ``OPENAI_API_KEY`` set with your API key.\n\"\"\"\n\n\n@dataclass\nclass PromptTemplate:\n \"\"\"\n Base class for prompt templates.\n\n Defines a standard interface for prompt templates, ensuring that they contain\n the required fields for the CustomLLMChatbot.\n \"\"\"\n\n system: str\n human: str\n ai: str\n prompt_end: str = \"\"\n\n def format_message(self, message: dict) -> str:\n \"\"\"\n Formats a given message based on its type.\n\n Args:\n message (dict): The message to be formatted.\n\n Returns:\n str: The formatted message.\n\n Raises:\n ValueError: If the message type is not supported.\n \"\"\"\n if message[\"role\"] == \"system\":\n return self.system.format(input=message[\"content\"])\n elif message[\"role\"] == \"user\":\n return self.human.format(input=message[\"content\"])\n elif message[\"role\"] == \"assistant\":\n return self.ai.format(input=message[\"content\"])\n else:\n raise ValueError(f\"Message role {message['role']} not supported\")\n\n def construct_prompt(self, messages: List[dict]) -> str:\n \"\"\"\n Constructs a prompt from a list of messages.\n\n Args:\n messages (List[BaseMessage]): The list of messages to be formatted.\n\n Returns:\n str: The constructed prompt.\n\n Raises:\n ValueError: If any element in the list is not of type BaseMessage.\n \"\"\"\n if not all(isinstance(m, dict) and \"role\" in m and \"content\" in m for m in messages):\n raise ValueError(\"All elements in the list must be in openai format\")\n\n prompt = \"\".join([self.format_message(m) for m in messages])\n prompt += self.prompt_end\n return prompt\n\n\ndef get_prompt_template(model_name):\n for key, value in MODEL_PREFIX_TO_PROMPT_TEMPLATES.items():\n if key in model_name:\n return value\n raise NotImplementedError(f\"Model {model_name} has no supported chat template\")\n\n\n## Prompt templates\n\nSTARCHAT_PROMPT_TEMPLATE = PromptTemplate(\n system=\"<|system|>\\n{input}<|end|>\\n\",\n human=\"<|user|>\\n{input}<|end|>\\n\",\n ai=\"<|assistant|>\\n{input}<|end|>\\n\",\n prompt_end=\"<|assistant|>\",\n)\n\n\n## Model prefix to prompt template mapping\n\nMODEL_PREFIX_TO_PROMPT_TEMPLATES = {\n \"starcoder\": STARCHAT_PROMPT_TEMPLATE,\n \"starchat\": STARCHAT_PROMPT_TEMPLATE,\n}","source_hash":"3da81094279e3091ad880202e48336f663aeab6597eebec6c6fdc3a22aa3ac67","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.prompt_templates.PromptTemplate","uri":"program://AgentLab/class/src.agentlab.llm.prompt_templates.PromptTemplate#L11-L64","kind":"class","name":"PromptTemplate","path":"src/agentlab/llm/prompt_templates.py","language":"python","start_line":11,"end_line":64,"context_start_line":1,"context_end_line":84,"code":"from dataclasses import dataclass\nfrom typing import List\n\n\"\"\"\nTo use this class, you should have the ``openai`` python package installed, and the\nenvironment variable ``OPENAI_API_KEY`` set with your API key.\n\"\"\"\n\n\n@dataclass\nclass PromptTemplate:\n \"\"\"\n Base class for prompt templates.\n\n Defines a standard 
interface for prompt templates, ensuring that they contain\n the required fields for the CustomLLMChatbot.\n \"\"\"\n\n system: str\n human: str\n ai: str\n prompt_end: str = \"\"\n\n def format_message(self, message: dict) -> str:\n \"\"\"\n Formats a given message based on its type.\n\n Args:\n message (dict): The message to be formatted.\n\n Returns:\n str: The formatted message.\n\n Raises:\n ValueError: If the message type is not supported.\n \"\"\"\n if message[\"role\"] == \"system\":\n return self.system.format(input=message[\"content\"])\n elif message[\"role\"] == \"user\":\n return self.human.format(input=message[\"content\"])\n elif message[\"role\"] == \"assistant\":\n return self.ai.format(input=message[\"content\"])\n else:\n raise ValueError(f\"Message role {message['role']} not supported\")\n\n def construct_prompt(self, messages: List[dict]) -> str:\n \"\"\"\n Constructs a prompt from a list of messages.\n\n Args:\n messages (List[BaseMessage]): The list of messages to be formatted.\n\n Returns:\n str: The constructed prompt.\n\n Raises:\n ValueError: If any element in the list is not of type BaseMessage.\n \"\"\"\n if not all(isinstance(m, dict) and \"role\" in m and \"content\" in m for m in messages):\n raise ValueError(\"All elements in the list must be in openai format\")\n\n prompt = \"\".join([self.format_message(m) for m in messages])\n prompt += self.prompt_end\n return prompt\n\n\ndef get_prompt_template(model_name):\n for key, value in MODEL_PREFIX_TO_PROMPT_TEMPLATES.items():\n if key in model_name:\n return value\n raise NotImplementedError(f\"Model {model_name} has no supported chat template\")\n\n\n## Prompt templates\n\nSTARCHAT_PROMPT_TEMPLATE = PromptTemplate(\n system=\"<|system|>\\n{input}<|end|>\\n\",\n human=\"<|user|>\\n{input}<|end|>\\n\",\n ai=\"<|assistant|>\\n{input}<|end|>\\n\",\n prompt_end=\"<|assistant|>\",\n)\n\n\n## Model prefix to prompt template mapping","source_hash":"3da81094279e3091ad880202e48336f663aeab6597eebec6c6fdc3a22aa3ac67","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.prompt_templates.get_prompt_template","uri":"program://AgentLab/function/src.agentlab.llm.prompt_templates.get_prompt_template#L67-L71","kind":"function","name":"get_prompt_template","path":"src/agentlab/llm/prompt_templates.py","language":"python","start_line":67,"end_line":71,"context_start_line":47,"context_end_line":89,"code":" \"\"\"\n Constructs a prompt from a list of messages.\n\n Args:\n messages (List[BaseMessage]): The list of messages to be formatted.\n\n Returns:\n str: The constructed prompt.\n\n Raises:\n ValueError: If any element in the list is not of type BaseMessage.\n \"\"\"\n if not all(isinstance(m, dict) and \"role\" in m and \"content\" in m for m in messages):\n raise ValueError(\"All elements in the list must be in openai format\")\n\n prompt = \"\".join([self.format_message(m) for m in messages])\n prompt += self.prompt_end\n return prompt\n\n\ndef get_prompt_template(model_name):\n for key, value in MODEL_PREFIX_TO_PROMPT_TEMPLATES.items():\n if key in model_name:\n return value\n raise NotImplementedError(f\"Model {model_name} has no supported chat template\")\n\n\n## Prompt templates\n\nSTARCHAT_PROMPT_TEMPLATE = PromptTemplate(\n system=\"<|system|>\\n{input}<|end|>\\n\",\n human=\"<|user|>\\n{input}<|end|>\\n\",\n ai=\"<|assistant|>\\n{input}<|end|>\\n\",\n prompt_end=\"<|assistant|>\",\n)\n\n\n## Model prefix to prompt template mapping\n\nMODEL_PREFIX_TO_PROMPT_TEMPLATES = {\n \"starcoder\": 
STARCHAT_PROMPT_TEMPLATE,\n \"starchat\": STARCHAT_PROMPT_TEMPLATE,\n}","source_hash":"3da81094279e3091ad880202e48336f663aeab6597eebec6c6fdc3a22aa3ac67","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.prompt_templates.format_message","uri":"program://AgentLab/function/src.agentlab.llm.prompt_templates.format_message#L24-L44","kind":"function","name":"format_message","path":"src/agentlab/llm/prompt_templates.py","language":"python","start_line":24,"end_line":44,"context_start_line":4,"context_end_line":64,"code":"\"\"\"\nTo use this class, you should have the ``openai`` python package installed, and the\nenvironment variable ``OPENAI_API_KEY`` set with your API key.\n\"\"\"\n\n\n@dataclass\nclass PromptTemplate:\n \"\"\"\n Base class for prompt templates.\n\n Defines a standard interface for prompt templates, ensuring that they contain\n the required fields for the CustomLLMChatbot.\n \"\"\"\n\n system: str\n human: str\n ai: str\n prompt_end: str = \"\"\n\n def format_message(self, message: dict) -> str:\n \"\"\"\n Formats a given message based on its type.\n\n Args:\n message (dict): The message to be formatted.\n\n Returns:\n str: The formatted message.\n\n Raises:\n ValueError: If the message type is not supported.\n \"\"\"\n if message[\"role\"] == \"system\":\n return self.system.format(input=message[\"content\"])\n elif message[\"role\"] == \"user\":\n return self.human.format(input=message[\"content\"])\n elif message[\"role\"] == \"assistant\":\n return self.ai.format(input=message[\"content\"])\n else:\n raise ValueError(f\"Message role {message['role']} not supported\")\n\n def construct_prompt(self, messages: List[dict]) -> str:\n \"\"\"\n Constructs a prompt from a list of messages.\n\n Args:\n messages (List[BaseMessage]): The list of messages to be formatted.\n\n Returns:\n str: The constructed prompt.\n\n Raises:\n ValueError: If any element in the list is not of type BaseMessage.\n \"\"\"\n if not all(isinstance(m, dict) and \"role\" in m and \"content\" in m for m in messages):\n raise ValueError(\"All elements in the list must be in openai format\")\n\n prompt = \"\".join([self.format_message(m) for m in messages])\n prompt += self.prompt_end\n return prompt","source_hash":"3da81094279e3091ad880202e48336f663aeab6597eebec6c6fdc3a22aa3ac67","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.prompt_templates.construct_prompt","uri":"program://AgentLab/function/src.agentlab.llm.prompt_templates.construct_prompt#L46-L64","kind":"function","name":"construct_prompt","path":"src/agentlab/llm/prompt_templates.py","language":"python","start_line":46,"end_line":64,"context_start_line":26,"context_end_line":84,"code":" Formats a given message based on its type.\n\n Args:\n message (dict): The message to be formatted.\n\n Returns:\n str: The formatted message.\n\n Raises:\n ValueError: If the message type is not supported.\n \"\"\"\n if message[\"role\"] == \"system\":\n return self.system.format(input=message[\"content\"])\n elif message[\"role\"] == \"user\":\n return self.human.format(input=message[\"content\"])\n elif message[\"role\"] == \"assistant\":\n return self.ai.format(input=message[\"content\"])\n else:\n raise ValueError(f\"Message role {message['role']} not supported\")\n\n def construct_prompt(self, messages: List[dict]) -> str:\n \"\"\"\n Constructs a prompt from a list of messages.\n\n Args:\n messages (List[BaseMessage]): The list of messages to be formatted.\n\n Returns:\n str: The constructed prompt.\n\n Raises:\n 
ValueError: If any element in the list is not of type BaseMessage.\n \"\"\"\n if not all(isinstance(m, dict) and \"role\" in m and \"content\" in m for m in messages):\n raise ValueError(\"All elements in the list must be in openai format\")\n\n prompt = \"\".join([self.format_message(m) for m in messages])\n prompt += self.prompt_end\n return prompt\n\n\ndef get_prompt_template(model_name):\n for key, value in MODEL_PREFIX_TO_PROMPT_TEMPLATES.items():\n if key in model_name:\n return value\n raise NotImplementedError(f\"Model {model_name} has no supported chat template\")\n\n\n## Prompt templates\n\nSTARCHAT_PROMPT_TEMPLATE = PromptTemplate(\n system=\"<|system|>\\n{input}<|end|>\\n\",\n human=\"<|user|>\\n{input}<|end|>\\n\",\n ai=\"<|assistant|>\\n{input}<|end|>\\n\",\n prompt_end=\"<|assistant|>\",\n)\n\n\n## Model prefix to prompt template mapping","source_hash":"3da81094279e3091ad880202e48336f663aeab6597eebec6c6fdc3a22aa3ac67","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking","uri":"program://AgentLab/module/src.agentlab.llm.tracking#L1-L397","kind":"module","name":"src.agentlab.llm.tracking","path":"src/agentlab/llm/tracking.py","language":"python","start_line":1,"end_line":397,"context_start_line":1,"context_end_line":397,"code":"import importlib\nimport logging\nimport os\nimport re\nimport threading\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass, field\nfrom functools import cache, partial\nfrom typing import Optional\n\nimport requests\n\nlangchain_community = importlib.util.find_spec(\"langchain_community\")\nif langchain_community is not None:\n from langchain_community.callbacks import bedrock_anthropic_callback, openai_info\nelse:\n bedrock_anthropic_callback = None\n openai_info = None\nfrom litellm import completion_cost, get_model_info\n\nTRACKER = threading.local()\n\nANTHROPIC_CACHE_PRICING_FACTOR = {\n \"cache_read_tokens\": 0.1, # Cost for 5 min ephemeral cache. See Pricing Here: https://docs.anthropic.com/en/docs/about-claude/pricing#model-pricing\n \"cache_write_tokens\": 1.25,\n}\n\nOPENAI_CACHE_PRICING_FACTOR = {\n \"cache_read_tokens\": 0.5, # This is a an upper bound. 
See Pricing Here: https://platform.openai.com/docs/pricing\n \"cache_write_tokens\": 1,\n}\n\n\nclass LLMTracker:\n def __init__(self, suffix=\"\"):\n self.input_tokens = 0\n self.output_tokens = 0\n self.cost = 0.0\n self.input_tokens_key = \"input_tokens_\" + suffix if suffix else \"input_tokens\"\n self.output_tokens_key = \"output_tokens_\" + suffix if suffix else \"output_tokens\"\n self.cost_key = \"cost_\" + suffix if suffix else \"cost\"\n\n def __call__(self, input_tokens: int, output_tokens: int, cost: float):\n self.input_tokens += input_tokens\n self.output_tokens += output_tokens\n self.cost += cost\n\n @property\n def stats(self):\n return {\n self.input_tokens_key: self.input_tokens,\n self.output_tokens_key: self.output_tokens,\n self.cost_key: self.cost,\n }\n\n def add_tracker(self, tracker: \"LLMTracker\"):\n self(tracker.input_tokens, tracker.output_tokens, tracker.cost)\n\n def __repr__(self):\n return f\"LLMTracker(input_tokens={self.input_tokens}, output_tokens={self.output_tokens}, cost={self.cost})\"\n\n\n@contextmanager\ndef set_tracker(suffix=\"\"):\n global TRACKER\n if not hasattr(TRACKER, \"instance\"):\n TRACKER.instance = None\n previous_tracker = TRACKER.instance # type: LLMTracker\n TRACKER.instance = LLMTracker(suffix)\n try:\n yield TRACKER.instance\n finally:\n # If there was a previous tracker, add the current one to it\n if isinstance(previous_tracker, LLMTracker):\n previous_tracker.add_tracker(TRACKER.instance)\n # Restore the previous tracker\n TRACKER.instance = previous_tracker\n\n\ndef cost_tracker_decorator(get_action, suffix=\"\"):\n def wrapper(self, obs):\n with set_tracker(suffix) as tracker:\n action, agent_info = get_action(self, obs)\n agent_info.get(\"stats\").update(tracker.stats)\n return action, agent_info\n\n return wrapper\n\n\n@cache\ndef get_pricing_openrouter():\n \"\"\"Returns a dictionary of model pricing for OpenRouter models.\"\"\"\n api_key = os.getenv(\"OPENROUTER_API_KEY\")\n assert api_key, \"OpenRouter API key is required\"\n # query api to get model metadata\n url = \"https://openrouter.ai/api/v1/models\"\n headers = {\"Authorization\": f\"Bearer {api_key}\"}\n response = requests.get(url, headers=headers)\n\n if response.status_code != 200:\n raise ValueError(\"Failed to get model metadata\")\n\n model_metadata = response.json()\n return {\n model[\"id\"]: {k: float(v) for k, v in model[\"pricing\"].items()}\n for model in model_metadata[\"data\"]\n }\n\n\ndef get_pricing_openai():\n \"\"\"Returns a dictionary of model pricing for OpenAI models.\"\"\"\n try:\n cost_dict = openai_info.MODEL_COST_PER_1K_TOKENS\n except Exception as e:\n logging.warning(\n f\"Failed to get OpenAI pricing: {e}. 
\"\n \"Please install langchain-community or use LiteLLM API for pricing information.\"\n )\n return {}\n cost_dict = {k: v / 1000 for k, v in cost_dict.items()}\n res = {}\n for k in cost_dict:\n if k.endswith(\"-completion\"):\n continue\n prompt_key = k\n completion_key = k + \"-completion\"\n if completion_key in cost_dict:\n res[k] = {\n \"prompt\": cost_dict[prompt_key],\n \"completion\": cost_dict[completion_key],\n }\n return res\n\n\ndef _remove_version_suffix(model_name):\n no_version = re.sub(r\"-v\\d+(?:[.:]\\d+)?$\", \"\", model_name)\n return re.sub(r\"anthropic.\", \"\", no_version)\n\n\ndef get_pricing_anthropic():\n \"\"\"Returns a dictionary of model pricing for Anthropic models.\"\"\"\n try:\n input_cost_dict = bedrock_anthropic_callback.MODEL_COST_PER_1K_INPUT_TOKENS\n output_cost_dict = bedrock_anthropic_callback.MODEL_COST_PER_1K_OUTPUT_TOKENS\n except Exception as e:\n logging.warning(\n f\"Failed to get Anthropic pricing: {e}. \"\n \"Please install langchain-community or use LiteLLM API for pricing information.\"\n )\n return {}\n\n res = {}\n for k, v in input_cost_dict.items():\n k = _remove_version_suffix(k)\n res[k] = {\"prompt\": v / 1000}\n\n for k, v in output_cost_dict.items():\n k = _remove_version_suffix(k)\n if k not in res:\n res[k] = {}\n res[k][\"completion\"] = v / 1000\n return res\n\n\ndef get_pricing_litellm(model_name):\n \"\"\"Returns a dictionary of model pricing for a LiteLLM model.\"\"\"\n try:\n info = get_model_info(model_name)\n except Exception as e:\n logging.error(f\"Error fetching model info for {model_name}: {e} from litellm\")\n info = {}\n return {\n model_name: {\n \"prompt\": info.get(\"input_cost_per_token\", 0.0),\n \"completion\": info.get(\"output_cost_per_token\", 0.0),\n }\n }\n\n\nclass TrackAPIPricingMixin:\n \"\"\"Mixin class to handle pricing information for different models.\n This populates the tracker.stats used by the cost_tracker_decorator\n\n Usage: provide the pricing_api to use in the constructor.\n \"\"\"\n\n def reset_stats(self):\n self.stats = Stats()\n\n def init_pricing_tracker(self, pricing_api=None):\n \"\"\"Initialize the pricing tracker with the given API.\"\"\"\n self._pricing_api = pricing_api\n self.set_pricing_attributes()\n self.reset_stats()\n\n def __call__(self, *args, **kwargs):\n \"\"\"Call the API and update the pricing tracker.\"\"\"\n # 'self' here calls ._call_api() method of the subclass\n response = self._call_api(*args, **kwargs)\n usage = dict(getattr(response, \"usage\", {}))\n if \"prompt_tokens_details\" in usage and usage[\"prompt_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"prompt_tokens_details\"].cached_tokens\n if \"input_tokens_details\" in usage and usage[\"input_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"input_tokens_details\"].cached_tokens\n usage = {f\"usage_{k}\": v for k, v in usage.items() if isinstance(v, (int, float))}\n usage |= {\"n_api_calls\": 1}\n usage |= {\"effective_cost\": self.get_effective_cost(response)}\n self.stats.increment_stats_dict(usage)\n self.update_pricing_tracker(response)\n return self._parse_response(response)\n\n def fetch_pricing_information_from_provider(self) -> Optional[dict]:\n \"\"\"\n Fetch the pricing information dictionary for the given provider.\n\n Returns:\n Optional[dict]: A dict mapping model names to pricing info, or None if not found.\n \"\"\"\n pricing_fn_map = {\n \"openai\": get_pricing_openai,\n \"anthropic\": get_pricing_anthropic,\n \"openrouter\": get_pricing_openrouter,\n \"litellm\": 
partial(get_pricing_litellm, self.model_name),\n }\n pricing_fn = pricing_fn_map.get(self._pricing_api, None)\n if pricing_fn is None:\n logging.warning(\n f\"Unsupported provider: {self._pricing_api}. Supported providers are: {list(pricing_fn_map.keys())}\"\n )\n return None\n return pricing_fn()\n\n def set_pricing_attributes(self) -> None:\n \"\"\"Set the pricing attributes for the model based on the provider.\"\"\"\n model_to_price_dict = self.fetch_pricing_information_from_provider()\n model_costs = model_to_price_dict.get(self.model_name) if model_to_price_dict else None\n if model_costs:\n self.input_cost = float(model_costs[\"prompt\"])\n self.output_cost = float(model_costs[\"completion\"])\n else:\n # use litellm to get model info if not found in the pricing dict\n try:\n model_info = get_model_info(self.model_name)\n self.input_cost = float(model_info.get(\"input_cost_per_token\", 0.0))\n self.output_cost = float(model_info.get(\"output_cost_per_token\", 0.0))\n except Exception as e:\n logging.warning(f\"Failed to fetch pricing for {self.model_name}: {e}\")\n self.input_cost = 0.0\n self.output_cost = 0.0\n\n def update_pricing_tracker(self, raw_response) -> None:\n \"\"\"Update the pricing tracker with the input and output tokens and cost.\"\"\"\n\n input_tokens, output_tokens = self.get_tokens_counts_from_response(raw_response)\n cost = input_tokens * self.input_cost + output_tokens * self.output_cost\n\n if hasattr(TRACKER, \"instance\") and isinstance(TRACKER.instance, LLMTracker):\n TRACKER.instance(input_tokens, output_tokens, cost)\n\n def get_tokens_counts_from_response(self, response) -> tuple:\n \"\"\"Get the input and output tokens counts from the response, provider-agnostic.\"\"\"\n # Try OpenAI/Anthropic style\n usage = getattr(response, \"usage\", None)\n if usage:\n input_tokens = getattr(usage, \"input_tokens\", None) or getattr(\n usage, \"prompt_tokens\", None\n )\n output_tokens = getattr(usage, \"output_tokens\", None) or getattr(\n usage, \"completion_tokens\", None\n )\n if input_tokens is not None and output_tokens is not None:\n return input_tokens, output_tokens\n\n # Try dict style\n if isinstance(response, dict) and \"usage\" in response:\n usage = response[\"usage\"]\n input_tokens = usage.get(\"input_tokens\") or usage.get(\"prompt_tokens\")\n output_tokens = usage.get(\"output_tokens\") or usage.get(\"completion_tokens\")\n if input_tokens is not None and output_tokens is not None:\n return input_tokens, output_tokens\n\n logging.warning(\n \"Unable to extract input and output tokens from the response. Defaulting to 0.\"\n )\n return 0, 0\n\n def get_effective_cost(self, response):\n \"\"\"Get the effective cost from the response based on the provider.\"\"\"\n if self._pricing_api == \"anthropic\":\n return self.get_effective_cost_from_antrophic_api(response)\n elif self._pricing_api == \"openai\":\n return self.get_effective_cost_from_openai_api(response)\n elif self._pricing_api == \"litellm\":\n return completion_cost(response)\n else:\n logging.warning(\n f\"Unsupported provider: {self._pricing_api}. 
No effective cost calculated.\"\n )\n return 0.0\n\n def get_effective_cost_from_antrophic_api(self, response) -> float:\n \"\"\"\n Get the effective cost from the Anthropic API response.\n\n Anthropic usage 'input_tokens' are new input tokens (tokens that are not cached).\n Anthropic has different pricing for cache write and cache read tokens.\n See https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#tracking-cache-performance\n\n Args:\n response: The response object from the Anthropic API.\n\n Returns:\n float: The effective cost calculated from the response.\n \"\"\"\n usage = getattr(response, \"usage\", {})\n new_input_tokens = getattr(usage, \"input_tokens\", 0) # new input tokens\n output_tokens = getattr(usage, \"output_tokens\", 0)\n cache_read_tokens = getattr(usage, \"cache_input_tokens\", 0)\n cache_write_tokens = getattr(usage, \"cache_creation_input_tokens\", 0)\n\n cache_read_cost = self.input_cost * ANTHROPIC_CACHE_PRICING_FACTOR[\"cache_read_tokens\"]\n cache_write_cost = self.input_cost * ANTHROPIC_CACHE_PRICING_FACTOR[\"cache_write_tokens\"]\n\n # Calculate the effective cost\n effective_cost = (\n new_input_tokens * self.input_cost\n + output_tokens * self.output_cost\n + cache_read_tokens * cache_read_cost\n + cache_write_tokens * cache_write_cost\n )\n if effective_cost < 0:\n logging.warning(\n \"Anthropic: Negative effective cost detected.(Impossible! Likely a bug)\"\n )\n return effective_cost\n\n def get_effective_cost_from_openai_api(self, response) -> float:\n \"\"\"\n Get the effective cost from the OpenAI API response.\n\n OpenAI usage 'prompt_tokens' are the total input tokens (cache read tokens + new input tokens).\n See https://openai.com/index/api-prompt-caching/\n OpenAI has only one price for cache tokens, i.e., cache read price (generally 50% cheaper).\n OpenAI has no extra charge for cache write tokens.\n See Pricing Here: https://platform.openai.com/docs/pricing\n\n Args:\n response: The response object from the OpenAI API.\n\n Returns:\n float: The effective cost calculated from the response.\n \"\"\"\n usage = getattr(response, \"usage\", None)\n if usage is None:\n logging.warning(\"No usage information found in the response. Defaulting cost to 0.0.\")\n return 0.0\n api_type = \"chatcompletion\" if hasattr(usage, \"prompt_tokens_details\") else \"response\"\n if api_type == \"chatcompletion\":\n total_input_tokens = usage.prompt_tokens # (cache read tokens + new input tokens)\n output_tokens = usage.completion_tokens\n cached_input_tokens = (\n usage.prompt_tokens_details.cached_tokens if usage.prompt_tokens_details else 0\n )\n new_input_tokens = total_input_tokens - cached_input_tokens\n elif api_type == \"response\":\n total_input_tokens = usage.input_tokens # (cache read tokens + new input tokens)\n output_tokens = usage.output_tokens\n cached_input_tokens = (\n usage.input_tokens_details.cached_tokens if usage.input_tokens_details else 0\n )\n new_input_tokens = total_input_tokens - cached_input_tokens\n else:\n logging.warning(f\"Unsupported API type: {api_type}. Defaulting cost to 0.0.\")\n return 0.0\n cache_read_cost = self.input_cost * OPENAI_CACHE_PRICING_FACTOR[\"cache_read_tokens\"]\n effective_cost = (\n self.input_cost * new_input_tokens\n + cached_input_tokens * cache_read_cost\n + self.output_cost * output_tokens\n )\n if effective_cost < 0:\n logging.warning(\n f\"OpenAI: Negative effective cost detected.(Impossible! Likely a bug). 
\"\n f\"New input tokens: {total_input_tokens}\"\n )\n return effective_cost\n\n\n@dataclass\nclass Stats:\n stats_dict: dict = field(default_factory=lambda: defaultdict(float))\n\n def increment_stats_dict(self, stats_dict: dict):\n \"\"\"increment the stats_dict with the given values.\"\"\"\n for k, v in stats_dict.items():\n self.stats_dict[k] += v","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.LLMTracker","uri":"program://AgentLab/class/src.agentlab.llm.tracking.LLMTracker#L35-L61","kind":"class","name":"LLMTracker","path":"src/agentlab/llm/tracking.py","language":"python","start_line":35,"end_line":61,"context_start_line":15,"context_end_line":81,"code":"if langchain_community is not None:\n from langchain_community.callbacks import bedrock_anthropic_callback, openai_info\nelse:\n bedrock_anthropic_callback = None\n openai_info = None\nfrom litellm import completion_cost, get_model_info\n\nTRACKER = threading.local()\n\nANTHROPIC_CACHE_PRICING_FACTOR = {\n \"cache_read_tokens\": 0.1, # Cost for 5 min ephemeral cache. See Pricing Here: https://docs.anthropic.com/en/docs/about-claude/pricing#model-pricing\n \"cache_write_tokens\": 1.25,\n}\n\nOPENAI_CACHE_PRICING_FACTOR = {\n \"cache_read_tokens\": 0.5, # This is a an upper bound. See Pricing Here: https://platform.openai.com/docs/pricing\n \"cache_write_tokens\": 1,\n}\n\n\nclass LLMTracker:\n def __init__(self, suffix=\"\"):\n self.input_tokens = 0\n self.output_tokens = 0\n self.cost = 0.0\n self.input_tokens_key = \"input_tokens_\" + suffix if suffix else \"input_tokens\"\n self.output_tokens_key = \"output_tokens_\" + suffix if suffix else \"output_tokens\"\n self.cost_key = \"cost_\" + suffix if suffix else \"cost\"\n\n def __call__(self, input_tokens: int, output_tokens: int, cost: float):\n self.input_tokens += input_tokens\n self.output_tokens += output_tokens\n self.cost += cost\n\n @property\n def stats(self):\n return {\n self.input_tokens_key: self.input_tokens,\n self.output_tokens_key: self.output_tokens,\n self.cost_key: self.cost,\n }\n\n def add_tracker(self, tracker: \"LLMTracker\"):\n self(tracker.input_tokens, tracker.output_tokens, tracker.cost)\n\n def __repr__(self):\n return f\"LLMTracker(input_tokens={self.input_tokens}, output_tokens={self.output_tokens}, cost={self.cost})\"\n\n\n@contextmanager\ndef set_tracker(suffix=\"\"):\n global TRACKER\n if not hasattr(TRACKER, \"instance\"):\n TRACKER.instance = None\n previous_tracker = TRACKER.instance # type: LLMTracker\n TRACKER.instance = LLMTracker(suffix)\n try:\n yield TRACKER.instance\n finally:\n # If there was a previous tracker, add the current one to it\n if isinstance(previous_tracker, LLMTracker):\n previous_tracker.add_tracker(TRACKER.instance)\n # Restore the previous tracker\n TRACKER.instance = previous_tracker\n\n\ndef cost_tracker_decorator(get_action, suffix=\"\"):","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.set_tracker","uri":"program://AgentLab/function/src.agentlab.llm.tracking.set_tracker#L65-L78","kind":"function","name":"set_tracker","path":"src/agentlab/llm/tracking.py","language":"python","start_line":65,"end_line":78,"context_start_line":45,"context_end_line":98,"code":" self.input_tokens += input_tokens\n self.output_tokens += output_tokens\n self.cost += cost\n\n @property\n def stats(self):\n return 
{\n self.input_tokens_key: self.input_tokens,\n self.output_tokens_key: self.output_tokens,\n self.cost_key: self.cost,\n }\n\n def add_tracker(self, tracker: \"LLMTracker\"):\n self(tracker.input_tokens, tracker.output_tokens, tracker.cost)\n\n def __repr__(self):\n return f\"LLMTracker(input_tokens={self.input_tokens}, output_tokens={self.output_tokens}, cost={self.cost})\"\n\n\n@contextmanager\ndef set_tracker(suffix=\"\"):\n global TRACKER\n if not hasattr(TRACKER, \"instance\"):\n TRACKER.instance = None\n previous_tracker = TRACKER.instance # type: LLMTracker\n TRACKER.instance = LLMTracker(suffix)\n try:\n yield TRACKER.instance\n finally:\n # If there was a previous tracker, add the current one to it\n if isinstance(previous_tracker, LLMTracker):\n previous_tracker.add_tracker(TRACKER.instance)\n # Restore the previous tracker\n TRACKER.instance = previous_tracker\n\n\ndef cost_tracker_decorator(get_action, suffix=\"\"):\n def wrapper(self, obs):\n with set_tracker(suffix) as tracker:\n action, agent_info = get_action(self, obs)\n agent_info.get(\"stats\").update(tracker.stats)\n return action, agent_info\n\n return wrapper\n\n\n@cache\ndef get_pricing_openrouter():\n \"\"\"Returns a dictionary of model pricing for OpenRouter models.\"\"\"\n api_key = os.getenv(\"OPENROUTER_API_KEY\")\n assert api_key, \"OpenRouter API key is required\"\n # query api to get model metadata\n url = \"https://openrouter.ai/api/v1/models\"\n headers = {\"Authorization\": f\"Bearer {api_key}\"}","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.cost_tracker_decorator","uri":"program://AgentLab/function/src.agentlab.llm.tracking.cost_tracker_decorator#L81-L88","kind":"function","name":"cost_tracker_decorator","path":"src/agentlab/llm/tracking.py","language":"python","start_line":81,"end_line":88,"context_start_line":61,"context_end_line":108,"code":" return f\"LLMTracker(input_tokens={self.input_tokens}, output_tokens={self.output_tokens}, cost={self.cost})\"\n\n\n@contextmanager\ndef set_tracker(suffix=\"\"):\n global TRACKER\n if not hasattr(TRACKER, \"instance\"):\n TRACKER.instance = None\n previous_tracker = TRACKER.instance # type: LLMTracker\n TRACKER.instance = LLMTracker(suffix)\n try:\n yield TRACKER.instance\n finally:\n # If there was a previous tracker, add the current one to it\n if isinstance(previous_tracker, LLMTracker):\n previous_tracker.add_tracker(TRACKER.instance)\n # Restore the previous tracker\n TRACKER.instance = previous_tracker\n\n\ndef cost_tracker_decorator(get_action, suffix=\"\"):\n def wrapper(self, obs):\n with set_tracker(suffix) as tracker:\n action, agent_info = get_action(self, obs)\n agent_info.get(\"stats\").update(tracker.stats)\n return action, agent_info\n\n return wrapper\n\n\n@cache\ndef get_pricing_openrouter():\n \"\"\"Returns a dictionary of model pricing for OpenRouter models.\"\"\"\n api_key = os.getenv(\"OPENROUTER_API_KEY\")\n assert api_key, \"OpenRouter API key is required\"\n # query api to get model metadata\n url = \"https://openrouter.ai/api/v1/models\"\n headers = {\"Authorization\": f\"Bearer {api_key}\"}\n response = requests.get(url, headers=headers)\n\n if response.status_code != 200:\n raise ValueError(\"Failed to get model metadata\")\n\n model_metadata = response.json()\n return {\n model[\"id\"]: {k: float(v) for k, v in model[\"pricing\"].items()}\n for model in model_metadata[\"data\"]\n 
}","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.get_pricing_openrouter","uri":"program://AgentLab/function/src.agentlab.llm.tracking.get_pricing_openrouter#L92-L108","kind":"function","name":"get_pricing_openrouter","path":"src/agentlab/llm/tracking.py","language":"python","start_line":92,"end_line":108,"context_start_line":72,"context_end_line":128,"code":" yield TRACKER.instance\n finally:\n # If there was a previous tracker, add the current one to it\n if isinstance(previous_tracker, LLMTracker):\n previous_tracker.add_tracker(TRACKER.instance)\n # Restore the previous tracker\n TRACKER.instance = previous_tracker\n\n\ndef cost_tracker_decorator(get_action, suffix=\"\"):\n def wrapper(self, obs):\n with set_tracker(suffix) as tracker:\n action, agent_info = get_action(self, obs)\n agent_info.get(\"stats\").update(tracker.stats)\n return action, agent_info\n\n return wrapper\n\n\n@cache\ndef get_pricing_openrouter():\n \"\"\"Returns a dictionary of model pricing for OpenRouter models.\"\"\"\n api_key = os.getenv(\"OPENROUTER_API_KEY\")\n assert api_key, \"OpenRouter API key is required\"\n # query api to get model metadata\n url = \"https://openrouter.ai/api/v1/models\"\n headers = {\"Authorization\": f\"Bearer {api_key}\"}\n response = requests.get(url, headers=headers)\n\n if response.status_code != 200:\n raise ValueError(\"Failed to get model metadata\")\n\n model_metadata = response.json()\n return {\n model[\"id\"]: {k: float(v) for k, v in model[\"pricing\"].items()}\n for model in model_metadata[\"data\"]\n }\n\n\ndef get_pricing_openai():\n \"\"\"Returns a dictionary of model pricing for OpenAI models.\"\"\"\n try:\n cost_dict = openai_info.MODEL_COST_PER_1K_TOKENS\n except Exception as e:\n logging.warning(\n f\"Failed to get OpenAI pricing: {e}. 
\"\n \"Please install langchain-community or use LiteLLM API for pricing information.\"\n )\n return {}\n cost_dict = {k: v / 1000 for k, v in cost_dict.items()}\n res = {}\n for k in cost_dict:\n if k.endswith(\"-completion\"):\n continue\n prompt_key = k\n completion_key = k + \"-completion\"\n if completion_key in cost_dict:","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.get_pricing_openai","uri":"program://AgentLab/function/src.agentlab.llm.tracking.get_pricing_openai#L111-L133","kind":"function","name":"get_pricing_openai","path":"src/agentlab/llm/tracking.py","language":"python","start_line":111,"end_line":133,"context_start_line":91,"context_end_line":153,"code":"@cache\ndef get_pricing_openrouter():\n \"\"\"Returns a dictionary of model pricing for OpenRouter models.\"\"\"\n api_key = os.getenv(\"OPENROUTER_API_KEY\")\n assert api_key, \"OpenRouter API key is required\"\n # query api to get model metadata\n url = \"https://openrouter.ai/api/v1/models\"\n headers = {\"Authorization\": f\"Bearer {api_key}\"}\n response = requests.get(url, headers=headers)\n\n if response.status_code != 200:\n raise ValueError(\"Failed to get model metadata\")\n\n model_metadata = response.json()\n return {\n model[\"id\"]: {k: float(v) for k, v in model[\"pricing\"].items()}\n for model in model_metadata[\"data\"]\n }\n\n\ndef get_pricing_openai():\n \"\"\"Returns a dictionary of model pricing for OpenAI models.\"\"\"\n try:\n cost_dict = openai_info.MODEL_COST_PER_1K_TOKENS\n except Exception as e:\n logging.warning(\n f\"Failed to get OpenAI pricing: {e}. \"\n \"Please install langchain-community or use LiteLLM API for pricing information.\"\n )\n return {}\n cost_dict = {k: v / 1000 for k, v in cost_dict.items()}\n res = {}\n for k in cost_dict:\n if k.endswith(\"-completion\"):\n continue\n prompt_key = k\n completion_key = k + \"-completion\"\n if completion_key in cost_dict:\n res[k] = {\n \"prompt\": cost_dict[prompt_key],\n \"completion\": cost_dict[completion_key],\n }\n return res\n\n\ndef _remove_version_suffix(model_name):\n no_version = re.sub(r\"-v\\d+(?:[.:]\\d+)?$\", \"\", model_name)\n return re.sub(r\"anthropic.\", \"\", no_version)\n\n\ndef get_pricing_anthropic():\n \"\"\"Returns a dictionary of model pricing for Anthropic models.\"\"\"\n try:\n input_cost_dict = bedrock_anthropic_callback.MODEL_COST_PER_1K_INPUT_TOKENS\n output_cost_dict = bedrock_anthropic_callback.MODEL_COST_PER_1K_OUTPUT_TOKENS\n except Exception as e:\n logging.warning(\n f\"Failed to get Anthropic pricing: {e}. \"\n \"Please install langchain-community or use LiteLLM API for pricing information.\"\n )\n return {}\n\n res = {}","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking._remove_version_suffix","uri":"program://AgentLab/function/src.agentlab.llm.tracking._remove_version_suffix#L136-L138","kind":"function","name":"_remove_version_suffix","path":"src/agentlab/llm/tracking.py","language":"python","start_line":136,"end_line":138,"context_start_line":116,"context_end_line":158,"code":" logging.warning(\n f\"Failed to get OpenAI pricing: {e}. 
\"\n \"Please install langchain-community or use LiteLLM API for pricing information.\"\n )\n return {}\n cost_dict = {k: v / 1000 for k, v in cost_dict.items()}\n res = {}\n for k in cost_dict:\n if k.endswith(\"-completion\"):\n continue\n prompt_key = k\n completion_key = k + \"-completion\"\n if completion_key in cost_dict:\n res[k] = {\n \"prompt\": cost_dict[prompt_key],\n \"completion\": cost_dict[completion_key],\n }\n return res\n\n\ndef _remove_version_suffix(model_name):\n no_version = re.sub(r\"-v\\d+(?:[.:]\\d+)?$\", \"\", model_name)\n return re.sub(r\"anthropic.\", \"\", no_version)\n\n\ndef get_pricing_anthropic():\n \"\"\"Returns a dictionary of model pricing for Anthropic models.\"\"\"\n try:\n input_cost_dict = bedrock_anthropic_callback.MODEL_COST_PER_1K_INPUT_TOKENS\n output_cost_dict = bedrock_anthropic_callback.MODEL_COST_PER_1K_OUTPUT_TOKENS\n except Exception as e:\n logging.warning(\n f\"Failed to get Anthropic pricing: {e}. \"\n \"Please install langchain-community or use LiteLLM API for pricing information.\"\n )\n return {}\n\n res = {}\n for k, v in input_cost_dict.items():\n k = _remove_version_suffix(k)\n res[k] = {\"prompt\": v / 1000}\n\n for k, v in output_cost_dict.items():","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.get_pricing_anthropic","uri":"program://AgentLab/function/src.agentlab.llm.tracking.get_pricing_anthropic#L141-L163","kind":"function","name":"get_pricing_anthropic","path":"src/agentlab/llm/tracking.py","language":"python","start_line":141,"end_line":163,"context_start_line":121,"context_end_line":183,"code":" cost_dict = {k: v / 1000 for k, v in cost_dict.items()}\n res = {}\n for k in cost_dict:\n if k.endswith(\"-completion\"):\n continue\n prompt_key = k\n completion_key = k + \"-completion\"\n if completion_key in cost_dict:\n res[k] = {\n \"prompt\": cost_dict[prompt_key],\n \"completion\": cost_dict[completion_key],\n }\n return res\n\n\ndef _remove_version_suffix(model_name):\n no_version = re.sub(r\"-v\\d+(?:[.:]\\d+)?$\", \"\", model_name)\n return re.sub(r\"anthropic.\", \"\", no_version)\n\n\ndef get_pricing_anthropic():\n \"\"\"Returns a dictionary of model pricing for Anthropic models.\"\"\"\n try:\n input_cost_dict = bedrock_anthropic_callback.MODEL_COST_PER_1K_INPUT_TOKENS\n output_cost_dict = bedrock_anthropic_callback.MODEL_COST_PER_1K_OUTPUT_TOKENS\n except Exception as e:\n logging.warning(\n f\"Failed to get Anthropic pricing: {e}. 
\"\n \"Please install langchain-community or use LiteLLM API for pricing information.\"\n )\n return {}\n\n res = {}\n for k, v in input_cost_dict.items():\n k = _remove_version_suffix(k)\n res[k] = {\"prompt\": v / 1000}\n\n for k, v in output_cost_dict.items():\n k = _remove_version_suffix(k)\n if k not in res:\n res[k] = {}\n res[k][\"completion\"] = v / 1000\n return res\n\n\ndef get_pricing_litellm(model_name):\n \"\"\"Returns a dictionary of model pricing for a LiteLLM model.\"\"\"\n try:\n info = get_model_info(model_name)\n except Exception as e:\n logging.error(f\"Error fetching model info for {model_name}: {e} from litellm\")\n info = {}\n return {\n model_name: {\n \"prompt\": info.get(\"input_cost_per_token\", 0.0),\n \"completion\": info.get(\"output_cost_per_token\", 0.0),\n }\n }\n\n\nclass TrackAPIPricingMixin:\n \"\"\"Mixin class to handle pricing information for different models.\n This populates the tracker.stats used by the cost_tracker_decorator","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.get_pricing_litellm","uri":"program://AgentLab/function/src.agentlab.llm.tracking.get_pricing_litellm#L166-L178","kind":"function","name":"get_pricing_litellm","path":"src/agentlab/llm/tracking.py","language":"python","start_line":166,"end_line":178,"context_start_line":146,"context_end_line":198,"code":" except Exception as e:\n logging.warning(\n f\"Failed to get Anthropic pricing: {e}. \"\n \"Please install langchain-community or use LiteLLM API for pricing information.\"\n )\n return {}\n\n res = {}\n for k, v in input_cost_dict.items():\n k = _remove_version_suffix(k)\n res[k] = {\"prompt\": v / 1000}\n\n for k, v in output_cost_dict.items():\n k = _remove_version_suffix(k)\n if k not in res:\n res[k] = {}\n res[k][\"completion\"] = v / 1000\n return res\n\n\ndef get_pricing_litellm(model_name):\n \"\"\"Returns a dictionary of model pricing for a LiteLLM model.\"\"\"\n try:\n info = get_model_info(model_name)\n except Exception as e:\n logging.error(f\"Error fetching model info for {model_name}: {e} from litellm\")\n info = {}\n return {\n model_name: {\n \"prompt\": info.get(\"input_cost_per_token\", 0.0),\n \"completion\": info.get(\"output_cost_per_token\", 0.0),\n }\n }\n\n\nclass TrackAPIPricingMixin:\n \"\"\"Mixin class to handle pricing information for different models.\n This populates the tracker.stats used by the cost_tracker_decorator\n\n Usage: provide the pricing_api to use in the constructor.\n \"\"\"\n\n def reset_stats(self):\n self.stats = Stats()\n\n def init_pricing_tracker(self, pricing_api=None):\n \"\"\"Initialize the pricing tracker with the given API.\"\"\"\n self._pricing_api = pricing_api\n self.set_pricing_attributes()\n self.reset_stats()\n\n def __call__(self, *args, **kwargs):\n \"\"\"Call the API and update the pricing tracker.\"\"\"","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.TrackAPIPricingMixin","uri":"program://AgentLab/class/src.agentlab.llm.tracking.TrackAPIPricingMixin#L181-L387","kind":"class","name":"TrackAPIPricingMixin","path":"src/agentlab/llm/tracking.py","language":"python","start_line":181,"end_line":387,"context_start_line":161,"context_end_line":397,"code":" res[k] = {}\n res[k][\"completion\"] = v / 1000\n return res\n\n\ndef get_pricing_litellm(model_name):\n \"\"\"Returns a dictionary of 
model pricing for a LiteLLM model.\"\"\"\n try:\n info = get_model_info(model_name)\n except Exception as e:\n logging.error(f\"Error fetching model info for {model_name}: {e} from litellm\")\n info = {}\n return {\n model_name: {\n \"prompt\": info.get(\"input_cost_per_token\", 0.0),\n \"completion\": info.get(\"output_cost_per_token\", 0.0),\n }\n }\n\n\nclass TrackAPIPricingMixin:\n \"\"\"Mixin class to handle pricing information for different models.\n This populates the tracker.stats used by the cost_tracker_decorator\n\n Usage: provide the pricing_api to use in the constructor.\n \"\"\"\n\n def reset_stats(self):\n self.stats = Stats()\n\n def init_pricing_tracker(self, pricing_api=None):\n \"\"\"Initialize the pricing tracker with the given API.\"\"\"\n self._pricing_api = pricing_api\n self.set_pricing_attributes()\n self.reset_stats()\n\n def __call__(self, *args, **kwargs):\n \"\"\"Call the API and update the pricing tracker.\"\"\"\n # 'self' here calls ._call_api() method of the subclass\n response = self._call_api(*args, **kwargs)\n usage = dict(getattr(response, \"usage\", {}))\n if \"prompt_tokens_details\" in usage and usage[\"prompt_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"prompt_tokens_details\"].cached_tokens\n if \"input_tokens_details\" in usage and usage[\"input_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"input_tokens_details\"].cached_tokens\n usage = {f\"usage_{k}\": v for k, v in usage.items() if isinstance(v, (int, float))}\n usage |= {\"n_api_calls\": 1}\n usage |= {\"effective_cost\": self.get_effective_cost(response)}\n self.stats.increment_stats_dict(usage)\n self.update_pricing_tracker(response)\n return self._parse_response(response)\n\n def fetch_pricing_information_from_provider(self) -> Optional[dict]:\n \"\"\"\n Fetch the pricing information dictionary for the given provider.\n\n Returns:\n Optional[dict]: A dict mapping model names to pricing info, or None if not found.\n \"\"\"\n pricing_fn_map = {\n \"openai\": get_pricing_openai,\n \"anthropic\": get_pricing_anthropic,\n \"openrouter\": get_pricing_openrouter,\n \"litellm\": partial(get_pricing_litellm, self.model_name),\n }\n pricing_fn = pricing_fn_map.get(self._pricing_api, None)\n if pricing_fn is None:\n logging.warning(\n f\"Unsupported provider: {self._pricing_api}. 
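For models missing from a provider table, the code above falls back to litellm. A hedged sketch of that lookup path, assuming `litellm` is installed: `get_model_info` is litellm's public helper and raises for unmapped model names, which is why unknown models degrade to zero cost.

```python
from litellm import get_model_info

def litellm_pricing(model_name: str) -> dict:
    # Mirrors get_pricing_litellm: unknown models fall back to zero cost.
    try:
        info = get_model_info(model_name)
    except Exception:
        info = {}
    return {
        "prompt": info.get("input_cost_per_token", 0.0),
        "completion": info.get("output_cost_per_token", 0.0),
    }

print(litellm_pricing("gpt-4o-mini"))       # per-token USD prices, if known
print(litellm_pricing("not-a-real-model"))  # {'prompt': 0.0, 'completion': 0.0}
```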
Supported providers are: {list(pricing_fn_map.keys())}\"\n )\n return None\n return pricing_fn()\n\n def set_pricing_attributes(self) -> None:\n \"\"\"Set the pricing attributes for the model based on the provider.\"\"\"\n model_to_price_dict = self.fetch_pricing_information_from_provider()\n model_costs = model_to_price_dict.get(self.model_name) if model_to_price_dict else None\n if model_costs:\n self.input_cost = float(model_costs[\"prompt\"])\n self.output_cost = float(model_costs[\"completion\"])\n else:\n # use litellm to get model info if not found in the pricing dict\n try:\n model_info = get_model_info(self.model_name)\n self.input_cost = float(model_info.get(\"input_cost_per_token\", 0.0))\n self.output_cost = float(model_info.get(\"output_cost_per_token\", 0.0))\n except Exception as e:\n logging.warning(f\"Failed to fetch pricing for {self.model_name}: {e}\")\n self.input_cost = 0.0\n self.output_cost = 0.0\n\n def update_pricing_tracker(self, raw_response) -> None:\n \"\"\"Update the pricing tracker with the input and output tokens and cost.\"\"\"\n\n input_tokens, output_tokens = self.get_tokens_counts_from_response(raw_response)\n cost = input_tokens * self.input_cost + output_tokens * self.output_cost\n\n if hasattr(TRACKER, \"instance\") and isinstance(TRACKER.instance, LLMTracker):\n TRACKER.instance(input_tokens, output_tokens, cost)\n\n def get_tokens_counts_from_response(self, response) -> tuple:\n \"\"\"Get the input and output tokens counts from the response, provider-agnostic.\"\"\"\n # Try OpenAI/Anthropic style\n usage = getattr(response, \"usage\", None)\n if usage:\n input_tokens = getattr(usage, \"input_tokens\", None) or getattr(\n usage, \"prompt_tokens\", None\n )\n output_tokens = getattr(usage, \"output_tokens\", None) or getattr(\n usage, \"completion_tokens\", None\n )\n if input_tokens is not None and output_tokens is not None:\n return input_tokens, output_tokens\n\n # Try dict style\n if isinstance(response, dict) and \"usage\" in response:\n usage = response[\"usage\"]\n input_tokens = usage.get(\"input_tokens\") or usage.get(\"prompt_tokens\")\n output_tokens = usage.get(\"output_tokens\") or usage.get(\"completion_tokens\")\n if input_tokens is not None and output_tokens is not None:\n return input_tokens, output_tokens\n\n logging.warning(\n \"Unable to extract input and output tokens from the response. Defaulting to 0.\"\n )\n return 0, 0\n\n def get_effective_cost(self, response):\n \"\"\"Get the effective cost from the response based on the provider.\"\"\"\n if self._pricing_api == \"anthropic\":\n return self.get_effective_cost_from_antrophic_api(response)\n elif self._pricing_api == \"openai\":\n return self.get_effective_cost_from_openai_api(response)\n elif self._pricing_api == \"litellm\":\n return completion_cost(response)\n else:\n logging.warning(\n f\"Unsupported provider: {self._pricing_api}. 
No effective cost calculated.\"\n )\n return 0.0\n\n def get_effective_cost_from_antrophic_api(self, response) -> float:\n \"\"\"\n Get the effective cost from the Anthropic API response.\n\n Anthropic usage 'input_tokens' are new input tokens (tokens that are not cached).\n Anthropic has different pricing for cache write and cache read tokens.\n See https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#tracking-cache-performance\n\n Args:\n response: The response object from the Anthropic API.\n\n Returns:\n float: The effective cost calculated from the response.\n \"\"\"\n usage = getattr(response, \"usage\", {})\n new_input_tokens = getattr(usage, \"input_tokens\", 0) # new input tokens\n output_tokens = getattr(usage, \"output_tokens\", 0)\n cache_read_tokens = getattr(usage, \"cache_input_tokens\", 0)\n cache_write_tokens = getattr(usage, \"cache_creation_input_tokens\", 0)\n\n cache_read_cost = self.input_cost * ANTHROPIC_CACHE_PRICING_FACTOR[\"cache_read_tokens\"]\n cache_write_cost = self.input_cost * ANTHROPIC_CACHE_PRICING_FACTOR[\"cache_write_tokens\"]\n\n # Calculate the effective cost\n effective_cost = (\n new_input_tokens * self.input_cost\n + output_tokens * self.output_cost\n + cache_read_tokens * cache_read_cost\n + cache_write_tokens * cache_write_cost\n )\n if effective_cost < 0:\n logging.warning(\n \"Anthropic: Negative effective cost detected.(Impossible! Likely a bug)\"\n )\n return effective_cost\n\n def get_effective_cost_from_openai_api(self, response) -> float:\n \"\"\"\n Get the effective cost from the OpenAI API response.\n\n OpenAI usage 'prompt_tokens' are the total input tokens (cache read tokens + new input tokens).\n See https://openai.com/index/api-prompt-caching/\n OpenAI has only one price for cache tokens, i.e., cache read price (generally 50% cheaper).\n OpenAI has no extra charge for cache write tokens.\n See Pricing Here: https://platform.openai.com/docs/pricing\n\n Args:\n response: The response object from the OpenAI API.\n\n Returns:\n float: The effective cost calculated from the response.\n \"\"\"\n usage = getattr(response, \"usage\", None)\n if usage is None:\n logging.warning(\"No usage information found in the response. Defaulting cost to 0.0.\")\n return 0.0\n api_type = \"chatcompletion\" if hasattr(usage, \"prompt_tokens_details\") else \"response\"\n if api_type == \"chatcompletion\":\n total_input_tokens = usage.prompt_tokens # (cache read tokens + new input tokens)\n output_tokens = usage.completion_tokens\n cached_input_tokens = (\n usage.prompt_tokens_details.cached_tokens if usage.prompt_tokens_details else 0\n )\n new_input_tokens = total_input_tokens - cached_input_tokens\n elif api_type == \"response\":\n total_input_tokens = usage.input_tokens # (cache read tokens + new input tokens)\n output_tokens = usage.output_tokens\n cached_input_tokens = (\n usage.input_tokens_details.cached_tokens if usage.input_tokens_details else 0\n )\n new_input_tokens = total_input_tokens - cached_input_tokens\n else:\n logging.warning(f\"Unsupported API type: {api_type}. Defaulting cost to 0.0.\")\n return 0.0\n cache_read_cost = self.input_cost * OPENAI_CACHE_PRICING_FACTOR[\"cache_read_tokens\"]\n effective_cost = (\n self.input_cost * new_input_tokens\n + cached_input_tokens * cache_read_cost\n + self.output_cost * output_tokens\n )\n if effective_cost < 0:\n logging.warning(\n f\"OpenAI: Negative effective cost detected.(Impossible! Likely a bug). 
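To make the Anthropic cache factors concrete, here is the effective-cost formula from `get_effective_cost_from_antrophic_api` evaluated by hand. All numbers are made up: a $1-per-million-token input price, output priced at 5x input; only the 0.1/1.25 factors come from `ANTHROPIC_CACHE_PRICING_FACTOR`.

```python
# Illustrative numbers only; factors match ANTHROPIC_CACHE_PRICING_FACTOR.
input_cost, output_cost = 1e-06, 5e-06  # $/token (hypothetical)
new_input, output = 1_000, 200          # usage.input_tokens / output_tokens
cache_read, cache_write = 8_000, 2_000  # cache_input / cache_creation_input_tokens

effective_cost = (
    new_input * input_cost
    + output * output_cost
    + cache_read * (input_cost * 0.1)    # cache reads: 10% of input price
    + cache_write * (input_cost * 1.25)  # cache writes: 125% of input price
)
print(round(effective_cost, 6))  # 0.0053
```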
\"\n f\"New input tokens: {total_input_tokens}\"\n )\n return effective_cost\n\n\n@dataclass\nclass Stats:\n stats_dict: dict = field(default_factory=lambda: defaultdict(float))\n\n def increment_stats_dict(self, stats_dict: dict):\n \"\"\"increment the stats_dict with the given values.\"\"\"\n for k, v in stats_dict.items():\n self.stats_dict[k] += v","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.Stats","uri":"program://AgentLab/class/src.agentlab.llm.tracking.Stats#L391-L397","kind":"class","name":"Stats","path":"src/agentlab/llm/tracking.py","language":"python","start_line":391,"end_line":397,"context_start_line":371,"context_end_line":397,"code":" )\n new_input_tokens = total_input_tokens - cached_input_tokens\n else:\n logging.warning(f\"Unsupported API type: {api_type}. Defaulting cost to 0.0.\")\n return 0.0\n cache_read_cost = self.input_cost * OPENAI_CACHE_PRICING_FACTOR[\"cache_read_tokens\"]\n effective_cost = (\n self.input_cost * new_input_tokens\n + cached_input_tokens * cache_read_cost\n + self.output_cost * output_tokens\n )\n if effective_cost < 0:\n logging.warning(\n f\"OpenAI: Negative effective cost detected.(Impossible! Likely a bug). \"\n f\"New input tokens: {total_input_tokens}\"\n )\n return effective_cost\n\n\n@dataclass\nclass Stats:\n stats_dict: dict = field(default_factory=lambda: defaultdict(float))\n\n def increment_stats_dict(self, stats_dict: dict):\n \"\"\"increment the stats_dict with the given values.\"\"\"\n for k, v in stats_dict.items():\n self.stats_dict[k] += v","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.__init__","uri":"program://AgentLab/function/src.agentlab.llm.tracking.__init__#L36-L42","kind":"function","name":"__init__","path":"src/agentlab/llm/tracking.py","language":"python","start_line":36,"end_line":42,"context_start_line":16,"context_end_line":62,"code":" from langchain_community.callbacks import bedrock_anthropic_callback, openai_info\nelse:\n bedrock_anthropic_callback = None\n openai_info = None\nfrom litellm import completion_cost, get_model_info\n\nTRACKER = threading.local()\n\nANTHROPIC_CACHE_PRICING_FACTOR = {\n \"cache_read_tokens\": 0.1, # Cost for 5 min ephemeral cache. See Pricing Here: https://docs.anthropic.com/en/docs/about-claude/pricing#model-pricing\n \"cache_write_tokens\": 1.25,\n}\n\nOPENAI_CACHE_PRICING_FACTOR = {\n \"cache_read_tokens\": 0.5, # This is a an upper bound. 
See Pricing Here: https://platform.openai.com/docs/pricing\n \"cache_write_tokens\": 1,\n}\n\n\nclass LLMTracker:\n def __init__(self, suffix=\"\"):\n self.input_tokens = 0\n self.output_tokens = 0\n self.cost = 0.0\n self.input_tokens_key = \"input_tokens_\" + suffix if suffix else \"input_tokens\"\n self.output_tokens_key = \"output_tokens_\" + suffix if suffix else \"output_tokens\"\n self.cost_key = \"cost_\" + suffix if suffix else \"cost\"\n\n def __call__(self, input_tokens: int, output_tokens: int, cost: float):\n self.input_tokens += input_tokens\n self.output_tokens += output_tokens\n self.cost += cost\n\n @property\n def stats(self):\n return {\n self.input_tokens_key: self.input_tokens,\n self.output_tokens_key: self.output_tokens,\n self.cost_key: self.cost,\n }\n\n def add_tracker(self, tracker: \"LLMTracker\"):\n self(tracker.input_tokens, tracker.output_tokens, tracker.cost)\n\n def __repr__(self):\n return f\"LLMTracker(input_tokens={self.input_tokens}, output_tokens={self.output_tokens}, cost={self.cost})\"\n","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.__call__","uri":"program://AgentLab/function/src.agentlab.llm.tracking.__call__#L197-L211","kind":"function","name":"__call__","path":"src/agentlab/llm/tracking.py","language":"python","start_line":197,"end_line":211,"context_start_line":177,"context_end_line":231,"code":" }\n }\n\n\nclass TrackAPIPricingMixin:\n \"\"\"Mixin class to handle pricing information for different models.\n This populates the tracker.stats used by the cost_tracker_decorator\n\n Usage: provide the pricing_api to use in the constructor.\n \"\"\"\n\n def reset_stats(self):\n self.stats = Stats()\n\n def init_pricing_tracker(self, pricing_api=None):\n \"\"\"Initialize the pricing tracker with the given API.\"\"\"\n self._pricing_api = pricing_api\n self.set_pricing_attributes()\n self.reset_stats()\n\n def __call__(self, *args, **kwargs):\n \"\"\"Call the API and update the pricing tracker.\"\"\"\n # 'self' here calls ._call_api() method of the subclass\n response = self._call_api(*args, **kwargs)\n usage = dict(getattr(response, \"usage\", {}))\n if \"prompt_tokens_details\" in usage and usage[\"prompt_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"prompt_tokens_details\"].cached_tokens\n if \"input_tokens_details\" in usage and usage[\"input_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"input_tokens_details\"].cached_tokens\n usage = {f\"usage_{k}\": v for k, v in usage.items() if isinstance(v, (int, float))}\n usage |= {\"n_api_calls\": 1}\n usage |= {\"effective_cost\": self.get_effective_cost(response)}\n self.stats.increment_stats_dict(usage)\n self.update_pricing_tracker(response)\n return self._parse_response(response)\n\n def fetch_pricing_information_from_provider(self) -> Optional[dict]:\n \"\"\"\n Fetch the pricing information dictionary for the given provider.\n\n Returns:\n Optional[dict]: A dict mapping model names to pricing info, or None if not found.\n \"\"\"\n pricing_fn_map = {\n \"openai\": get_pricing_openai,\n \"anthropic\": get_pricing_anthropic,\n \"openrouter\": get_pricing_openrouter,\n \"litellm\": partial(get_pricing_litellm, self.model_name),\n }\n pricing_fn = pricing_fn_map.get(self._pricing_api, None)\n if pricing_fn is None:\n logging.warning(\n f\"Unsupported provider: {self._pricing_api}. 
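A minimal usage sketch of the `LLMTracker` class shown above: calls accumulate in place, and the suffix only changes the keys under which `stats` are reported (useful when one agent wraps several kinds of sub-calls). Token counts and costs are arbitrary.

```python
from agentlab.llm.tracking import LLMTracker

t = LLMTracker(suffix="retrieval")
t(1_000, 50, 0.0021)  # (input_tokens, output_tokens, cost)
t(400, 10, 0.0007)
print(t.stats)
# {'input_tokens_retrieval': 1400, 'output_tokens_retrieval': 60,
#  'cost_retrieval': ~0.0028}
```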
Supported providers are: {list(pricing_fn_map.keys())}\"\n )\n return None","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.stats","uri":"program://AgentLab/function/src.agentlab.llm.tracking.stats#L50-L55","kind":"function","name":"stats","path":"src/agentlab/llm/tracking.py","language":"python","start_line":50,"end_line":55,"context_start_line":30,"context_end_line":75,"code":" \"cache_read_tokens\": 0.5, # This is a an upper bound. See Pricing Here: https://platform.openai.com/docs/pricing\n \"cache_write_tokens\": 1,\n}\n\n\nclass LLMTracker:\n def __init__(self, suffix=\"\"):\n self.input_tokens = 0\n self.output_tokens = 0\n self.cost = 0.0\n self.input_tokens_key = \"input_tokens_\" + suffix if suffix else \"input_tokens\"\n self.output_tokens_key = \"output_tokens_\" + suffix if suffix else \"output_tokens\"\n self.cost_key = \"cost_\" + suffix if suffix else \"cost\"\n\n def __call__(self, input_tokens: int, output_tokens: int, cost: float):\n self.input_tokens += input_tokens\n self.output_tokens += output_tokens\n self.cost += cost\n\n @property\n def stats(self):\n return {\n self.input_tokens_key: self.input_tokens,\n self.output_tokens_key: self.output_tokens,\n self.cost_key: self.cost,\n }\n\n def add_tracker(self, tracker: \"LLMTracker\"):\n self(tracker.input_tokens, tracker.output_tokens, tracker.cost)\n\n def __repr__(self):\n return f\"LLMTracker(input_tokens={self.input_tokens}, output_tokens={self.output_tokens}, cost={self.cost})\"\n\n\n@contextmanager\ndef set_tracker(suffix=\"\"):\n global TRACKER\n if not hasattr(TRACKER, \"instance\"):\n TRACKER.instance = None\n previous_tracker = TRACKER.instance # type: LLMTracker\n TRACKER.instance = LLMTracker(suffix)\n try:\n yield TRACKER.instance\n finally:\n # If there was a previous tracker, add the current one to it\n if isinstance(previous_tracker, LLMTracker):","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.add_tracker","uri":"program://AgentLab/function/src.agentlab.llm.tracking.add_tracker#L57-L58","kind":"function","name":"add_tracker","path":"src/agentlab/llm/tracking.py","language":"python","start_line":57,"end_line":58,"context_start_line":37,"context_end_line":78,"code":" self.input_tokens = 0\n self.output_tokens = 0\n self.cost = 0.0\n self.input_tokens_key = \"input_tokens_\" + suffix if suffix else \"input_tokens\"\n self.output_tokens_key = \"output_tokens_\" + suffix if suffix else \"output_tokens\"\n self.cost_key = \"cost_\" + suffix if suffix else \"cost\"\n\n def __call__(self, input_tokens: int, output_tokens: int, cost: float):\n self.input_tokens += input_tokens\n self.output_tokens += output_tokens\n self.cost += cost\n\n @property\n def stats(self):\n return {\n self.input_tokens_key: self.input_tokens,\n self.output_tokens_key: self.output_tokens,\n self.cost_key: self.cost,\n }\n\n def add_tracker(self, tracker: \"LLMTracker\"):\n self(tracker.input_tokens, tracker.output_tokens, tracker.cost)\n\n def __repr__(self):\n return f\"LLMTracker(input_tokens={self.input_tokens}, output_tokens={self.output_tokens}, cost={self.cost})\"\n\n\n@contextmanager\ndef set_tracker(suffix=\"\"):\n global TRACKER\n if not hasattr(TRACKER, \"instance\"):\n TRACKER.instance = None\n previous_tracker = TRACKER.instance # type: LLMTracker\n TRACKER.instance = LLMTracker(suffix)\n 
try:\n yield TRACKER.instance\n finally:\n # If there was a previous tracker, add the current one to it\n if isinstance(previous_tracker, LLMTracker):\n previous_tracker.add_tracker(TRACKER.instance)\n # Restore the previous tracker\n TRACKER.instance = previous_tracker","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.__repr__","uri":"program://AgentLab/function/src.agentlab.llm.tracking.__repr__#L60-L61","kind":"function","name":"__repr__","path":"src/agentlab/llm/tracking.py","language":"python","start_line":60,"end_line":61,"context_start_line":40,"context_end_line":81,"code":" self.input_tokens_key = \"input_tokens_\" + suffix if suffix else \"input_tokens\"\n self.output_tokens_key = \"output_tokens_\" + suffix if suffix else \"output_tokens\"\n self.cost_key = \"cost_\" + suffix if suffix else \"cost\"\n\n def __call__(self, input_tokens: int, output_tokens: int, cost: float):\n self.input_tokens += input_tokens\n self.output_tokens += output_tokens\n self.cost += cost\n\n @property\n def stats(self):\n return {\n self.input_tokens_key: self.input_tokens,\n self.output_tokens_key: self.output_tokens,\n self.cost_key: self.cost,\n }\n\n def add_tracker(self, tracker: \"LLMTracker\"):\n self(tracker.input_tokens, tracker.output_tokens, tracker.cost)\n\n def __repr__(self):\n return f\"LLMTracker(input_tokens={self.input_tokens}, output_tokens={self.output_tokens}, cost={self.cost})\"\n\n\n@contextmanager\ndef set_tracker(suffix=\"\"):\n global TRACKER\n if not hasattr(TRACKER, \"instance\"):\n TRACKER.instance = None\n previous_tracker = TRACKER.instance # type: LLMTracker\n TRACKER.instance = LLMTracker(suffix)\n try:\n yield TRACKER.instance\n finally:\n # If there was a previous tracker, add the current one to it\n if isinstance(previous_tracker, LLMTracker):\n previous_tracker.add_tracker(TRACKER.instance)\n # Restore the previous tracker\n TRACKER.instance = previous_tracker\n\n\ndef cost_tracker_decorator(get_action, suffix=\"\"):","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.wrapper","uri":"program://AgentLab/function/src.agentlab.llm.tracking.wrapper#L82-L86","kind":"function","name":"wrapper","path":"src/agentlab/llm/tracking.py","language":"python","start_line":82,"end_line":86,"context_start_line":62,"context_end_line":106,"code":"\n\n@contextmanager\ndef set_tracker(suffix=\"\"):\n global TRACKER\n if not hasattr(TRACKER, \"instance\"):\n TRACKER.instance = None\n previous_tracker = TRACKER.instance # type: LLMTracker\n TRACKER.instance = LLMTracker(suffix)\n try:\n yield TRACKER.instance\n finally:\n # If there was a previous tracker, add the current one to it\n if isinstance(previous_tracker, LLMTracker):\n previous_tracker.add_tracker(TRACKER.instance)\n # Restore the previous tracker\n TRACKER.instance = previous_tracker\n\n\ndef cost_tracker_decorator(get_action, suffix=\"\"):\n def wrapper(self, obs):\n with set_tracker(suffix) as tracker:\n action, agent_info = get_action(self, obs)\n agent_info.get(\"stats\").update(tracker.stats)\n return action, agent_info\n\n return wrapper\n\n\n@cache\ndef get_pricing_openrouter():\n \"\"\"Returns a dictionary of model pricing for OpenRouter models.\"\"\"\n api_key = os.getenv(\"OPENROUTER_API_KEY\")\n assert api_key, \"OpenRouter API key is required\"\n # query api to get model metadata\n url = 
\"https://openrouter.ai/api/v1/models\"\n headers = {\"Authorization\": f\"Bearer {api_key}\"}\n response = requests.get(url, headers=headers)\n\n if response.status_code != 200:\n raise ValueError(\"Failed to get model metadata\")\n\n model_metadata = response.json()\n return {\n model[\"id\"]: {k: float(v) for k, v in model[\"pricing\"].items()}","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.reset_stats","uri":"program://AgentLab/function/src.agentlab.llm.tracking.reset_stats#L188-L189","kind":"function","name":"reset_stats","path":"src/agentlab/llm/tracking.py","language":"python","start_line":188,"end_line":189,"context_start_line":168,"context_end_line":209,"code":" try:\n info = get_model_info(model_name)\n except Exception as e:\n logging.error(f\"Error fetching model info for {model_name}: {e} from litellm\")\n info = {}\n return {\n model_name: {\n \"prompt\": info.get(\"input_cost_per_token\", 0.0),\n \"completion\": info.get(\"output_cost_per_token\", 0.0),\n }\n }\n\n\nclass TrackAPIPricingMixin:\n \"\"\"Mixin class to handle pricing information for different models.\n This populates the tracker.stats used by the cost_tracker_decorator\n\n Usage: provide the pricing_api to use in the constructor.\n \"\"\"\n\n def reset_stats(self):\n self.stats = Stats()\n\n def init_pricing_tracker(self, pricing_api=None):\n \"\"\"Initialize the pricing tracker with the given API.\"\"\"\n self._pricing_api = pricing_api\n self.set_pricing_attributes()\n self.reset_stats()\n\n def __call__(self, *args, **kwargs):\n \"\"\"Call the API and update the pricing tracker.\"\"\"\n # 'self' here calls ._call_api() method of the subclass\n response = self._call_api(*args, **kwargs)\n usage = dict(getattr(response, \"usage\", {}))\n if \"prompt_tokens_details\" in usage and usage[\"prompt_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"prompt_tokens_details\"].cached_tokens\n if \"input_tokens_details\" in usage and usage[\"input_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"input_tokens_details\"].cached_tokens\n usage = {f\"usage_{k}\": v for k, v in usage.items() if isinstance(v, (int, float))}\n usage |= {\"n_api_calls\": 1}\n usage |= {\"effective_cost\": self.get_effective_cost(response)}\n self.stats.increment_stats_dict(usage)","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.init_pricing_tracker","uri":"program://AgentLab/function/src.agentlab.llm.tracking.init_pricing_tracker#L191-L195","kind":"function","name":"init_pricing_tracker","path":"src/agentlab/llm/tracking.py","language":"python","start_line":191,"end_line":195,"context_start_line":171,"context_end_line":215,"code":" logging.error(f\"Error fetching model info for {model_name}: {e} from litellm\")\n info = {}\n return {\n model_name: {\n \"prompt\": info.get(\"input_cost_per_token\", 0.0),\n \"completion\": info.get(\"output_cost_per_token\", 0.0),\n }\n }\n\n\nclass TrackAPIPricingMixin:\n \"\"\"Mixin class to handle pricing information for different models.\n This populates the tracker.stats used by the cost_tracker_decorator\n\n Usage: provide the pricing_api to use in the constructor.\n \"\"\"\n\n def reset_stats(self):\n self.stats = Stats()\n\n def init_pricing_tracker(self, pricing_api=None):\n \"\"\"Initialize the pricing tracker with the given API.\"\"\"\n self._pricing_api = 
pricing_api\n self.set_pricing_attributes()\n self.reset_stats()\n\n def __call__(self, *args, **kwargs):\n \"\"\"Call the API and update the pricing tracker.\"\"\"\n # 'self' here calls ._call_api() method of the subclass\n response = self._call_api(*args, **kwargs)\n usage = dict(getattr(response, \"usage\", {}))\n if \"prompt_tokens_details\" in usage and usage[\"prompt_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"prompt_tokens_details\"].cached_tokens\n if \"input_tokens_details\" in usage and usage[\"input_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"input_tokens_details\"].cached_tokens\n usage = {f\"usage_{k}\": v for k, v in usage.items() if isinstance(v, (int, float))}\n usage |= {\"n_api_calls\": 1}\n usage |= {\"effective_cost\": self.get_effective_cost(response)}\n self.stats.increment_stats_dict(usage)\n self.update_pricing_tracker(response)\n return self._parse_response(response)\n\n def fetch_pricing_information_from_provider(self) -> Optional[dict]:\n \"\"\"\n Fetch the pricing information dictionary for the given provider.","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.fetch_pricing_information_from_provider","uri":"program://AgentLab/function/src.agentlab.llm.tracking.fetch_pricing_information_from_provider#L213-L232","kind":"function","name":"fetch_pricing_information_from_provider","path":"src/agentlab/llm/tracking.py","language":"python","start_line":213,"end_line":232,"context_start_line":193,"context_end_line":252,"code":" self._pricing_api = pricing_api\n self.set_pricing_attributes()\n self.reset_stats()\n\n def __call__(self, *args, **kwargs):\n \"\"\"Call the API and update the pricing tracker.\"\"\"\n # 'self' here calls ._call_api() method of the subclass\n response = self._call_api(*args, **kwargs)\n usage = dict(getattr(response, \"usage\", {}))\n if \"prompt_tokens_details\" in usage and usage[\"prompt_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"prompt_tokens_details\"].cached_tokens\n if \"input_tokens_details\" in usage and usage[\"input_tokens_details\"]:\n usage[\"cached_tokens\"] = usage[\"input_tokens_details\"].cached_tokens\n usage = {f\"usage_{k}\": v for k, v in usage.items() if isinstance(v, (int, float))}\n usage |= {\"n_api_calls\": 1}\n usage |= {\"effective_cost\": self.get_effective_cost(response)}\n self.stats.increment_stats_dict(usage)\n self.update_pricing_tracker(response)\n return self._parse_response(response)\n\n def fetch_pricing_information_from_provider(self) -> Optional[dict]:\n \"\"\"\n Fetch the pricing information dictionary for the given provider.\n\n Returns:\n Optional[dict]: A dict mapping model names to pricing info, or None if not found.\n \"\"\"\n pricing_fn_map = {\n \"openai\": get_pricing_openai,\n \"anthropic\": get_pricing_anthropic,\n \"openrouter\": get_pricing_openrouter,\n \"litellm\": partial(get_pricing_litellm, self.model_name),\n }\n pricing_fn = pricing_fn_map.get(self._pricing_api, None)\n if pricing_fn is None:\n logging.warning(\n f\"Unsupported provider: {self._pricing_api}. 
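The thread-local `TRACKER` that `update_pricing_tracker` feeds into is managed by the `set_tracker` context manager shown a little earlier. A sketch of how nesting behaves: the inner tracker's totals are folded into the outer one when the inner context exits, so per-step and per-episode accounting compose. The numbers are arbitrary.

```python
from agentlab.llm.tracking import set_tracker

with set_tracker() as outer:
    with set_tracker("inner") as inner:
        inner(100, 10, 0.001)  # as an API wrapper would via TRACKER.instance(...)
    outer(5, 1, 0.0001)

print(inner.stats)  # {'input_tokens_inner': 100, 'output_tokens_inner': 10, 'cost_inner': 0.001}
print(outer.stats)  # inner totals folded in: 105 input, 11 output, ~0.0011 cost
```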
Supported providers are: {list(pricing_fn_map.keys())}\"\n )\n return None\n return pricing_fn()\n\n def set_pricing_attributes(self) -> None:\n \"\"\"Set the pricing attributes for the model based on the provider.\"\"\"\n model_to_price_dict = self.fetch_pricing_information_from_provider()\n model_costs = model_to_price_dict.get(self.model_name) if model_to_price_dict else None\n if model_costs:\n self.input_cost = float(model_costs[\"prompt\"])\n self.output_cost = float(model_costs[\"completion\"])\n else:\n # use litellm to get model info if not found in the pricing dict\n try:\n model_info = get_model_info(self.model_name)\n self.input_cost = float(model_info.get(\"input_cost_per_token\", 0.0))\n self.output_cost = float(model_info.get(\"output_cost_per_token\", 0.0))\n except Exception as e:\n logging.warning(f\"Failed to fetch pricing for {self.model_name}: {e}\")\n self.input_cost = 0.0\n self.output_cost = 0.0\n\n def update_pricing_tracker(self, raw_response) -> None:","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.set_pricing_attributes","uri":"program://AgentLab/function/src.agentlab.llm.tracking.set_pricing_attributes#L234-L250","kind":"function","name":"set_pricing_attributes","path":"src/agentlab/llm/tracking.py","language":"python","start_line":234,"end_line":250,"context_start_line":214,"context_end_line":270,"code":" \"\"\"\n Fetch the pricing information dictionary for the given provider.\n\n Returns:\n Optional[dict]: A dict mapping model names to pricing info, or None if not found.\n \"\"\"\n pricing_fn_map = {\n \"openai\": get_pricing_openai,\n \"anthropic\": get_pricing_anthropic,\n \"openrouter\": get_pricing_openrouter,\n \"litellm\": partial(get_pricing_litellm, self.model_name),\n }\n pricing_fn = pricing_fn_map.get(self._pricing_api, None)\n if pricing_fn is None:\n logging.warning(\n f\"Unsupported provider: {self._pricing_api}. 
Supported providers are: {list(pricing_fn_map.keys())}\"\n )\n return None\n return pricing_fn()\n\n def set_pricing_attributes(self) -> None:\n \"\"\"Set the pricing attributes for the model based on the provider.\"\"\"\n model_to_price_dict = self.fetch_pricing_information_from_provider()\n model_costs = model_to_price_dict.get(self.model_name) if model_to_price_dict else None\n if model_costs:\n self.input_cost = float(model_costs[\"prompt\"])\n self.output_cost = float(model_costs[\"completion\"])\n else:\n # use litellm to get model info if not found in the pricing dict\n try:\n model_info = get_model_info(self.model_name)\n self.input_cost = float(model_info.get(\"input_cost_per_token\", 0.0))\n self.output_cost = float(model_info.get(\"output_cost_per_token\", 0.0))\n except Exception as e:\n logging.warning(f\"Failed to fetch pricing for {self.model_name}: {e}\")\n self.input_cost = 0.0\n self.output_cost = 0.0\n\n def update_pricing_tracker(self, raw_response) -> None:\n \"\"\"Update the pricing tracker with the input and output tokens and cost.\"\"\"\n\n input_tokens, output_tokens = self.get_tokens_counts_from_response(raw_response)\n cost = input_tokens * self.input_cost + output_tokens * self.output_cost\n\n if hasattr(TRACKER, \"instance\") and isinstance(TRACKER.instance, LLMTracker):\n TRACKER.instance(input_tokens, output_tokens, cost)\n\n def get_tokens_counts_from_response(self, response) -> tuple:\n \"\"\"Get the input and output tokens counts from the response, provider-agnostic.\"\"\"\n # Try OpenAI/Anthropic style\n usage = getattr(response, \"usage\", None)\n if usage:\n input_tokens = getattr(usage, \"input_tokens\", None) or getattr(\n usage, \"prompt_tokens\", None\n )\n output_tokens = getattr(usage, \"output_tokens\", None) or getattr(\n usage, \"completion_tokens\", None","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.update_pricing_tracker","uri":"program://AgentLab/function/src.agentlab.llm.tracking.update_pricing_tracker#L252-L259","kind":"function","name":"update_pricing_tracker","path":"src/agentlab/llm/tracking.py","language":"python","start_line":252,"end_line":259,"context_start_line":232,"context_end_line":279,"code":" return pricing_fn()\n\n def set_pricing_attributes(self) -> None:\n \"\"\"Set the pricing attributes for the model based on the provider.\"\"\"\n model_to_price_dict = self.fetch_pricing_information_from_provider()\n model_costs = model_to_price_dict.get(self.model_name) if model_to_price_dict else None\n if model_costs:\n self.input_cost = float(model_costs[\"prompt\"])\n self.output_cost = float(model_costs[\"completion\"])\n else:\n # use litellm to get model info if not found in the pricing dict\n try:\n model_info = get_model_info(self.model_name)\n self.input_cost = float(model_info.get(\"input_cost_per_token\", 0.0))\n self.output_cost = float(model_info.get(\"output_cost_per_token\", 0.0))\n except Exception as e:\n logging.warning(f\"Failed to fetch pricing for {self.model_name}: {e}\")\n self.input_cost = 0.0\n self.output_cost = 0.0\n\n def update_pricing_tracker(self, raw_response) -> None:\n \"\"\"Update the pricing tracker with the input and output tokens and cost.\"\"\"\n\n input_tokens, output_tokens = self.get_tokens_counts_from_response(raw_response)\n cost = input_tokens * self.input_cost + output_tokens * self.output_cost\n\n if hasattr(TRACKER, \"instance\") and isinstance(TRACKER.instance, 
LLMTracker):\n TRACKER.instance(input_tokens, output_tokens, cost)\n\n def get_tokens_counts_from_response(self, response) -> tuple:\n \"\"\"Get the input and output tokens counts from the response, provider-agnostic.\"\"\"\n # Try OpenAI/Anthropic style\n usage = getattr(response, \"usage\", None)\n if usage:\n input_tokens = getattr(usage, \"input_tokens\", None) or getattr(\n usage, \"prompt_tokens\", None\n )\n output_tokens = getattr(usage, \"output_tokens\", None) or getattr(\n usage, \"completion_tokens\", None\n )\n if input_tokens is not None and output_tokens is not None:\n return input_tokens, output_tokens\n\n # Try dict style\n if isinstance(response, dict) and \"usage\" in response:\n usage = response[\"usage\"]\n input_tokens = usage.get(\"input_tokens\") or usage.get(\"prompt_tokens\")\n output_tokens = usage.get(\"output_tokens\") or usage.get(\"completion_tokens\")","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.get_tokens_counts_from_response","uri":"program://AgentLab/function/src.agentlab.llm.tracking.get_tokens_counts_from_response#L261-L286","kind":"function","name":"get_tokens_counts_from_response","path":"src/agentlab/llm/tracking.py","language":"python","start_line":261,"end_line":286,"context_start_line":241,"context_end_line":306,"code":" else:\n # use litellm to get model info if not found in the pricing dict\n try:\n model_info = get_model_info(self.model_name)\n self.input_cost = float(model_info.get(\"input_cost_per_token\", 0.0))\n self.output_cost = float(model_info.get(\"output_cost_per_token\", 0.0))\n except Exception as e:\n logging.warning(f\"Failed to fetch pricing for {self.model_name}: {e}\")\n self.input_cost = 0.0\n self.output_cost = 0.0\n\n def update_pricing_tracker(self, raw_response) -> None:\n \"\"\"Update the pricing tracker with the input and output tokens and cost.\"\"\"\n\n input_tokens, output_tokens = self.get_tokens_counts_from_response(raw_response)\n cost = input_tokens * self.input_cost + output_tokens * self.output_cost\n\n if hasattr(TRACKER, \"instance\") and isinstance(TRACKER.instance, LLMTracker):\n TRACKER.instance(input_tokens, output_tokens, cost)\n\n def get_tokens_counts_from_response(self, response) -> tuple:\n \"\"\"Get the input and output tokens counts from the response, provider-agnostic.\"\"\"\n # Try OpenAI/Anthropic style\n usage = getattr(response, \"usage\", None)\n if usage:\n input_tokens = getattr(usage, \"input_tokens\", None) or getattr(\n usage, \"prompt_tokens\", None\n )\n output_tokens = getattr(usage, \"output_tokens\", None) or getattr(\n usage, \"completion_tokens\", None\n )\n if input_tokens is not None and output_tokens is not None:\n return input_tokens, output_tokens\n\n # Try dict style\n if isinstance(response, dict) and \"usage\" in response:\n usage = response[\"usage\"]\n input_tokens = usage.get(\"input_tokens\") or usage.get(\"prompt_tokens\")\n output_tokens = usage.get(\"output_tokens\") or usage.get(\"completion_tokens\")\n if input_tokens is not None and output_tokens is not None:\n return input_tokens, output_tokens\n\n logging.warning(\n \"Unable to extract input and output tokens from the response. 
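A small check of the dict-style fallback in `get_tokens_counts_from_response`. One subtlety worth flagging: the `or` chains treat a legitimate 0 the same as a missing field, so a response reporting zero tokens falls through to the next lookup (and eventually to the 0, 0 default with a warning).

```python
resp = {"usage": {"prompt_tokens": 1_200, "completion_tokens": 150}}
usage = resp["usage"]
input_tokens = usage.get("input_tokens") or usage.get("prompt_tokens")
output_tokens = usage.get("output_tokens") or usage.get("completion_tokens")
assert (input_tokens, output_tokens) == (1200, 150)

# Pitfall: 0 is falsy, so {"input_tokens": 0} would be skipped by the `or`.
```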
Defaulting to 0.\"\n )\n return 0, 0\n\n def get_effective_cost(self, response):\n \"\"\"Get the effective cost from the response based on the provider.\"\"\"\n if self._pricing_api == \"anthropic\":\n return self.get_effective_cost_from_antrophic_api(response)\n elif self._pricing_api == \"openai\":\n return self.get_effective_cost_from_openai_api(response)\n elif self._pricing_api == \"litellm\":\n return completion_cost(response)\n else:\n logging.warning(\n f\"Unsupported provider: {self._pricing_api}. No effective cost calculated.\"\n )\n return 0.0\n\n def get_effective_cost_from_antrophic_api(self, response) -> float:\n \"\"\"\n Get the effective cost from the Anthropic API response.\n\n Anthropic usage 'input_tokens' are new input tokens (tokens that are not cached).","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.get_effective_cost","uri":"program://AgentLab/function/src.agentlab.llm.tracking.get_effective_cost#L288-L300","kind":"function","name":"get_effective_cost","path":"src/agentlab/llm/tracking.py","language":"python","start_line":288,"end_line":300,"context_start_line":268,"context_end_line":320,"code":" )\n output_tokens = getattr(usage, \"output_tokens\", None) or getattr(\n usage, \"completion_tokens\", None\n )\n if input_tokens is not None and output_tokens is not None:\n return input_tokens, output_tokens\n\n # Try dict style\n if isinstance(response, dict) and \"usage\" in response:\n usage = response[\"usage\"]\n input_tokens = usage.get(\"input_tokens\") or usage.get(\"prompt_tokens\")\n output_tokens = usage.get(\"output_tokens\") or usage.get(\"completion_tokens\")\n if input_tokens is not None and output_tokens is not None:\n return input_tokens, output_tokens\n\n logging.warning(\n \"Unable to extract input and output tokens from the response. Defaulting to 0.\"\n )\n return 0, 0\n\n def get_effective_cost(self, response):\n \"\"\"Get the effective cost from the response based on the provider.\"\"\"\n if self._pricing_api == \"anthropic\":\n return self.get_effective_cost_from_antrophic_api(response)\n elif self._pricing_api == \"openai\":\n return self.get_effective_cost_from_openai_api(response)\n elif self._pricing_api == \"litellm\":\n return completion_cost(response)\n else:\n logging.warning(\n f\"Unsupported provider: {self._pricing_api}. 
No effective cost calculated.\"\n )\n return 0.0\n\n def get_effective_cost_from_antrophic_api(self, response) -> float:\n \"\"\"\n Get the effective cost from the Anthropic API response.\n\n Anthropic usage 'input_tokens' are new input tokens (tokens that are not cached).\n Anthropic has different pricing for cache write and cache read tokens.\n See https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#tracking-cache-performance\n\n Args:\n response: The response object from the Anthropic API.\n\n Returns:\n float: The effective cost calculated from the response.\n \"\"\"\n usage = getattr(response, \"usage\", {})\n new_input_tokens = getattr(usage, \"input_tokens\", 0) # new input tokens\n output_tokens = getattr(usage, \"output_tokens\", 0)\n cache_read_tokens = getattr(usage, \"cache_input_tokens\", 0)\n cache_write_tokens = getattr(usage, \"cache_creation_input_tokens\", 0)","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.get_effective_cost_from_antrophic_api","uri":"program://AgentLab/function/src.agentlab.llm.tracking.get_effective_cost_from_antrophic_api#L302-L336","kind":"function","name":"get_effective_cost_from_antrophic_api","path":"src/agentlab/llm/tracking.py","language":"python","start_line":302,"end_line":336,"context_start_line":282,"context_end_line":356,"code":"\n logging.warning(\n \"Unable to extract input and output tokens from the response. Defaulting to 0.\"\n )\n return 0, 0\n\n def get_effective_cost(self, response):\n \"\"\"Get the effective cost from the response based on the provider.\"\"\"\n if self._pricing_api == \"anthropic\":\n return self.get_effective_cost_from_antrophic_api(response)\n elif self._pricing_api == \"openai\":\n return self.get_effective_cost_from_openai_api(response)\n elif self._pricing_api == \"litellm\":\n return completion_cost(response)\n else:\n logging.warning(\n f\"Unsupported provider: {self._pricing_api}. No effective cost calculated.\"\n )\n return 0.0\n\n def get_effective_cost_from_antrophic_api(self, response) -> float:\n \"\"\"\n Get the effective cost from the Anthropic API response.\n\n Anthropic usage 'input_tokens' are new input tokens (tokens that are not cached).\n Anthropic has different pricing for cache write and cache read tokens.\n See https://docs.anthropic.com/en/docs/build-with-claude/prompt-caching#tracking-cache-performance\n\n Args:\n response: The response object from the Anthropic API.\n\n Returns:\n float: The effective cost calculated from the response.\n \"\"\"\n usage = getattr(response, \"usage\", {})\n new_input_tokens = getattr(usage, \"input_tokens\", 0) # new input tokens\n output_tokens = getattr(usage, \"output_tokens\", 0)\n cache_read_tokens = getattr(usage, \"cache_input_tokens\", 0)\n cache_write_tokens = getattr(usage, \"cache_creation_input_tokens\", 0)\n\n cache_read_cost = self.input_cost * ANTHROPIC_CACHE_PRICING_FACTOR[\"cache_read_tokens\"]\n cache_write_cost = self.input_cost * ANTHROPIC_CACHE_PRICING_FACTOR[\"cache_write_tokens\"]\n\n # Calculate the effective cost\n effective_cost = (\n new_input_tokens * self.input_cost\n + output_tokens * self.output_cost\n + cache_read_tokens * cache_read_cost\n + cache_write_tokens * cache_write_cost\n )\n if effective_cost < 0:\n logging.warning(\n \"Anthropic: Negative effective cost detected.(Impossible! 
Likely a bug)\"\n )\n return effective_cost\n\n def get_effective_cost_from_openai_api(self, response) -> float:\n \"\"\"\n Get the effective cost from the OpenAI API response.\n\n OpenAI usage 'prompt_tokens' are the total input tokens (cache read tokens + new input tokens).\n See https://openai.com/index/api-prompt-caching/\n OpenAI has only one price for cache tokens, i.e., cache read price (generally 50% cheaper).\n OpenAI has no extra charge for cache write tokens.\n See Pricing Here: https://platform.openai.com/docs/pricing\n\n Args:\n response: The response object from the OpenAI API.\n\n Returns:\n float: The effective cost calculated from the response.\n \"\"\"\n usage = getattr(response, \"usage\", None)\n if usage is None:\n logging.warning(\"No usage information found in the response. Defaulting cost to 0.0.\")","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.get_effective_cost_from_openai_api","uri":"program://AgentLab/function/src.agentlab.llm.tracking.get_effective_cost_from_openai_api#L338-L387","kind":"function","name":"get_effective_cost_from_openai_api","path":"src/agentlab/llm/tracking.py","language":"python","start_line":338,"end_line":387,"context_start_line":318,"context_end_line":397,"code":" output_tokens = getattr(usage, \"output_tokens\", 0)\n cache_read_tokens = getattr(usage, \"cache_input_tokens\", 0)\n cache_write_tokens = getattr(usage, \"cache_creation_input_tokens\", 0)\n\n cache_read_cost = self.input_cost * ANTHROPIC_CACHE_PRICING_FACTOR[\"cache_read_tokens\"]\n cache_write_cost = self.input_cost * ANTHROPIC_CACHE_PRICING_FACTOR[\"cache_write_tokens\"]\n\n # Calculate the effective cost\n effective_cost = (\n new_input_tokens * self.input_cost\n + output_tokens * self.output_cost\n + cache_read_tokens * cache_read_cost\n + cache_write_tokens * cache_write_cost\n )\n if effective_cost < 0:\n logging.warning(\n \"Anthropic: Negative effective cost detected.(Impossible! Likely a bug)\"\n )\n return effective_cost\n\n def get_effective_cost_from_openai_api(self, response) -> float:\n \"\"\"\n Get the effective cost from the OpenAI API response.\n\n OpenAI usage 'prompt_tokens' are the total input tokens (cache read tokens + new input tokens).\n See https://openai.com/index/api-prompt-caching/\n OpenAI has only one price for cache tokens, i.e., cache read price (generally 50% cheaper).\n OpenAI has no extra charge for cache write tokens.\n See Pricing Here: https://platform.openai.com/docs/pricing\n\n Args:\n response: The response object from the OpenAI API.\n\n Returns:\n float: The effective cost calculated from the response.\n \"\"\"\n usage = getattr(response, \"usage\", None)\n if usage is None:\n logging.warning(\"No usage information found in the response. 
Defaulting cost to 0.0.\")\n return 0.0\n api_type = \"chatcompletion\" if hasattr(usage, \"prompt_tokens_details\") else \"response\"\n if api_type == \"chatcompletion\":\n total_input_tokens = usage.prompt_tokens # (cache read tokens + new input tokens)\n output_tokens = usage.completion_tokens\n cached_input_tokens = (\n usage.prompt_tokens_details.cached_tokens if usage.prompt_tokens_details else 0\n )\n new_input_tokens = total_input_tokens - cached_input_tokens\n elif api_type == \"response\":\n total_input_tokens = usage.input_tokens # (cache read tokens + new input tokens)\n output_tokens = usage.output_tokens\n cached_input_tokens = (\n usage.input_tokens_details.cached_tokens if usage.input_tokens_details else 0\n )\n new_input_tokens = total_input_tokens - cached_input_tokens\n else:\n logging.warning(f\"Unsupported API type: {api_type}. Defaulting cost to 0.0.\")\n return 0.0\n cache_read_cost = self.input_cost * OPENAI_CACHE_PRICING_FACTOR[\"cache_read_tokens\"]\n effective_cost = (\n self.input_cost * new_input_tokens\n + cached_input_tokens * cache_read_cost\n + self.output_cost * output_tokens\n )\n if effective_cost < 0:\n logging.warning(\n f\"OpenAI: Negative effective cost detected.(Impossible! Likely a bug). \"\n f\"New input tokens: {total_input_tokens}\"\n )\n return effective_cost\n\n\n@dataclass\nclass Stats:\n stats_dict: dict = field(default_factory=lambda: defaultdict(float))\n\n def increment_stats_dict(self, stats_dict: dict):\n \"\"\"increment the stats_dict with the given values.\"\"\"\n for k, v in stats_dict.items():\n self.stats_dict[k] += v","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.llm.tracking.increment_stats_dict","uri":"program://AgentLab/function/src.agentlab.llm.tracking.increment_stats_dict#L394-L397","kind":"function","name":"increment_stats_dict","path":"src/agentlab/llm/tracking.py","language":"python","start_line":394,"end_line":397,"context_start_line":374,"context_end_line":397,"code":" logging.warning(f\"Unsupported API type: {api_type}. Defaulting cost to 0.0.\")\n return 0.0\n cache_read_cost = self.input_cost * OPENAI_CACHE_PRICING_FACTOR[\"cache_read_tokens\"]\n effective_cost = (\n self.input_cost * new_input_tokens\n + cached_input_tokens * cache_read_cost\n + self.output_cost * output_tokens\n )\n if effective_cost < 0:\n logging.warning(\n f\"OpenAI: Negative effective cost detected.(Impossible! Likely a bug). 
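`Stats` (defined just above) is a thin wrapper over `defaultdict(float)`, which is why the mixin's `__call__` can blindly add `usage_*` counters without initializing them first. A short demonstration:

```python
from agentlab.llm.tracking import Stats

s = Stats()
s.increment_stats_dict({"usage_prompt_tokens": 1_200, "n_api_calls": 1})
s.increment_stats_dict({"usage_prompt_tokens": 800, "n_api_calls": 1})
# Missing keys start at 0.0, so repeated increments just accumulate.
assert s.stats_dict == {"usage_prompt_tokens": 2000.0, "n_api_calls": 2.0}
```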
\"\n f\"New input tokens: {total_input_tokens}\"\n )\n return effective_cost\n\n\n@dataclass\nclass Stats:\n stats_dict: dict = field(default_factory=lambda: defaultdict(float))\n\n def increment_stats_dict(self, stats_dict: dict):\n \"\"\"Increment the stats_dict with the given values.\"\"\"\n for k, v in stats_dict.items():\n self.stats_dict[k] += v","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.launch_exp","uri":"program://AgentLab/module/src.agentlab.experiments.launch_exp#L1-L224","kind":"module","name":"src.agentlab.experiments.launch_exp","path":"src/agentlab/experiments/launch_exp.py","language":"python","start_line":1,"end_line":224,"context_start_line":1,"context_end_line":224,"code":"import logging\nfrom importlib import import_module\nfrom pathlib import Path\n\nimport bgym\n\nfrom agentlab.experiments.exp_utils import run_exp\nfrom agentlab.experiments.loop import ExpArgs, yield_all_exp_results\n\n\ndef run_experiments(\n    n_jobs,\n    exp_args_list: list[ExpArgs],\n    study_dir,\n    parallel_backend=\"ray\",\n    avg_step_timeout=60,\n):\n    \"\"\"Run a list of ExpArgs in parallel.\n\n    To ensure optimal parallelism, make sure ExpArgs.depends_on is set correctly\n    and the backend is set to ray.\n\n    Args:\n        n_jobs: int\n            Number of parallel jobs.\n        exp_args_list: list[ExpArgs]\n            List of ExpArgs objects.\n        study_dir: Path\n            Directory where the experiments will be saved.\n        parallel_backend: str\n            Parallel backend to use. Either \"joblib\", \"ray\" or \"sequential\".\n            The only backends that support webarena graph dependencies correctly are ray and sequential.\n        avg_step_timeout: int\n            Will raise a TimeoutError if the episode is not finished after env_args.max_steps * avg_step_timeout seconds.\n\n    Raises:\n        ValueError: If the parallel_backend is not recognized.\n    \"\"\"\n\n    if len(exp_args_list) == 0:\n        logging.warning(\"No experiments to run.\")\n        return\n\n    study_dir = Path(study_dir)\n    study_dir.mkdir(parents=True, exist_ok=True)\n\n    # if n_jobs == 1 and parallel_backend != \"sequential\":\n    #     logging.warning(\"Only 1 job, switching to sequential backend.\")\n    #     parallel_backend = \"sequential\"\n\n    logging.info(f\"Saving experiments to {study_dir}\")\n    for exp_args in exp_args_list:\n        exp_args.agent_args.prepare()\n        exp_args.prepare(exp_root=study_dir)\n    try:\n        if parallel_backend == \"joblib\":\n            from joblib import Parallel, delayed\n\n            # split sequential (should be no longer needed with dependencies)\n            sequential_exp_args, exp_args_list = _split_sequential_exp(exp_args_list)\n\n            logging.info(\n                f\"Running {len(sequential_exp_args)} in sequential first. The remaining {len(exp_args_list)} will be run in parallel.\"\n            )\n            for exp_args in sequential_exp_args:\n                run_exp(exp_args, avg_step_timeout=avg_step_timeout)\n\n            Parallel(n_jobs=n_jobs, prefer=\"processes\")(\n                delayed(run_exp)(exp_args, avg_step_timeout=avg_step_timeout)\n                for exp_args in exp_args_list\n            )\n\n        # dask will be deprecated, as there were issues. Use ray instead.\n        # elif parallel_backend == \"dask\":\n        #     from agentlab.experiments.graph_execution_dask import (\n        #         execute_task_graph,\n        #         make_dask_client,\n        #     )\n\n        #     with make_dask_client(n_worker=n_jobs):\n        #         execute_task_graph(exp_args_list)\n        elif parallel_backend == \"ray\":\n            from agentlab.experiments.graph_execution_ray import execute_task_graph, ray\n\n            ray.init(num_cpus=n_jobs)\n            try:\n                execute_task_graph(exp_args_list, avg_step_timeout=avg_step_timeout)\n            finally:\n                ray.shutdown()\n        elif parallel_backend == \"sequential\":\n            for exp_args in exp_args_list:\n                run_exp(exp_args, avg_step_timeout=avg_step_timeout)\n        else:\n            raise ValueError(f\"Unknown parallel_backend: {parallel_backend}\")\n    finally:\n        # will close servers even if there is an exception or ctrl+c\n        # servers won't be closed if the script is killed with kill -9 or segfaults.\n        logging.info(\"All jobs are finished. Calling agent_args.close() on all agents...\")\n        for exp_args in exp_args_list:\n            exp_args.agent_args.close()\n        logging.info(f\"Experiment finished and saved in {study_dir}.\")\n\n\ndef find_incomplete(study_dir: str | Path, include_errors=True):\n    \"\"\"Find all incomplete experiments for relaunching.\n\n    Note: completed experiments are kept but are replaced by dummy exp_args\n    with nothing to run. This helps keep the dependencies between tasks.\n\n    Args:\n        study_dir: Path\n            The directory where the experiments are saved.\n        include_errors: bool\n            If True, also relaunch the experiments that errored; otherwise\n            relaunch only the incomplete ones.\n\n    Returns:\n        list[ExpArgs]\n            List of ExpArgs objects to relaunch.\n\n    Raises:\n        ValueError: If the study_dir does not exist.\n    \"\"\"\n    study_dir = Path(study_dir)\n\n    if not study_dir.exists():\n        raise ValueError(\n            f\"You asked to relaunch an existing experiment but {study_dir} does not exist.\"\n        )\n\n    exp_result_list = list(yield_all_exp_results(study_dir, progress_fn=None))\n    exp_args_list = [_hide_completed(exp_result, include_errors) for exp_result in exp_result_list]\n    # sort according to exp_args.order\n    exp_args_list.sort(key=lambda exp_args: exp_args.order if exp_args.order is not None else 0)\n\n    job_count = non_dummy_count(exp_args_list)\n\n    if job_count == 0:\n        logging.info(f\"No incomplete experiments found in {study_dir}.\")\n        return exp_args_list\n    else:\n        logging.info(f\"Found {job_count} incomplete experiments in {study_dir}.\")\n\n        message = (\n            \"Make sure the processes that were running are all stopped. Otherwise, \"\n            \"there will be concurrent writing in the same directories.\\n\"\n        )\n\n        logging.info(message)\n\n    return exp_args_list\n\n\ndef non_dummy_count(exp_args_list: list[ExpArgs]) -> int:\n    return sum([not exp_args.is_dummy for exp_args in exp_args_list])\n\n\ndef noop(*args, **kwargs):\n    pass\n\n\ndef _hide_completed(exp_result: bgym.ExpResult, include_errors: bool = True):\n    \"\"\"Hide completed experiments from the list.\n\n    This little hack allows an elegant way to keep the task dependencies for e.g. webarena\n    while skipping the tasks that are completed when relaunching.\n\n    Args:\n        exp_result: bgym.ExpResult\n            The experiment result to hide.\n        include_errors: bool\n            If True, include experiments that errored.\n\n    Returns:\n        ExpArgs\n            The ExpArgs object hidden if the experiment is completed.\n    \"\"\"\n\n    hide = False\n    if exp_result.status == \"done\":\n        hide = True\n    if exp_result.status == \"error\" and (not include_errors):\n        hide = True\n\n    exp_args = exp_result.exp_args\n    exp_args.is_dummy = hide  # just to keep track\n    exp_args.status = exp_result.status\n    if hide:\n        # make those functions do nothing since they are finished.\n        exp_args.run = noop\n        exp_args.prepare = noop\n\n    return exp_args\n\n\n# TODO remove this function once ray backend is stable\ndef _split_sequential_exp(exp_args_list: list[ExpArgs]) -> tuple[list[ExpArgs], list[ExpArgs]]:\n    \"\"\"Split exp_args that are flagged as sequential from those that are not.\"\"\"\n    sequential_exp_args = []\n    parallel_exp_args = []\n    for exp_args in exp_args_list:\n        if getattr(exp_args, \"sequential\", False):\n            sequential_exp_args.append(exp_args)\n        else:\n            parallel_exp_args.append(exp_args)\n\n    return sequential_exp_args, parallel_exp_args\n\n\ndef _split_path(path: str):\n    \"\"\"Split a path into a module name and an object name.\"\"\"\n    if \"/\" in path:\n        path = path.replace(\"/\", \".\")\n    module_name, obj_name = path.rsplit(\".\", 1)\n    return module_name, obj_name\n\n\ndef import_object(path: str):\n    module_name, obj_name = _split_path(path)\n    try:\n        module = import_module(module_name)\n        obj = getattr(module, obj_name)\n    except (ImportError, AttributeError) as e:\n        raise ImportError(f\"Error importing {path}: {e}\")\n    return obj","source_hash":"265b9682466bfb34018c9a6edc0a40a3a9b377a6671201c019e26e803bd99ec9","truncated":false}
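The path convention accepted by import_object is easiest to see with a small, hedged example; the stdlib names below are used purely for illustration and the bad path is hypothetical:

```python
# Hedged illustration of import_object's path convention.
from agentlab.experiments.launch_exp import import_object

# Dotted and slash-separated paths resolve to the same object.
assert import_object("os.path.join") is import_object("os/path/join")

# A missing attribute surfaces as an ImportError with the path in the message.
try:
    import_object("os.path.not_a_real_name")  # hypothetical bad path
except ImportError as err:
    print(err)
```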
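For relaunching, find_incomplete and run_experiments compose as below; a minimal sketch, assuming an existing study directory (the path is hypothetical):

```python
# Minimal sketch of the relaunch flow, assuming an existing study directory.
from agentlab.experiments.launch_exp import (
    find_incomplete,
    non_dummy_count,
    run_experiments,
)

study_dir = "/path/to/results/2025-01-01_00-00-00_my-study"  # hypothetical

# Completed runs come back as dummy ExpArgs, so task dependencies stay intact.
exp_args_list = find_incomplete(study_dir, include_errors=True)

if non_dummy_count(exp_args_list) > 0:
    run_experiments(
        n_jobs=4,
        exp_args_list=exp_args_list,
        study_dir=study_dir,
        parallel_backend="ray",  # "joblib", "ray" or "sequential"
    )
```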
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.get_ray_url","uri":"program://AgentLab/module/src.agentlab.experiments.get_ray_url#L1-L10","kind":"module","name":"src.agentlab.experiments.get_ray_url","path":"src/agentlab/experiments/get_ray_url.py","language":"python","start_line":1,"end_line":10,"context_start_line":1,"context_end_line":10,"code":"\"\"\"Temporary script to get the ray dashboard url for the current experiment.\n\nTODO figure out a more convenient way.\n\"\"\"\n\nimport ray\n\ncontext = ray.init(address=\"auto\", 
ignore_reinit_error=True)\n\nprint(context)","source_hash":"a8590a8c7f293d042ec1f1ad204ab03a2cacfd2906b526360b7d65998c76e28b","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.graph_execution_ray","uri":"program://AgentLab/module/src.agentlab.experiments.graph_execution_ray#L1-L107","kind":"module","name":"src.agentlab.experiments.graph_execution_ray","path":"src/agentlab/experiments/graph_execution_ray.py","language":"python","start_line":1,"end_line":107,"context_start_line":1,"context_end_line":107,"code":"import logging\nimport time\n\nimport bgym\nimport ray\nfrom ray.util import state\n\nfrom agentlab.experiments.exp_utils import _episode_timeout, run_exp\n\nlogger = logging.getLogger(__name__)\n\nrun_exp = ray.remote(run_exp)\n\n\ndef execute_task_graph(exp_args_list: list[bgym.ExpArgs], avg_step_timeout=60):\n    \"\"\"Execute a task graph in parallel while respecting dependencies using Ray.\"\"\"\n\n    exp_args_map = {exp_args.exp_id: exp_args for exp_args in exp_args_list}\n    task_map = {}\n\n    def get_task(exp_arg: bgym.ExpArgs):\n        if exp_arg.exp_id not in task_map:\n            # Get all dependency tasks first\n            dependency_tasks = [get_task(exp_args_map[dep_key]) for dep_key in exp_arg.depends_on]\n\n            # Create new task that depends on the dependency results\n            task_map[exp_arg.exp_id] = run_exp.options(name=f\"{exp_arg.exp_name}\").remote(\n                exp_arg, *dependency_tasks, avg_step_timeout=avg_step_timeout\n            )\n        return task_map[exp_arg.exp_id]\n\n    # Build task graph\n    for exp_arg in exp_args_list:\n        get_task(exp_arg)\n\n    max_timeout = max([_episode_timeout(exp_args, avg_step_timeout) for exp_args in exp_args_list])\n\n    return poll_for_timeout(task_map, max_timeout, poll_interval=max_timeout * 0.1)\n\n\ndef poll_for_timeout(tasks: dict[str, ray.ObjectRef], timeout: float, poll_interval: float = 1.0):\n    \"\"\"Cancel tasks that exceed the timeout.\n\n    I tried various methods for killing a job that hangs. So far this is\n    the only one that seems to work reliably (hopefully).\n\n    Args:\n        tasks: dict[str, ray.ObjectRef]\n            Dictionary of task_id: task_ref\n        timeout: float\n            Timeout in seconds\n        poll_interval: float\n            Polling interval in seconds\n\n    Returns:\n        dict[str, Any]: Dictionary of task_id: result\n    \"\"\"\n    task_list = list(tasks.values())\n    task_ids = list(tasks.keys())\n\n    logger.warning(f\"Any task exceeding {timeout} seconds will be cancelled.\")\n\n    while True:\n        ready, not_ready = ray.wait(task_list, num_returns=len(task_list), timeout=poll_interval)\n        for task in not_ready:\n            elapsed_time = get_elapsed_time(task)\n            # print(f\"Task {task.task_id().hex()} elapsed time: {elapsed_time}\")\n            if elapsed_time is not None and elapsed_time > timeout:\n                msg = f\"Task {task.task_id().hex()} has been running for {elapsed_time}s, more than the timeout: {timeout}s.\"\n                if elapsed_time < timeout + 60 + poll_interval:\n                    logger.warning(msg + \" Cancelling task.\")\n                    ray.cancel(task, force=False, recursive=False)\n                else:\n                    logger.warning(msg + \" Force killing.\")\n                    ray.cancel(task, force=True, recursive=False)\n        if len(ready) == len(task_list):\n            results = []\n            for task in ready:\n                try:\n                    result = ray.get(task)\n                except Exception as e:\n                    result = e\n                results.append(result)\n\n            return {task_id: result for task_id, result in zip(task_ids, results)}\n\n\ndef get_elapsed_time(task_ref: ray.ObjectRef):\n    try:\n        task_id = task_ref.task_id().hex()\n        task_info = state.get_task(task_id, address=\"auto\")\n        if not task_info:\n            return None\n        if not isinstance(task_info, list):\n            task_info = [task_info]\n\n        start_times_ms = [getattr(t, \"start_time_ms\", None) for t in task_info]\n        start_time_s = max([t / 1000.0 if t is not None else -1 for t in start_times_ms])\n        if start_time_s < 0:\n            return None  # Task has not started yet\n\n        current_time_s = time.time()\n        elapsed_time = current_time_s - start_time_s\n        return elapsed_time\n    except Exception as e:\n        logger.warning(f\"Could not get elapsed time for task {task_id}: {e}\")\n        return None","source_hash":"caf5d357ffbbc51aa5d57b3b7a8b622760391be851d97fb7694bebb7e071ac2e","truncated":false}
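poll_for_timeout is self-contained enough to exercise with plain Ray tasks; a minimal sketch, assuming Ray is installed locally (the task names and timings are illustrative, not from the repo):

```python
# Minimal sketch of poll_for_timeout with plain Ray tasks; timings illustrative.
import time

import ray

from agentlab.experiments.graph_execution_ray import poll_for_timeout

ray.init(num_cpus=2)

@ray.remote
def sleepy(seconds: int) -> int:
    time.sleep(seconds)
    return seconds

# Keys are caller-chosen labels; values are Ray object refs.
tasks = {"fast": sleepy.remote(1), "slow": sleepy.remote(5)}

# Tasks running past 30s would be cancelled; here both finish well in time.
results = poll_for_timeout(tasks, timeout=30, poll_interval=1.0)
print(results)  # {'fast': 1, 'slow': 5}

ray.shutdown()
```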
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study","uri":"program://AgentLab/module/src.agentlab.experiments.study#L1-L834","kind":"module","name":"src.agentlab.experiments.study","path":"src/agentlab/experiments/study.py","language":"python","start_line":1,"end_line":834,"context_start_line":1,"context_end_line":834,"code":"import gzip\nimport logging\nimport os\nimport pickle\nimport random\nimport uuid\nfrom abc import ABC, abstractmethod\nfrom concurrent.futures import ProcessPoolExecutor\nfrom dataclasses import asdict, dataclass\nfrom datetime import datetime\nfrom multiprocessing import Manager, Pool, Queue\nfrom pathlib import Path\n\nimport bgym\nfrom bgym import DEFAULT_BENCHMARKS, Benchmark\nfrom slugify import slugify\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.analyze import inspect_results\nfrom agentlab.benchmarks.abstract_env import AbstractEnvArgs\nfrom agentlab.experiments import reproducibility_util as repro\nfrom agentlab.experiments.exp_utils import RESULTS_DIR, add_dependencies\nfrom agentlab.experiments.launch_exp import (\n    find_incomplete,\n    non_dummy_count,\n    run_experiments,\n)\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.experiments.multi_server import BaseServer\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_study(\n    agent_args: list[AgentArgs] | AgentArgs,\n    benchmark: Benchmark | str,\n    logging_level=logging.WARNING,\n    logging_level_stdout=logging.WARNING,\n    suffix=\"\",\n    comment=None,\n    ignore_dependencies=False,\n    parallel_servers=None,\n):\n    \"\"\"Run a list of agents on a benchmark.\n\n    Args:\n        agent_args: list[AgentArgs] | AgentArgs\n            The agent configuration(s) to run. *IMPORTANT*: these objects will be pickled and\n            unpickled. Make sure they are imported from a package that is accessible from\n            PYTHONPATH. Otherwise, it won't load in agentlab-xray.\n        benchmark: Benchmark | str\n            The benchmark to run the agents on. See DEFAULT_BENCHMARKS for the main ones. You\n            can also make your own by modifying an existing one.\n        logging_level: int\n            The logging level for the file log.\n        logging_level_stdout: int\n            The logging level for the stdout of the main script. Each job will have its own logging\n            level that will be saved into a file and can be seen in agentlab-xray.\n        suffix: str\n            A suffix to add to the study name. This can be useful to keep track of your experiments.\n            By default the study name contains agent name, benchmark name and date.\n        comment: str\n            Extra comments from the authors of this study to be stored in the reproducibility\n            information. Leave any extra information that can explain why results could be different\n            than expected.\n        ignore_dependencies: bool\n            If True, ignore the dependencies of the tasks in the benchmark. *Use with caution.* So\n            far, only WebArena and VisualWebArena have dependencies between tasks to minimize the\n            influence of solving one task before another one. This dependency graph allows\n            experiments to run in parallel while respecting task dependencies. However, it still\n            can't run more than 4 tasks in parallel and, in practice, it speeds up evaluation by a\n            factor of only 3x compared to sequential execution. To accelerate execution, you can ignore\n            dependencies and run in full parallel. 
This leads to a decrease in performance of about\n            1%-2%, and could be more. Note: ignore_dependencies on VisualWebArena doesn't work.\n        parallel_servers: list[WebArenaInstanceVars]\n            The list of parallel servers to use `if \"webarena\" in benchmark.name`. Use this to\n            dispatch agent_args on a pool of servers in parallel. If len(agent_args) >\n            len(parallel_servers), the servers will be reused for the next evaluation (with a reset) as\n            soon as it is done.\n\n    Returns:\n        Study | SequentialStudies | ParallelStudies object.\n            SequentialStudies: if the benchmark requires manual reset after each evaluation such as\n                WebArena and VisualWebArena.\n            ParallelStudies: if the benchmark has multiple servers to run in parallel.\n            Study: otherwise.\n    \"\"\"\n\n    if not isinstance(agent_args, (list, tuple)):\n        agent_args = [agent_args]\n\n    if isinstance(benchmark, str):\n        benchmark = DEFAULT_BENCHMARKS[benchmark.lower()]()\n\n    if len(agent_args) > 1 and (\"webarena\" in benchmark.name or parallel_servers is not None):\n        logger.warning(\n            \"*WebArena* requires manual reset after each evaluation. Running through SequentialStudies.\"\n        )\n        studies = []\n        for agent in agent_args:\n            studies.append(\n                Study(\n                    [agent],\n                    benchmark,\n                    logging_level=logging_level,\n                    logging_level_stdout=logging_level_stdout,\n                    suffix=suffix,\n                    comment=comment,\n                    ignore_dependencies=ignore_dependencies,\n                )\n            )\n        if parallel_servers is not None:\n            return ParallelStudies(studies, parallel_servers=parallel_servers)\n        else:\n            return SequentialStudies(studies)\n    else:\n        return Study(\n            agent_args,\n            benchmark,\n            logging_level=logging_level,\n            logging_level_stdout=logging_level_stdout,\n            suffix=suffix,\n            comment=comment,\n            ignore_dependencies=ignore_dependencies,\n        )\n\n\nclass AbstractStudy(ABC):\n    \"\"\"Abstract class for a study.\"\"\"\n\n    dir: Path = None\n    suffix: str = \"\"\n\n    @abstractmethod\n    def find_incomplete(self, include_errors=True):\n        \"\"\"Prepare the study for relaunching by finding incomplete experiments.\"\"\"\n\n    @abstractmethod\n    def run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n        \"\"\"Run the study.\"\"\"\n\n    def make_dir(self, exp_root=RESULTS_DIR):\n        \"\"\"Create a directory for the study.\"\"\"\n        if self.dir is None:\n            dir_name = f\"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{self.name}\"\n\n            self.dir = Path(exp_root) / dir_name\n        self.dir.mkdir(parents=True, exist_ok=True)\n\n    def save(self, exp_root=RESULTS_DIR):\n        \"\"\"Pickle the study to the directory.\"\"\"\n        # TODO perhaps remove exp_args_list before pickling and when loading bring them from the individual directories\n\n        self.make_dir(exp_root=exp_root)\n        with gzip.open(self.dir / \"study.pkl.gz\", \"wb\") as f:\n            pickle.dump(self, f)\n\n    def get_results(self, suffix=\"\", also_save=True):\n        \"\"\"Recursively load all results from the study directory and summarize them.\"\"\"\n        result_df = inspect_results.load_result_df(self.dir)\n        error_report = inspect_results.error_report(result_df, max_stack_trace=3, use_log=True)\n        summary_df = inspect_results.summarize_study(result_df)\n\n        if also_save:\n            suffix = f\"_{suffix}\" if suffix else \"\"\n            result_df.to_csv(self.dir / f\"result_df{suffix}.csv\")\n            summary_df.to_csv(self.dir / f\"summary_df{suffix}.csv\")\n            (self.dir / f\"error_report{suffix}.md\").write_text(error_report)\n\n        return result_df, summary_df, error_report\n\n    def shuffle_exps(self):\n        \"\"\"Shuffle the experiments in the study.\"\"\"\n        self.exp_args_list = random.sample(self.exp_args_list, 
len(self.exp_args_list))\n\n\n@dataclass\nclass Study(AbstractStudy):\n    \"\"\"A study corresponds to one or multiple agents evaluated on a benchmark.\n\n    This is part of the high-level API to help keep experiments organized and reproducible.\n\n    Attributes:\n        agent_args: list[AgentArgs]\n            The agent configuration(s) to run. *IMPORTANT*: these objects will be pickled and\n            unpickled. Make sure they are imported from a package that is accessible from\n            PYTHONPATH. Otherwise, it won't load in agentlab-xray.\n        benchmark: Benchmark | str\n            The benchmark to run the agents on. See DEFAULT_BENCHMARKS for the main ones. You\n            can also make your own by modifying an existing one.\n        dir: Path\n            The directory where the study will be saved. If None, a directory will be created in\n            RESULTS_DIR.\n        suffix: str\n            A suffix to add to the study name. This can be useful to keep track of your experiments.\n            By default the study name contains agent name, benchmark name and date.\n        uuid: str\n            A unique identifier for the study. Will be generated automatically.\n        reproducibility_info: dict\n            Information about the study that may affect the reproducibility of the experiment. e.g.:\n            versions of BrowserGym, benchmark, AgentLab...\n        logging_level: int\n            The logging level for individual jobs.\n        logging_level_stdout: int\n            The logging level for the stdout of the main script. Each job will have its own logging\n            level that will be saved into a file and can be seen in agentlab-xray.\n        comment: str\n            Extra comments from the authors of this study to be stored in the reproducibility\n            information. Leave any extra information that can explain why results could be different\n            than expected.\n        ignore_dependencies: bool\n            If True, ignore the dependencies of the tasks in the benchmark. *Use with caution*. So\n            far, only WebArena and VisualWebArena have dependencies between tasks to minimize the\n            influence of solving one task before another one. This dependency graph allows\n            experiments to run in parallel while respecting task dependencies. However, it still\n            can't run more than 4 tasks in parallel and, in practice, it speeds up evaluation by a\n            factor of only 3x compared to sequential execution. To accelerate execution, you can ignore\n            dependencies and run in full parallel. This leads to a decrease in performance of about\n            1%-2%, and could be more. Note: ignore_dependencies on VisualWebArena doesn't work.\n        avg_step_timeout: int\n            The average step timeout in seconds. This is used to stop the experiments if they are\n            taking too long. The default is 60 seconds.\n        demo_mode: bool\n            If True, the experiments will be run in demo mode, which will record videos, and enable\n            visual effects for actions.\n    \"\"\"\n\n    agent_args: list[AgentArgs] = None\n    benchmark: Benchmark | str = None\n    dir: Path = None\n    suffix: str = \"\"  # used for adding a personal comment to the study name\n    uuid: str = None\n    reproducibility_info: dict = None\n    logging_level: int = logging.DEBUG\n    logging_level_stdout: int = logging.WARNING\n    comment: str = None  # Extra comments from the authors of this study\n    ignore_dependencies: bool = False\n    avg_step_timeout: int = 60\n    demo_mode: bool = False\n\n    def __post_init__(self):\n        \"\"\"Initialize the study. 
Set the uuid, and generate the exp_args_list.\"\"\"\n self.uuid = uuid.uuid4()\n if isinstance(self.benchmark, str):\n self.benchmark = DEFAULT_BENCHMARKS[self.benchmark.lower()]()\n\n self.benchmark.env_args_list = _convert_env_args(self.benchmark.env_args_list)\n\n if isinstance(self.dir, str):\n self.dir = Path(self.dir)\n self.make_exp_args_list()\n\n def make_exp_args_list(self):\n \"\"\"Generate the exp_args_list from the agent_args and the benchmark.\"\"\"\n self.exp_args_list = self.agents_on_benchmark(\n self.agent_args,\n self.benchmark,\n logging_level=self.logging_level,\n logging_level_stdout=self.logging_level_stdout,\n ignore_dependencies=self.ignore_dependencies,\n demo_mode=self.demo_mode,\n )\n\n def find_incomplete(self, include_errors=True):\n \"\"\"Find incomplete or errored experiments in the study directory for relaunching.\n\n Args:\n include_errors: bool\n If True, include errored experiments in the list.\n\n Returns:\n list[ExpArgs]: The list of all experiments with completed ones replaced by a\n dummy exp_args to keep the task dependencies.\n \"\"\"\n self.exp_args_list = find_incomplete(self.dir, include_errors=include_errors)\n n_incomplete = non_dummy_count(self.exp_args_list)\n n_error = [\n getattr(exp_args, \"status\", \"incomplete\") == \"error\" for exp_args in self.exp_args_list\n ].count(True)\n return n_incomplete, n_error\n\n def load_exp_args_list(self):\n logger.info(f\"Loading experiments from {self.dir}\")\n self.exp_args_list = list(inspect_results.yield_all_exp_results(savedir_base=self.dir))\n\n def set_reproducibility_info(self, strict_reproducibility=False, comment=None):\n \"\"\"Gather relevant information that may affect the reproducibility of the experiment\n\n e.g.: versions of BrowserGym, benchmark, AgentLab...\n\n Args:\n strict_reproducibility: bool\n If True, all modifications have to be committed before running the experiments.\n Also, if relaunching a study, it will not be possible if the code has changed.\n comment: str\n Extra comment to add to the reproducibility information.\n \"\"\"\n agent_names = [a.agent_name for a in self.agent_args]\n info = repro.get_reproducibility_info(\n agent_names,\n self.benchmark,\n self.uuid,\n ignore_changes=not strict_reproducibility,\n comment=comment,\n allow_bypass_benchmark_version=not strict_reproducibility,\n )\n if self.reproducibility_info is not None:\n repro.assert_compatible(\n self.reproducibility_info, info, raise_if_incompatible=strict_reproducibility\n )\n self.reproducibility_info = info\n\n def run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n relaunch_errors=True,\n exp_root=RESULTS_DIR,\n ):\n self.set_reproducibility_info(\n strict_reproducibility=strict_reproducibility, comment=self.comment\n )\n self.save(exp_root=exp_root)\n\n n_exp = len(self.exp_args_list)\n last_error_count = None\n\n for i in range(n_relaunch):\n logger.info(f\"Launching study {self.name} - trial {i + 1} / {n_relaunch}\")\n self._run(n_jobs, parallel_backend, strict_reproducibility)\n\n suffix = f\"trial_{i + 1}_of_{n_relaunch}\"\n _, summary_df, error_report = self.get_results(suffix=suffix)\n logger.info(\"\\n\" + str(summary_df))\n\n n_incomplete, n_error = self.find_incomplete(include_errors=relaunch_errors)\n\n if n_error / n_exp > 0.3:\n logger.warning(\"More than 30% of the experiments errored. 
Stopping the retries.\")\n break\n\n if last_error_count is not None and n_error >= last_error_count:\n logger.warning(\n \"Last trial did not reduce the number of errors. Stopping the retries.\"\n )\n break\n\n if n_incomplete == 0:\n logger.info(f\"Study {self.name} finished.\")\n break\n\n logger.info(\"# Error Report:\\n-------------\\n\\n\" + error_report)\n\n if n_incomplete != 0:\n logger.warning(\n f\"Study {self.name} did not finish after {n_relaunch} trials. There are {n_incomplete} incomplete experiments.\"\n )\n\n def _run(self, n_jobs=1, parallel_backend=\"joblib\", strict_reproducibility=False):\n \"\"\"Run all experiments in the study in parallel when possible.\n\n Args:\n n_jobs: int\n Number of parallel jobs.\n parallel_backend: str\n Parallel backend to use. Either \"joblib\", \"ray\" or \"sequential\".\n strict_reproducibility: bool\n If True, all modifications have to be committed before running the experiments.\n Also, if relaunching a study, it will not be possible if the code has changed.\n\n Raises:\n ValueError: If the exp_args_list is None.\n \"\"\"\n\n if self.exp_args_list is None:\n raise ValueError(\"exp_args_list is None. Please set exp_args_list before running.\")\n\n logger.info(\"Preparing backends...\")\n self.benchmark.prepare_backends()\n logger.info(\"Backends ready.\")\n\n run_experiments(\n n_jobs,\n self.exp_args_list,\n self.dir,\n parallel_backend=parallel_backend,\n avg_step_timeout=self.avg_step_timeout,\n )\n\n def append_to_journal(self, strict_reproducibility=True):\n \"\"\"Append the study to the journal.\n\n Args:\n strict_reproducibility: bool\n If True, incomplete experiments will raise an error.\n \"\"\"\n _, summary_df, _ = self.get_results()\n repro.append_to_journal(\n self.reproducibility_info,\n summary_df,\n strict_reproducibility=strict_reproducibility,\n )\n\n @property\n def name(self):\n agent_names = [a.agent_name for a in self.agent_args]\n return _make_study_name(agent_names, [self.benchmark.name], self.suffix)\n\n def override_max_steps(self, max_steps):\n for exp_args in self.exp_args_list:\n exp_args.env_args.max_steps = max_steps\n\n @staticmethod\n def load(dir: Path) -> \"Study\":\n dir = Path(dir)\n study_path = dir / \"study.pkl.gz\"\n if not study_path.exists() and dir.is_dir():\n # For backward compatibility\n first_result = next(\n inspect_results.yield_all_exp_results(savedir_base=dir, progress_fn=None)\n )\n benchmark_name = first_result.exp_args.env_args.task_name.split(\".\")[0]\n agent_args = first_result.exp_args.agent_args\n study = Study(agent_args=agent_args, benchmark=benchmark_name, dir=dir)\n else:\n with gzip.open(dir / \"study.pkl.gz\", \"rb\") as f:\n study = pickle.load(f) # type: Study\n study.dir = dir\n\n # # just a check\n # for i, exp_args in enumerate(study.exp_args_list):\n # if exp_args.order != i:\n # logging.warning(f\"The order of the experiments is not correct. 
{exp_args.order} != {i}\")\n\n        return study\n\n    @staticmethod\n    def load_most_recent(root_dir: Path = None, contains=None) -> \"Study\":\n        return Study.load(get_most_recent_study(root_dir, contains=contains))\n\n    def agents_on_benchmark(\n        self,\n        agents: list[AgentArgs] | AgentArgs,\n        benchmark: Benchmark,\n        demo_mode=False,\n        logging_level: int = logging.INFO,\n        logging_level_stdout: int = logging.INFO,\n        ignore_dependencies=False,\n    ):\n        \"\"\"Run one or multiple agents on a benchmark.\n\n        Args:\n            agents: list[AgentArgs] | AgentArgs\n                The agent configuration(s) to run.\n            benchmark: Benchmark\n                The benchmark to run the agents on.\n            demo_mode: bool\n                If True, the experiments will be run in demo mode.\n            logging_level: int\n                The logging level for individual jobs.\n            logging_level_stdout: int\n                The logging level for the stdout.\n            ignore_dependencies: bool\n                If True, the dependencies will be ignored and all experiments can be run in parallel.\n\n        Returns:\n            list[ExpArgs]: The list of experiments to run.\n\n        Raises:\n            ValueError: If multiple agents are run on a benchmark that requires manual reset.\n        \"\"\"\n\n        if not isinstance(agents, (list, tuple)):\n            agents = [agents]\n\n        if benchmark.name.startswith(\"visualwebarena\") or benchmark.name.startswith(\"webarena\"):\n            if len(agents) > 1:\n# ... truncated ...","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":true}
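Putting the module together, a minimal sketch of the high-level entry point; my_agents and my_agent_args are hypothetical placeholders for an AgentArgs instance importable from your own package:

```python
# Minimal sketch of the high-level Study API via make_study.
import logging

from agentlab.experiments.study import make_study

from my_agents import my_agent_args  # hypothetical: any AgentArgs instance

study = make_study(
    agent_args=[my_agent_args],
    benchmark="miniwob",  # a string is resolved through DEFAULT_BENCHMARKS
    logging_level_stdout=logging.WARNING,
    comment="hypothetical smoke test",
)

if __name__ == "__main__":  # some parallel backends require this guard
    study.run(n_jobs=4, parallel_backend="ray", n_relaunch=3)
```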
This can be useful to keep track of your experiments.\n By default the study name contains agent name, benchmark name and date.\n comment: str\n Extra comments from the authors of this study to be stored in the reproducibility\n information. Leave any extra information that can explain why results could be different\n than expected.\n ignore_dependencies: bool\n If True, ignore the dependencies of the tasks in the benchmark. *Use with caution.* So\n far, only WebArena and VisualWebArena have dependencies between tasks to minimize the\n influence of solving one task before another one. This dependency graph allows\n experiments to run in parallel while respecting task dependencies. However, it still\n can't run more than 4 and, in practice it's speeding up evaluation by a factor of only\n 3x compare to sequential executionz. To accelerate execution, you can ignore\n dependencies and run in full parallel. This leads to a decrease in performance of about\n 1%-2%, and could be more. Note: ignore_dependencies on VisualWebArena doesn't work.\n parallel_servers: list[WebArenaInstanceVars]\n The number of parallel servers to use `if \"webarena\" in benchmark.name`. Use this to\n dispatch agent_args on a pool of servers in parallel. If len(agent_args) >\n len(parallel_servers), the servers will be reused for next evaluation (with a reset) as\n soon as it is done.\n\n Returns:\n Study | SequentialStudies | ParallelStudies object.\n SequentialStudies: if the benchmark requires manual reset after each evaluation such as\n WebArena and VisualWebArena.\n ParallelStudies: if the benchmark has multiple servers to run in parallel.\n Study: otherwise.\n \"\"\"\n\n if not isinstance(agent_args, (list, tuple)):\n agent_args = [agent_args]\n\n if isinstance(benchmark, str):\n benchmark = DEFAULT_BENCHMARKS[benchmark.lower()]()\n\n if len(agent_args) > 1 and (\"webarena\" in benchmark.name or parallel_servers is not None):\n logger.warning(\n \"*WebArena* requires manual reset after each evaluation. 
Running through SequentialStudies.\"\n )\n studies = []\n for agent in agent_args:\n studies.append(\n Study(\n [agent],\n benchmark,\n logging_level=logging_level,\n logging_level_stdout=logging_level_stdout,\n suffix=suffix,\n comment=comment,\n ignore_dependencies=ignore_dependencies,\n )\n )\n if parallel_servers is not None:\n return ParallelStudies(studies, parallel_servers=parallel_servers)\n else:\n return SequentialStudies(studies)\n else:\n return Study(\n agent_args,\n benchmark,\n logging_level=logging_level,\n logging_level_stdout=logging_level_stdout,\n suffix=suffix,\n comment=comment,\n ignore_dependencies=ignore_dependencies,\n )\n\n\nclass AbstractStudy(ABC):\n \"\"\"Abstract class for a study.\"\"\"\n\n dir: Path = None\n suffix: str = \"\"\n\n @abstractmethod\n def find_incomplete(self, include_errors=True):\n \"\"\"Prepare the study for relaunching by finding incomplete experiments\"\"\"\n\n @abstractmethod\n def run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n \"\"\"Run the study\"\"\"\n\n def make_dir(self, exp_root=RESULTS_DIR):\n \"\"\"Create a directory for the study\"\"\"\n if self.dir is None:\n dir_name = f\"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{self.name}\"","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.AbstractStudy","uri":"program://AgentLab/class/src.agentlab.experiments.study.AbstractStudy#L128-L174","kind":"class","name":"AbstractStudy","path":"src/agentlab/experiments/study.py","language":"python","start_line":128,"end_line":174,"context_start_line":108,"context_end_line":194,"code":" comment=comment,\n ignore_dependencies=ignore_dependencies,\n )\n )\n if parallel_servers is not None:\n return ParallelStudies(studies, parallel_servers=parallel_servers)\n else:\n return SequentialStudies(studies)\n else:\n return Study(\n agent_args,\n benchmark,\n logging_level=logging_level,\n logging_level_stdout=logging_level_stdout,\n suffix=suffix,\n comment=comment,\n ignore_dependencies=ignore_dependencies,\n )\n\n\nclass AbstractStudy(ABC):\n \"\"\"Abstract class for a study.\"\"\"\n\n dir: Path = None\n suffix: str = \"\"\n\n @abstractmethod\n def find_incomplete(self, include_errors=True):\n \"\"\"Prepare the study for relaunching by finding incomplete experiments\"\"\"\n\n @abstractmethod\n def run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n \"\"\"Run the study\"\"\"\n\n def make_dir(self, exp_root=RESULTS_DIR):\n \"\"\"Create a directory for the study\"\"\"\n if self.dir is None:\n dir_name = f\"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{self.name}\"\n\n self.dir = Path(exp_root) / dir_name\n self.dir.mkdir(parents=True, exist_ok=True)\n\n def save(self, exp_root=RESULTS_DIR):\n \"\"\"Pickle the study to the directory\"\"\"\n # TODO perhaps remove exp_args_list before pickling and when loading bring them from the individual directories\n\n self.make_dir(exp_root=exp_root)\n with gzip.open(self.dir / \"study.pkl.gz\", \"wb\") as f:\n pickle.dump(self, f)\n\n def get_results(self, suffix=\"\", also_save=True):\n \"\"\"Recursively load all results from the study directory and summarize them.\"\"\"\n result_df = inspect_results.load_result_df(self.dir)\n error_report = inspect_results.error_report(result_df, max_stack_trace=3, use_log=True)\n summary_df = inspect_results.summarize_study(result_df)\n\n if also_save:\n suffix = 
f\"_{suffix}\" if suffix else \"\"\n result_df.to_csv(self.dir / f\"result_df{suffix}.csv\")\n summary_df.to_csv(self.dir / f\"summary_df{suffix}.csv\")\n (self.dir / f\"error_report{suffix}.md\").write_text(error_report)\n\n return result_df, summary_df, error_report\n\n def shuffle_exps(self):\n \"\"\"Shuffle the experiments in the study.\"\"\"\n self.exp_args_list = random.sample(self.exp_args_list, len(self.exp_args_list))\n\n\n@dataclass\nclass Study(AbstractStudy):\n \"\"\"A study coresponds to one or multiple agents evaluated on a benchmark.\n\n This is part of the high level API to help keep experiments organized and reproducible.\n\n Attributes:\n agent_args: list[AgentArgs]\n The agent configuration(s) to run. *IMPORTANT*: these objects will be pickled and\n unpickled. Make sure they are imported from a package that is accessible from\n PYTHONPATH. Otherwise, it won't load in agentlab-xray.\n benchmark: Benchmark | str\n The benchmark to run the agents on. See DEFAULT_BENCHMARKS for the main ones. You\n can also make your own by modifying an existing one.\n dir: Path\n The directory where the study will be saved. If None, a directory will be created in\n RESULTS_DIR.\n suffix: str","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.Study","uri":"program://AgentLab/class/src.agentlab.experiments.study.Study#L178-L519","kind":"class","name":"Study","path":"src/agentlab/experiments/study.py","language":"python","start_line":178,"end_line":519,"context_start_line":158,"context_end_line":539,"code":" def get_results(self, suffix=\"\", also_save=True):\n \"\"\"Recursively load all results from the study directory and summarize them.\"\"\"\n result_df = inspect_results.load_result_df(self.dir)\n error_report = inspect_results.error_report(result_df, max_stack_trace=3, use_log=True)\n summary_df = inspect_results.summarize_study(result_df)\n\n if also_save:\n suffix = f\"_{suffix}\" if suffix else \"\"\n result_df.to_csv(self.dir / f\"result_df{suffix}.csv\")\n summary_df.to_csv(self.dir / f\"summary_df{suffix}.csv\")\n (self.dir / f\"error_report{suffix}.md\").write_text(error_report)\n\n return result_df, summary_df, error_report\n\n def shuffle_exps(self):\n \"\"\"Shuffle the experiments in the study.\"\"\"\n self.exp_args_list = random.sample(self.exp_args_list, len(self.exp_args_list))\n\n\n@dataclass\nclass Study(AbstractStudy):\n \"\"\"A study coresponds to one or multiple agents evaluated on a benchmark.\n\n This is part of the high level API to help keep experiments organized and reproducible.\n\n Attributes:\n agent_args: list[AgentArgs]\n The agent configuration(s) to run. *IMPORTANT*: these objects will be pickled and\n unpickled. Make sure they are imported from a package that is accessible from\n PYTHONPATH. Otherwise, it won't load in agentlab-xray.\n benchmark: Benchmark | str\n The benchmark to run the agents on. See DEFAULT_BENCHMARKS for the main ones. You\n can also make your own by modifying an existing one.\n dir: Path\n The directory where the study will be saved. If None, a directory will be created in\n RESULTS_DIR.\n suffix: str\n A suffix to add to the study name. This can be useful to keep track of your experiments.\n By default the study name contains agent name, benchmark name and date.\n uuid: str\n A unique identifier for the study. 
Will be generated automatically.\n reproducibility_info: dict\n Information about the study that may affect the reproducibility of the experiment. e.g.:\n versions of BrowserGym, benchmark, AgentLab...\n logging_level: int\n The logging level for individual jobs.\n logging_level_stdout: int\n The logging level for the stdout of the main script. Each job will have its own logging\n level that is saved to a file and can be seen in agentlab-xray.\n comment: str\n Extra comments from the authors of this study to be stored in the reproducibility\n information. Leave any extra information that can explain why results could be different\n than expected.\n ignore_dependencies: bool\n If True, ignore the dependencies of the tasks in the benchmark. *Use with caution*. So\n far, only WebArena and VisualWebArena have dependencies between tasks, to minimize the\n influence of solving one task before another one. This dependency graph allows\n experiments to run in parallel while respecting task dependencies. However, it still\n can't run more than 4 experiments in parallel and, in practice, it speeds up evaluation\n by a factor of only about 3x compared to sequential execution. To accelerate execution,\n you can ignore dependencies and run in full parallel. This leads to a performance\n decrease of about 1%-2%, and possibly more. Note: ignore_dependencies does not work on\n VisualWebArena.\n avg_step_timeout: int\n The average step timeout in seconds. This is used to stop the experiments if they are\n taking too long. The default is 60 seconds.\n demo_mode: bool\n If True, the experiments will be run in demo mode, which records videos and enables\n visual effects for actions.\n \"\"\"\n\n agent_args: list[AgentArgs] = None\n benchmark: Benchmark | str = None\n dir: Path = None\n suffix: str = \"\" # used for adding a personal comment to the study name\n uuid: str = None\n reproducibility_info: dict = None\n logging_level: int = logging.DEBUG\n logging_level_stdout: int = logging.WARNING\n comment: str = None # Extra comments from the authors of this study\n ignore_dependencies: bool = False\n avg_step_timeout: int = 60\n demo_mode: bool = False\n\n def __post_init__(self):\n \"\"\"Initialize the study. 
Set the uuid, and generate the exp_args_list.\"\"\"\n self.uuid = uuid.uuid4()\n if isinstance(self.benchmark, str):\n self.benchmark = DEFAULT_BENCHMARKS[self.benchmark.lower()]()\n\n self.benchmark.env_args_list = _convert_env_args(self.benchmark.env_args_list)\n\n if isinstance(self.dir, str):\n self.dir = Path(self.dir)\n self.make_exp_args_list()\n\n def make_exp_args_list(self):\n \"\"\"Generate the exp_args_list from the agent_args and the benchmark.\"\"\"\n self.exp_args_list = self.agents_on_benchmark(\n self.agent_args,\n self.benchmark,\n logging_level=self.logging_level,\n logging_level_stdout=self.logging_level_stdout,\n ignore_dependencies=self.ignore_dependencies,\n demo_mode=self.demo_mode,\n )\n\n def find_incomplete(self, include_errors=True):\n \"\"\"Find incomplete or errored experiments in the study directory for relaunching.\n\n Args:\n include_errors: bool\n If True, include errored experiments in the list.\n\n Returns:\n list[ExpArgs]: The list of all experiments with completed ones replaced by a\n dummy exp_args to keep the task dependencies.\n \"\"\"\n self.exp_args_list = find_incomplete(self.dir, include_errors=include_errors)\n n_incomplete = non_dummy_count(self.exp_args_list)\n n_error = [\n getattr(exp_args, \"status\", \"incomplete\") == \"error\" for exp_args in self.exp_args_list\n ].count(True)\n return n_incomplete, n_error\n\n def load_exp_args_list(self):\n logger.info(f\"Loading experiments from {self.dir}\")\n self.exp_args_list = list(inspect_results.yield_all_exp_results(savedir_base=self.dir))\n\n def set_reproducibility_info(self, strict_reproducibility=False, comment=None):\n \"\"\"Gather relevant information that may affect the reproducibility of the experiment\n\n e.g.: versions of BrowserGym, benchmark, AgentLab...\n\n Args:\n strict_reproducibility: bool\n If True, all modifications have to be committed before running the experiments.\n Also, if relaunching a study, it will not be possible if the code has changed.\n comment: str\n Extra comment to add to the reproducibility information.\n \"\"\"\n agent_names = [a.agent_name for a in self.agent_args]\n info = repro.get_reproducibility_info(\n agent_names,\n self.benchmark,\n self.uuid,\n ignore_changes=not strict_reproducibility,\n comment=comment,\n allow_bypass_benchmark_version=not strict_reproducibility,\n )\n if self.reproducibility_info is not None:\n repro.assert_compatible(\n self.reproducibility_info, info, raise_if_incompatible=strict_reproducibility\n )\n self.reproducibility_info = info\n\n def run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n relaunch_errors=True,\n exp_root=RESULTS_DIR,\n ):\n self.set_reproducibility_info(\n strict_reproducibility=strict_reproducibility, comment=self.comment\n )\n self.save(exp_root=exp_root)\n\n n_exp = len(self.exp_args_list)\n last_error_count = None\n\n for i in range(n_relaunch):\n logger.info(f\"Launching study {self.name} - trial {i + 1} / {n_relaunch}\")\n self._run(n_jobs, parallel_backend, strict_reproducibility)\n\n suffix = f\"trial_{i + 1}_of_{n_relaunch}\"\n _, summary_df, error_report = self.get_results(suffix=suffix)\n logger.info(\"\\n\" + str(summary_df))\n\n n_incomplete, n_error = self.find_incomplete(include_errors=relaunch_errors)\n\n if n_error / n_exp > 0.3:\n logger.warning(\"More than 30% of the experiments errored. 
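# --- Editor's example (sketch) -----------------------------------------------
# The relaunch loop above stops early in two situations, independent of
# n_relaunch. With illustrative numbers:
#
#   n_exp, n_error, last_error_count = 100, 35, None
#   n_error / n_exp > 0.3                  # True: >30% errored -> stop
#   last_error_count is not None and n_error >= last_error_count
#                                          # retries no longer help -> stop
#
# Otherwise it keeps relaunching incomplete experiments until n_relaunch
# trials are used up or nothing incomplete remains.
# -----------------------------------------------------------------------------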
Stopping the retries.\")\n break\n\n if last_error_count is not None and n_error >= last_error_count:\n logger.warning(\n \"Last trial did not reduce the number of errors. Stopping the retries.\"\n )\n break\n\n if n_incomplete == 0:\n logger.info(f\"Study {self.name} finished.\")\n break\n\n logger.info(\"# Error Report:\\n-------------\\n\\n\" + error_report)\n\n if n_incomplete != 0:\n logger.warning(\n f\"Study {self.name} did not finish after {n_relaunch} trials. There are {n_incomplete} incomplete experiments.\"\n )\n\n def _run(self, n_jobs=1, parallel_backend=\"joblib\", strict_reproducibility=False):\n \"\"\"Run all experiments in the study in parallel when possible.\n\n Args:\n n_jobs: int\n Number of parallel jobs.\n parallel_backend: str\n Parallel backend to use. Either \"joblib\", \"ray\" or \"sequential\".\n strict_reproducibility: bool\n If True, all modifications have to be committed before running the experiments.\n Also, if relaunching a study, it will not be possible if the code has changed.\n\n Raises:\n ValueError: If the exp_args_list is None.\n \"\"\"\n\n if self.exp_args_list is None:\n raise ValueError(\"exp_args_list is None. Please set exp_args_list before running.\")\n\n logger.info(\"Preparing backends...\")\n self.benchmark.prepare_backends()\n logger.info(\"Backends ready.\")\n\n run_experiments(\n n_jobs,\n self.exp_args_list,\n self.dir,\n parallel_backend=parallel_backend,\n avg_step_timeout=self.avg_step_timeout,\n )\n\n def append_to_journal(self, strict_reproducibility=True):\n \"\"\"Append the study to the journal.\n\n Args:\n strict_reproducibility: bool\n If True, incomplete experiments will raise an error.\n \"\"\"\n _, summary_df, _ = self.get_results()\n repro.append_to_journal(\n self.reproducibility_info,\n summary_df,\n strict_reproducibility=strict_reproducibility,\n )\n\n @property\n def name(self):\n agent_names = [a.agent_name for a in self.agent_args]\n return _make_study_name(agent_names, [self.benchmark.name], self.suffix)\n\n def override_max_steps(self, max_steps):\n for exp_args in self.exp_args_list:\n exp_args.env_args.max_steps = max_steps\n\n @staticmethod\n def load(dir: Path) -> \"Study\":\n dir = Path(dir)\n study_path = dir / \"study.pkl.gz\"\n if not study_path.exists() and dir.is_dir():\n # For backward compatibility\n first_result = next(\n inspect_results.yield_all_exp_results(savedir_base=dir, progress_fn=None)\n )\n benchmark_name = first_result.exp_args.env_args.task_name.split(\".\")[0]\n agent_args = first_result.exp_args.agent_args\n study = Study(agent_args=agent_args, benchmark=benchmark_name, dir=dir)\n else:\n with gzip.open(dir / \"study.pkl.gz\", \"rb\") as f:\n study = pickle.load(f) # type: Study\n study.dir = dir\n\n # # just a check\n # for i, exp_args in enumerate(study.exp_args_list):\n # if exp_args.order != i:\n # logging.warning(f\"The order of the experiments is not correct. 
{exp_args.order} != {i}\")\n\n return study\n\n @staticmethod\n def load_most_recent(root_dir: Path = None, contains=None) -> \"Study\":\n return Study.load(get_most_recent_study(root_dir, contains=contains))\n\n def agents_on_benchmark(\n self,\n agents: list[AgentArgs] | AgentArgs,\n benchmark: Benchmark,\n demo_mode=False,\n logging_level: int = logging.INFO,\n logging_level_stdout: int = logging.INFO,\n ignore_dependencies=False,\n ):\n \"\"\"Run one or multiple agents on a benchmark.\n\n Args:\n agents: list[AgentArgs] | AgentArgs\n The agent configuration(s) to run.\n benchmark: Benchmark\n The benchmark to run the agents on.\n demo_mode: bool\n If True, the experiments will be run in demo mode.\n logging_level: int\n The logging level for individual jobs.\n logging_level_stdout: int\n The logging level for the stdout.\n ignore_dependencies: bool\n If True, the dependencies will be ignored and all experiments can be run in parallel.\n\n Returns:\n list[ExpArgs]: The list of experiments to run.\n\n Raises:\n ValueError: If multiple agents are run on a benchmark that requires manual reset.\n \"\"\"\n\n if not isinstance(agents, (list, tuple)):\n agents = [agents]\n\n if benchmark.name.startswith(\"visualwebarena\") or benchmark.name.startswith(\"webarena\"):\n if len(agents) > 1:\n raise ValueError(\n f\"Only one agent can be run on {benchmark.name} since the instance requires manual reset after each evaluation.\"\n )\n\n for agent in agents:\n agent.set_benchmark(\n benchmark, demo_mode\n ) # the agent can adapt (lightly?) to the benchmark\n\n env_args_list = benchmark.env_args_list\n if demo_mode:\n set_demo_mode(env_args_list)\n\n exp_args_list = []\n\n for agent in agents:\n for env_args in env_args_list:\n exp_args = ExpArgs(\n agent_args=agent,\n env_args=env_args,\n logging_level=logging_level,\n logging_level_stdout=logging_level_stdout,\n )\n exp_args_list.append(exp_args)\n\n for i, exp_args in enumerate(exp_args_list):\n exp_args.order = i\n\n # not required with ray, but keeping around if we would need it for visualwebareana on joblib\n # _flag_sequential_exp(exp_args_list, benchmark)\n\n if not ignore_dependencies:\n # populate the depends_on field based on the task dependencies in the benchmark\n exp_args_list = add_dependencies(exp_args_list, benchmark.dependency_graph_over_tasks())\n else:\n logger.warning(\n f\"Ignoring dependencies for benchmark {benchmark.name}. 
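# --- Editor's example (sketch) -----------------------------------------------
# agents_on_benchmark above builds the full cross product of agents and
# tasks; condensed to its core, the construction is equivalent to:
#
#   exp_args_list = [
#       ExpArgs(agent_args=agent, env_args=env_args)
#       for agent in agents
#       for env_args in benchmark.env_args_list
#   ]
#
# Each ExpArgs then receives a stable `order` index and, unless
# ignore_dependencies is set, a `depends_on` field derived from
# benchmark.dependency_graph_over_tasks().
# -----------------------------------------------------------------------------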
This could lead to different results.\"\n )\n\n return exp_args_list\n\n\ndef _make_study_name(agent_names, benchmark_names, suffix=None):\n \"\"\"Make a study name from the agent and benchmark names.\"\"\"\n # extract unique agent and benchmark names\n agent_names = list(set(agent_names))\n benchmark_names = list(set(benchmark_names))\n\n if len(agent_names) == 1:\n agent_name = agent_names[0]\n else:\n agent_name = f\"{len(agent_names)}_agents\"\n\n if len(benchmark_names) == 1:\n benchmark_name = benchmark_names[0]\n else:\n benchmark_name = f\"{len(benchmark_names)}_benchmarks\"\n\n study_name = f\"{agent_name}_on_{benchmark_name}_{suffix if suffix else ''}\"\n","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study._make_study_name","uri":"program://AgentLab/function/src.agentlab.experiments.study._make_study_name#L522-L540","kind":"function","name":"_make_study_name","path":"src/agentlab/experiments/study.py","language":"python","start_line":522,"end_line":540,"context_start_line":502,"context_end_line":560,"code":" )\n exp_args_list.append(exp_args)\n\n for i, exp_args in enumerate(exp_args_list):\n exp_args.order = i\n\n # not required with ray, but keeping around if we would need it for visualwebareana on joblib\n # _flag_sequential_exp(exp_args_list, benchmark)\n\n if not ignore_dependencies:\n # populate the depends_on field based on the task dependencies in the benchmark\n exp_args_list = add_dependencies(exp_args_list, benchmark.dependency_graph_over_tasks())\n else:\n logger.warning(\n f\"Ignoring dependencies for benchmark {benchmark.name}. This could lead to different results.\"\n )\n\n return exp_args_list\n\n\ndef _make_study_name(agent_names, benchmark_names, suffix=None):\n \"\"\"Make a study name from the agent and benchmark names.\"\"\"\n # extract unique agent and benchmark names\n agent_names = list(set(agent_names))\n benchmark_names = list(set(benchmark_names))\n\n if len(agent_names) == 1:\n agent_name = agent_names[0]\n else:\n agent_name = f\"{len(agent_names)}_agents\"\n\n if len(benchmark_names) == 1:\n benchmark_name = benchmark_names[0]\n else:\n benchmark_name = f\"{len(benchmark_names)}_benchmarks\"\n\n study_name = f\"{agent_name}_on_{benchmark_name}_{suffix if suffix else ''}\"\n\n return slugify(study_name, max_length=200, allow_unicode=True)\n\n\n@dataclass\nclass SequentialStudies(AbstractStudy):\n \"\"\"\n Sequential execution of multiple studies.\n\n This is required for e.g. 
WebArena, where a server reset is required between evaluations of each agent.\n \"\"\"\n\n studies: list[Study]\n\n @property\n def name(self):\n \"\"\"The name of the study.\"\"\"\n agent_names = [a.agent_name for study in self.studies for a in study.agent_args]\n benchmark_names = [study.benchmark.name for study in self.studies]\n return _make_study_name(agent_names, benchmark_names, self.suffix)\n\n def find_incomplete(self, include_errors=True):","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.SequentialStudies","uri":"program://AgentLab/class/src.agentlab.experiments.study.SequentialStudies#L544-L594","kind":"class","name":"SequentialStudies","path":"src/agentlab/experiments/study.py","language":"python","start_line":544,"end_line":594,"context_start_line":524,"context_end_line":614,"code":" # extract unique agent and benchmark names\n agent_names = list(set(agent_names))\n benchmark_names = list(set(benchmark_names))\n\n if len(agent_names) == 1:\n agent_name = agent_names[0]\n else:\n agent_name = f\"{len(agent_names)}_agents\"\n\n if len(benchmark_names) == 1:\n benchmark_name = benchmark_names[0]\n else:\n benchmark_name = f\"{len(benchmark_names)}_benchmarks\"\n\n study_name = f\"{agent_name}_on_{benchmark_name}_{suffix if suffix else ''}\"\n\n return slugify(study_name, max_length=200, allow_unicode=True)\n\n\n@dataclass\nclass SequentialStudies(AbstractStudy):\n \"\"\"\n Sequential execution of multiple studies.\n\n This is required for e.g. WebArena, where a server reset is required between evaluations of each agent.\n \"\"\"\n\n studies: list[Study]\n\n @property\n def name(self):\n \"\"\"The name of the study.\"\"\"\n agent_names = [a.agent_name for study in self.studies for a in study.agent_args]\n benchmark_names = [study.benchmark.name for study in self.studies]\n return _make_study_name(agent_names, benchmark_names, self.suffix)\n\n def find_incomplete(self, include_errors=True):\n for study in self.studies:\n study.find_incomplete(include_errors=include_errors)\n\n def run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n exp_root=RESULTS_DIR,\n ):\n # This sequence of making directories is important to make sure objects are materialized\n # properly before saving. 
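# --- Editor's example (sketch) -----------------------------------------------
# SequentialStudies.run() below nests one directory per child study under
# the parent study's directory: the parent calls self.make_dir(), then each
# child calls study.make_dir(exp_root=self.dir). The resulting layout is
# roughly (names and timestamps illustrative only):
#
#   <RESULTS_DIR>/2025-03-01_09-00-00_2_agents_on_webarena/
#       2025-03-01_09-00-01_agent-a-on-webarena/
#       2025-03-01_09-00-02_agent-b-on-webarena/
# -----------------------------------------------------------------------------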
Otherwise relaunch may not work properly.\n self.make_dir()\n for study in self.studies:\n study.make_dir(exp_root=self.dir)\n\n self.save(exp_root=exp_root)\n self._run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n _, summary_df, _ = self.get_results()\n logger.info(\"\\n\" + str(summary_df))\n logger.info(f\"SequentialStudies {self.name} finished.\")\n\n def _run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n for study in self.studies:\n study.run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n\n def override_max_steps(self, max_steps):\n for study in self.studies:\n study.override_max_steps(max_steps)\n\n def append_to_journal(self, strict_reproducibility=True):\n for study in self.studies:\n study.append_to_journal(strict_reproducibility=strict_reproducibility)\n\n\ndef _init_worker(server_queue: Queue):\n \"\"\"Run once at the initialization of the worker in the multiprocessing.Pool.\n\n This is typically used to initialize different environment variables of the WebArena server for\n multiple instances in parallel.\n\n Args:\n server_queue: Queue\n A queue of objects implementing BaseServer to initialize (or anything with an init\n method).\n \"\"\"\n print(\"initializing server instance on process\", os.getpid())\n print(f\"using queue {server_queue}\")\n server_instance = server_queue.get() # type: \"WebArenaInstanceVars\"\n logger.warning(f\"Initializing server instance {server_instance} from process {os.getpid()}\")\n server_instance.init()\n\n","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study._init_worker","uri":"program://AgentLab/function/src.agentlab.experiments.study._init_worker#L597-L612","kind":"function","name":"_init_worker","path":"src/agentlab/experiments/study.py","language":"python","start_line":597,"end_line":612,"context_start_line":577,"context_end_line":632,"code":"\n self.save(exp_root=exp_root)\n self._run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n _, summary_df, _ = self.get_results()\n logger.info(\"\\n\" + str(summary_df))\n logger.info(f\"SequentialStudies {self.name} finished.\")\n\n def _run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n for study in self.studies:\n study.run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n\n def override_max_steps(self, max_steps):\n for study in self.studies:\n study.override_max_steps(max_steps)\n\n def append_to_journal(self, strict_reproducibility=True):\n for study in self.studies:\n study.append_to_journal(strict_reproducibility=strict_reproducibility)\n\n\ndef _init_worker(server_queue: Queue):\n \"\"\"Run once at the initialization of the worker in the multiprocessing.Pool.\n\n This is typically used to initialize different environment variables of the WebArena server for\n multiple instances in parallel.\n\n Args:\n server_queue: Queue\n A queue of objects implementing BaseServer to initialize (or anything with an init\n method).\n \"\"\"\n print(\"initializing server instance on process\", os.getpid())\n print(f\"using queue {server_queue}\")\n server_instance = server_queue.get() # type: \"WebArenaInstanceVars\"\n logger.warning(f\"Initializing server instance {server_instance} from process {os.getpid()}\")\n server_instance.init()\n\n\ndef _run_study(study: Study, n_jobs, parallel_backend, strict_reproducibility, n_relaunch):\n 
\"\"\"Wrapper to run a study remotely.\"\"\"\n study.run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n\n\n@dataclass\nclass ParallelStudies(SequentialStudies):\n parallel_servers: list[BaseServer] | int = None\n\n def _run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n ):\n parallel_servers = self.parallel_servers\n if isinstance(parallel_servers, int):","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study._run_study","uri":"program://AgentLab/function/src.agentlab.experiments.study._run_study#L615-L617","kind":"function","name":"_run_study","path":"src/agentlab/experiments/study.py","language":"python","start_line":615,"end_line":617,"context_start_line":595,"context_end_line":637,"code":"\n\ndef _init_worker(server_queue: Queue):\n \"\"\"Run once at the initialization of the worker in the multiprocessing.Pool.\n\n This is typically used to initialize different environment variables of the WebArena server for\n multiple instances in parallel.\n\n Args:\n server_queue: Queue\n A queue of object implementing BaseServer to initialize (or anything with a init\n method).\n \"\"\"\n print(\"initializing server instance with on process\", os.getpid())\n print(f\"using queue {server_queue}\")\n server_instance = server_queue.get() # type: \"WebArenaInstanceVars\"\n logger.warning(f\"Initializing server instance {server_instance} from process {os.getpid()}\")\n server_instance.init()\n\n\ndef _run_study(study: Study, n_jobs, parallel_backend, strict_reproducibility, n_relaunch):\n \"\"\"Wrapper to run a study remotely.\"\"\"\n study.run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n\n\n@dataclass\nclass ParallelStudies(SequentialStudies):\n parallel_servers: list[BaseServer] | int = None\n\n def _run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n ):\n parallel_servers = self.parallel_servers\n if isinstance(parallel_servers, int):\n parallel_servers = [BaseServer() for _ in range(parallel_servers)]\n\n server_queue = Manager().Queue()\n for server in parallel_servers:\n server_queue.put(server)","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.ParallelStudies","uri":"program://AgentLab/class/src.agentlab.experiments.study.ParallelStudies#L621-L653","kind":"class","name":"ParallelStudies","path":"src/agentlab/experiments/study.py","language":"python","start_line":621,"end_line":653,"context_start_line":601,"context_end_line":673,"code":" multiple instances in parallel.\n\n Args:\n server_queue: Queue\n A queue of object implementing BaseServer to initialize (or anything with a init\n method).\n \"\"\"\n print(\"initializing server instance with on process\", os.getpid())\n print(f\"using queue {server_queue}\")\n server_instance = server_queue.get() # type: \"WebArenaInstanceVars\"\n logger.warning(f\"Initializing server instance {server_instance} from process {os.getpid()}\")\n server_instance.init()\n\n\ndef _run_study(study: Study, n_jobs, parallel_backend, strict_reproducibility, n_relaunch):\n \"\"\"Wrapper to run a study remotely.\"\"\"\n study.run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n\n\n@dataclass\nclass ParallelStudies(SequentialStudies):\n parallel_servers: list[BaseServer] | int = None\n\n def 
_run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n ):\n parallel_servers = self.parallel_servers\n if isinstance(parallel_servers, int):\n parallel_servers = [BaseServer() for _ in range(parallel_servers)]\n\n server_queue = Manager().Queue()\n for server in parallel_servers:\n server_queue.put(server)\n\n with ProcessPoolExecutor(\n max_workers=len(parallel_servers), initializer=_init_worker, initargs=(server_queue,)\n ) as executor:\n # Create list of arguments for each study\n study_args = [\n (study, n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n for study in self.studies\n ]\n\n # Submit all tasks and wait for completion\n futures = [executor.submit(_run_study, *args) for args in study_args]\n\n # Wait for all futures to complete and raise any exceptions\n for future in futures:\n future.result()\n\n\n@dataclass\nclass ParallelStudies_alt(SequentialStudies):\n parallel_servers: list[BaseServer] | int = None\n\n def _run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n ):\n parallel_servers = self.parallel_servers\n if isinstance(parallel_servers, int):\n parallel_servers = [BaseServer() for _ in range(parallel_servers)]\n\n server_queue = Manager().Queue()\n for server in parallel_servers:\n server_queue.put(server)","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.ParallelStudies_alt","uri":"program://AgentLab/class/src.agentlab.experiments.study.ParallelStudies_alt#L657-L682","kind":"class","name":"ParallelStudies_alt","path":"src/agentlab/experiments/study.py","language":"python","start_line":657,"end_line":682,"context_start_line":637,"context_end_line":702,"code":" server_queue.put(server)\n\n with ProcessPoolExecutor(\n max_workers=len(parallel_servers), initializer=_init_worker, initargs=(server_queue,)\n ) as executor:\n # Create list of arguments for each study\n study_args = [\n (study, n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n for study in self.studies\n ]\n\n # Submit all tasks and wait for completion\n futures = [executor.submit(_run_study, *args) for args in study_args]\n\n # Wait for all futures to complete and raise any exceptions\n for future in futures:\n future.result()\n\n\n@dataclass\nclass ParallelStudies_alt(SequentialStudies):\n parallel_servers: list[BaseServer] | int = None\n\n def _run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n ):\n parallel_servers = self.parallel_servers\n if isinstance(parallel_servers, int):\n parallel_servers = [BaseServer() for _ in range(parallel_servers)]\n\n server_queue = Manager().Queue()\n for server in parallel_servers:\n server_queue.put(server)\n\n with Pool(len(parallel_servers), initializer=_init_worker, initargs=(server_queue,)) as p:\n p.starmap(\n _run_study,\n [\n (study, n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n for study in self.studies\n ],\n )\n\n\ndef get_most_recent_study(\n root_dir: Path = None, date_format: str = \"%Y-%m-%d_%H-%M-%S\", contains=None\n):\n \"\"\"Return the most recent directory based on the date in the folder name.\n\n Args:\n root_dir: The directory to search in\n date_format: The format of the date in the folder name\n contains: If not None, only consider folders that contains this string\n\n Returns:\n Path: The most recent folder satisfying the conditions\n 
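# --- Editor's example (runnable sketch) ---------------------------------------
# The server-pool pattern used by ParallelStudies above, reduced to its
# core: one queue entry per worker, consumed exactly once in the
# initializer, so each process stays bound to one server. Names here are
# demo stand-ins, not AgentLab API.
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import Manager


def _demo_init(queue):
    # Runs once per worker; the popped value stays with this process.
    global _my_server
    _my_server = queue.get()


def _demo_task(i):
    return f"task {i} ran on {_my_server}"


if __name__ == "__main__":
    servers = ["server_a", "server_b"]  # stand-ins for BaseServer instances
    queue = Manager().Queue()
    for server in servers:
        queue.put(server)
    with ProcessPoolExecutor(
        max_workers=len(servers), initializer=_demo_init, initargs=(queue,)
    ) as executor:
        print(list(executor.map(_demo_task, range(4))))
# -------------------------------------------------------------------------------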
\"\"\"\n\n if root_dir is None:\n root_dir = RESULTS_DIR\n\n most_recent_folder = None","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.get_most_recent_study","uri":"program://AgentLab/function/src.agentlab.experiments.study.get_most_recent_study#L685-L717","kind":"function","name":"get_most_recent_study","path":"src/agentlab/experiments/study.py","language":"python","start_line":685,"end_line":717,"context_start_line":665,"context_end_line":737,"code":" n_relaunch=3,\n ):\n parallel_servers = self.parallel_servers\n if isinstance(parallel_servers, int):\n parallel_servers = [BaseServer() for _ in range(parallel_servers)]\n\n server_queue = Manager().Queue()\n for server in parallel_servers:\n server_queue.put(server)\n\n with Pool(len(parallel_servers), initializer=_init_worker, initargs=(server_queue,)) as p:\n p.starmap(\n _run_study,\n [\n (study, n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n for study in self.studies\n ],\n )\n\n\ndef get_most_recent_study(\n root_dir: Path = None, date_format: str = \"%Y-%m-%d_%H-%M-%S\", contains=None\n):\n \"\"\"Return the most recent directory based on the date in the folder name.\n\n Args:\n root_dir: The directory to search in\n date_format: The format of the date in the folder name\n contains: If not None, only consider folders that contains this string\n\n Returns:\n Path: The most recent folder satisfying the conditions\n \"\"\"\n\n if root_dir is None:\n root_dir = RESULTS_DIR\n\n most_recent_folder = None\n most_recent_time = datetime.min\n\n for item in root_dir.iterdir():\n if item.is_dir() and not item.name.startswith(\"_\"):\n if contains is not None and contains not in item.name:\n continue\n try:\n folder_date = datetime.strptime(\"_\".join(item.name.split(\"_\")[:2]), date_format)\n if folder_date > most_recent_time:\n most_recent_time = folder_date\n most_recent_folder = item\n except (ValueError, IndexError):\n continue\n\n return most_recent_folder\n\n\ndef set_demo_mode(env_args_list: list[EnvArgs]):\n \"\"\"Set the demo mode for the experiments. 
This can be useful for generating videos for demos.\"\"\"\n for env_args in env_args_list:\n env_args.viewport = {\"width\": 1280, \"height\": 720}\n env_args.record_video = True\n env_args.wait_for_user_message = False\n env_args.slow_mo = 1000\n\n\ndef _convert_env_args(env_args_list) -> list[EnvArgs]:\n \"\"\"Return a list where every element is the *new* EnvArgs.\n\n For backward compatibility, we need to convert the old EnvArgs to the new one.\n\n Args:\n env_args_list (list): list of EnvArgs objects to convert\n\n Returns:","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.set_demo_mode","uri":"program://AgentLab/function/src.agentlab.experiments.study.set_demo_mode#L720-L726","kind":"function","name":"set_demo_mode","path":"src/agentlab/experiments/study.py","language":"python","start_line":720,"end_line":726,"context_start_line":700,"context_end_line":746,"code":" root_dir = RESULTS_DIR\n\n most_recent_folder = None\n most_recent_time = datetime.min\n\n for item in root_dir.iterdir():\n if item.is_dir() and not item.name.startswith(\"_\"):\n if contains is not None and contains not in item.name:\n continue\n try:\n folder_date = datetime.strptime(\"_\".join(item.name.split(\"_\")[:2]), date_format)\n if folder_date > most_recent_time:\n most_recent_time = folder_date\n most_recent_folder = item\n except (ValueError, IndexError):\n continue\n\n return most_recent_folder\n\n\ndef set_demo_mode(env_args_list: list[EnvArgs]):\n \"\"\"Set the demo mode for the experiments. This can be useful for generating videos for demos.\"\"\"\n for env_args in env_args_list:\n env_args.viewport = {\"width\": 1280, \"height\": 720}\n env_args.record_video = True\n env_args.wait_for_user_message = False\n env_args.slow_mo = 1000\n\n\ndef _convert_env_args(env_args_list) -> list[EnvArgs]:\n \"\"\"Return a list where every element is the *new* EnvArgs.\n\n For backward compatibility, we need to convert the old EnvArgs to the new one.\n\n Args:\n env_args_list (list): list of EnvArgs objects to convert\n\n Returns:\n list: list of converted EnvArgs objects\n\n Raises:\n TypeError: If an element in env_args_list is not of expected type.\n \"\"\"\n from bgym import EnvArgs as BGymEnvArgs\n\n new_list = []\n for ea in env_args_list:","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study._convert_env_args","uri":"program://AgentLab/function/src.agentlab.experiments.study._convert_env_args#L729-L755","kind":"function","name":"_convert_env_args","path":"src/agentlab/experiments/study.py","language":"python","start_line":729,"end_line":755,"context_start_line":709,"context_end_line":775,"code":" try:\n folder_date = datetime.strptime(\"_\".join(item.name.split(\"_\")[:2]), date_format)\n if folder_date > most_recent_time:\n most_recent_time = folder_date\n most_recent_folder = item\n except (ValueError, IndexError):\n continue\n\n return most_recent_folder\n\n\ndef set_demo_mode(env_args_list: list[EnvArgs]):\n \"\"\"Set the demo mode for the experiments. 
This can be useful for generating videos for demos.\"\"\"\n for env_args in env_args_list:\n env_args.viewport = {\"width\": 1280, \"height\": 720}\n env_args.record_video = True\n env_args.wait_for_user_message = False\n env_args.slow_mo = 1000\n\n\ndef _convert_env_args(env_args_list) -> list[EnvArgs]:\n \"\"\"Return a list where every element is the *new* EnvArgs.\n\n For backward compatibility, we need to convert the old EnvArgs to the new one.\n\n Args:\n env_args_list (list): list of EnvArgs objects to convert\n\n Returns:\n list: list of converted EnvArgs objects\n\n Raises:\n TypeError: If an element in env_args_list is not of expected type.\n \"\"\"\n from bgym import EnvArgs as BGymEnvArgs\n\n new_list = []\n for ea in env_args_list:\n # already new → keep as‑is\n if isinstance(ea, (EnvArgs, AbstractEnvArgs)):\n new_list.append(ea)\n # old → convert\n elif isinstance(ea, BGymEnvArgs):\n new_list.append(EnvArgs(**asdict(ea)))\n else:\n raise TypeError(f\"Unexpected type: {type(ea)}\")\n return new_list\n\n\n# def _flag_sequential_exp(exp_args_list: list[ExpArgs], benchmark: Benchmark):\n# if benchmark.name.startswith(\"visualwebarena\"):\n# sequential_subset = benchmark.subset_from_glob(\"requires_reset\", \"True\")\n# sequential_subset = set(\n# [env_args.task_name for env_args in sequential_subset.env_args_list]\n# )\n# for exp_args in exp_args_list:\n# if exp_args.env_args.task_name in sequential_subset:\n# exp_args.sequential = True\n\n\n# def ablation_study(start_agent: AgentArgs, changes, benchmark: str, demo_mode=False):\n# \"\"\"Ablation study of an agent.\n\n# Changes is a list of tuples (path_to_attribute, value) to change in the agent\n# configuration.\n\n# Args:","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.find_incomplete","uri":"program://AgentLab/function/src.agentlab.experiments.study.find_incomplete#L560-L562","kind":"function","name":"find_incomplete","path":"src/agentlab/experiments/study.py","language":"python","start_line":560,"end_line":562,"context_start_line":540,"context_end_line":582,"code":" return slugify(study_name, max_length=200, allow_unicode=True)\n\n\n@dataclass\nclass SequentialStudies(AbstractStudy):\n \"\"\"\n Sequential execution of multiple studies.\n\n This is required for e.g. WebArena, where a server reset is required between evaluations of each agent.\n \"\"\"\n\n studies: list[Study]\n\n @property\n def name(self):\n \"\"\"The name of the study.\"\"\"\n agent_names = [a.agent_name for study in self.studies for a in study.agent_args]\n benchmark_names = [study.benchmark.name for study in self.studies]\n return _make_study_name(agent_names, benchmark_names, self.suffix)\n\n def find_incomplete(self, include_errors=True):\n for study in self.studies:\n study.find_incomplete(include_errors=include_errors)\n\n def run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n exp_root=RESULTS_DIR,\n ):\n # This sequence of of making directories is important to make sure objects are materialized\n # properly before saving. 
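# --- Editor's example (sketch) -------------------------------------------------
# What _convert_env_args above does for a single old-style object, assuming
# the old and new EnvArgs are field-compatible dataclasses:
#
#   from dataclasses import asdict
#   new_env_args = EnvArgs(**asdict(old_env_args))   # old -> new
#
# Objects that are already the new EnvArgs (or AbstractEnvArgs) pass
# through untouched; anything else raises TypeError.
# -------------------------------------------------------------------------------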
Otherwise relaunch may not work properly.\n self.make_dir()\n for study in self.studies:\n study.make_dir(exp_root=self.dir)\n\n self.save(exp_root=exp_root)\n self._run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n _, summary_df, _ = self.get_results()\n logger.info(\"\\n\" + str(summary_df))\n logger.info(f\"SequentialStudies {self.name} finished.\")","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.run","uri":"program://AgentLab/function/src.agentlab.experiments.study.run#L564-L582","kind":"function","name":"run","path":"src/agentlab/experiments/study.py","language":"python","start_line":564,"end_line":582,"context_start_line":544,"context_end_line":602,"code":"class SequentialStudies(AbstractStudy):\n \"\"\"\n Sequential execution of multiple studies.\n\n This is required for e.g. WebArena, where a server reset is required between evaluations of each agent.\n \"\"\"\n\n studies: list[Study]\n\n @property\n def name(self):\n \"\"\"The name of the study.\"\"\"\n agent_names = [a.agent_name for study in self.studies for a in study.agent_args]\n benchmark_names = [study.benchmark.name for study in self.studies]\n return _make_study_name(agent_names, benchmark_names, self.suffix)\n\n def find_incomplete(self, include_errors=True):\n for study in self.studies:\n study.find_incomplete(include_errors=include_errors)\n\n def run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n exp_root=RESULTS_DIR,\n ):\n # This sequence of of making directories is important to make sure objects are materialized\n # properly before saving. Otherwise relaunch may not work properly.\n self.make_dir()\n for study in self.studies:\n study.make_dir(exp_root=self.dir)\n\n self.save(exp_root=exp_root)\n self._run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n _, summary_df, _ = self.get_results()\n logger.info(\"\\n\" + str(summary_df))\n logger.info(f\"SequentialStudies {self.name} finished.\")\n\n def _run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n for study in self.studies:\n study.run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n\n def override_max_steps(self, max_steps):\n for study in self.studies:\n study.override_max_steps(max_steps)\n\n def append_to_journal(self, strict_reproducibility=True):\n for study in self.studies:\n study.append_to_journal(strict_reproducibility=strict_reproducibility)\n\n\ndef _init_worker(server_queue: Queue):\n \"\"\"Run once at the initialization of the worker in the multiprocessing.Pool.\n\n This is typically used to initialize different environment variables of the WebArena server for\n multiple instances in parallel.\n","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.make_dir","uri":"program://AgentLab/function/src.agentlab.experiments.study.make_dir#L142-L148","kind":"function","name":"make_dir","path":"src/agentlab/experiments/study.py","language":"python","start_line":142,"end_line":148,"context_start_line":122,"context_end_line":168,"code":" suffix=suffix,\n comment=comment,\n ignore_dependencies=ignore_dependencies,\n )\n\n\nclass AbstractStudy(ABC):\n \"\"\"Abstract class for a study.\"\"\"\n\n dir: Path = None\n suffix: str = \"\"\n\n @abstractmethod\n def find_incomplete(self, 
include_errors=True):\n \"\"\"Prepare the study for relaunching by finding incomplete experiments\"\"\"\n\n @abstractmethod\n def run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n \"\"\"Run the study\"\"\"\n\n def make_dir(self, exp_root=RESULTS_DIR):\n \"\"\"Create a directory for the study\"\"\"\n if self.dir is None:\n dir_name = f\"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{self.name}\"\n\n self.dir = Path(exp_root) / dir_name\n self.dir.mkdir(parents=True, exist_ok=True)\n\n def save(self, exp_root=RESULTS_DIR):\n \"\"\"Pickle the study to the directory\"\"\"\n # TODO perhaps remove exp_args_list before pickling and when loading bring them from the individual directories\n\n self.make_dir(exp_root=exp_root)\n with gzip.open(self.dir / \"study.pkl.gz\", \"wb\") as f:\n pickle.dump(self, f)\n\n def get_results(self, suffix=\"\", also_save=True):\n \"\"\"Recursively load all results from the study directory and summarize them.\"\"\"\n result_df = inspect_results.load_result_df(self.dir)\n error_report = inspect_results.error_report(result_df, max_stack_trace=3, use_log=True)\n summary_df = inspect_results.summarize_study(result_df)\n\n if also_save:\n suffix = f\"_{suffix}\" if suffix else \"\"\n result_df.to_csv(self.dir / f\"result_df{suffix}.csv\")\n summary_df.to_csv(self.dir / f\"summary_df{suffix}.csv\")\n (self.dir / f\"error_report{suffix}.md\").write_text(error_report)","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.save","uri":"program://AgentLab/function/src.agentlab.experiments.study.save#L150-L156","kind":"function","name":"save","path":"src/agentlab/experiments/study.py","language":"python","start_line":150,"end_line":156,"context_start_line":130,"context_end_line":176,"code":"\n dir: Path = None\n suffix: str = \"\"\n\n @abstractmethod\n def find_incomplete(self, include_errors=True):\n \"\"\"Prepare the study for relaunching by finding incomplete experiments\"\"\"\n\n @abstractmethod\n def run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n \"\"\"Run the study\"\"\"\n\n def make_dir(self, exp_root=RESULTS_DIR):\n \"\"\"Create a directory for the study\"\"\"\n if self.dir is None:\n dir_name = f\"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{self.name}\"\n\n self.dir = Path(exp_root) / dir_name\n self.dir.mkdir(parents=True, exist_ok=True)\n\n def save(self, exp_root=RESULTS_DIR):\n \"\"\"Pickle the study to the directory\"\"\"\n # TODO perhaps remove exp_args_list before pickling and when loading bring them from the individual directories\n\n self.make_dir(exp_root=exp_root)\n with gzip.open(self.dir / \"study.pkl.gz\", \"wb\") as f:\n pickle.dump(self, f)\n\n def get_results(self, suffix=\"\", also_save=True):\n \"\"\"Recursively load all results from the study directory and summarize them.\"\"\"\n result_df = inspect_results.load_result_df(self.dir)\n error_report = inspect_results.error_report(result_df, max_stack_trace=3, use_log=True)\n summary_df = inspect_results.summarize_study(result_df)\n\n if also_save:\n suffix = f\"_{suffix}\" if suffix else \"\"\n result_df.to_csv(self.dir / f\"result_df{suffix}.csv\")\n summary_df.to_csv(self.dir / f\"summary_df{suffix}.csv\")\n (self.dir / f\"error_report{suffix}.md\").write_text(error_report)\n\n return result_df, summary_df, error_report\n\n def shuffle_exps(self):\n \"\"\"Shuffle the experiments in the 
study.\"\"\"\n self.exp_args_list = random.sample(self.exp_args_list, len(self.exp_args_list))\n\n","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.get_results","uri":"program://AgentLab/function/src.agentlab.experiments.study.get_results#L158-L170","kind":"function","name":"get_results","path":"src/agentlab/experiments/study.py","language":"python","start_line":158,"end_line":170,"context_start_line":138,"context_end_line":190,"code":" @abstractmethod\n def run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n \"\"\"Run the study\"\"\"\n\n def make_dir(self, exp_root=RESULTS_DIR):\n \"\"\"Create a directory for the study\"\"\"\n if self.dir is None:\n dir_name = f\"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_{self.name}\"\n\n self.dir = Path(exp_root) / dir_name\n self.dir.mkdir(parents=True, exist_ok=True)\n\n def save(self, exp_root=RESULTS_DIR):\n \"\"\"Pickle the study to the directory\"\"\"\n # TODO perhaps remove exp_args_list before pickling and when loading bring them from the individual directories\n\n self.make_dir(exp_root=exp_root)\n with gzip.open(self.dir / \"study.pkl.gz\", \"wb\") as f:\n pickle.dump(self, f)\n\n def get_results(self, suffix=\"\", also_save=True):\n \"\"\"Recursively load all results from the study directory and summarize them.\"\"\"\n result_df = inspect_results.load_result_df(self.dir)\n error_report = inspect_results.error_report(result_df, max_stack_trace=3, use_log=True)\n summary_df = inspect_results.summarize_study(result_df)\n\n if also_save:\n suffix = f\"_{suffix}\" if suffix else \"\"\n result_df.to_csv(self.dir / f\"result_df{suffix}.csv\")\n summary_df.to_csv(self.dir / f\"summary_df{suffix}.csv\")\n (self.dir / f\"error_report{suffix}.md\").write_text(error_report)\n\n return result_df, summary_df, error_report\n\n def shuffle_exps(self):\n \"\"\"Shuffle the experiments in the study.\"\"\"\n self.exp_args_list = random.sample(self.exp_args_list, len(self.exp_args_list))\n\n\n@dataclass\nclass Study(AbstractStudy):\n \"\"\"A study coresponds to one or multiple agents evaluated on a benchmark.\n\n This is part of the high level API to help keep experiments organized and reproducible.\n\n Attributes:\n agent_args: list[AgentArgs]\n The agent configuration(s) to run. *IMPORTANT*: these objects will be pickled and\n unpickled. Make sure they are imported from a package that is accessible from\n PYTHONPATH. Otherwise, it won't load in agentlab-xray.\n benchmark: Benchmark | str\n The benchmark to run the agents on. See DEFAULT_BENCHMARKS for the main ones. 
You\n can also make your own by modifying an existing one.","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.shuffle_exps","uri":"program://AgentLab/function/src.agentlab.experiments.study.shuffle_exps#L172-L174","kind":"function","name":"shuffle_exps","path":"src/agentlab/experiments/study.py","language":"python","start_line":172,"end_line":174,"context_start_line":152,"context_end_line":194,"code":" # TODO perhaps remove exp_args_list before pickling and when loading bring them from the individual directories\n\n self.make_dir(exp_root=exp_root)\n with gzip.open(self.dir / \"study.pkl.gz\", \"wb\") as f:\n pickle.dump(self, f)\n\n def get_results(self, suffix=\"\", also_save=True):\n \"\"\"Recursively load all results from the study directory and summarize them.\"\"\"\n result_df = inspect_results.load_result_df(self.dir)\n error_report = inspect_results.error_report(result_df, max_stack_trace=3, use_log=True)\n summary_df = inspect_results.summarize_study(result_df)\n\n if also_save:\n suffix = f\"_{suffix}\" if suffix else \"\"\n result_df.to_csv(self.dir / f\"result_df{suffix}.csv\")\n summary_df.to_csv(self.dir / f\"summary_df{suffix}.csv\")\n (self.dir / f\"error_report{suffix}.md\").write_text(error_report)\n\n return result_df, summary_df, error_report\n\n def shuffle_exps(self):\n \"\"\"Shuffle the experiments in the study.\"\"\"\n self.exp_args_list = random.sample(self.exp_args_list, len(self.exp_args_list))\n\n\n@dataclass\nclass Study(AbstractStudy):\n \"\"\"A study coresponds to one or multiple agents evaluated on a benchmark.\n\n This is part of the high level API to help keep experiments organized and reproducible.\n\n Attributes:\n agent_args: list[AgentArgs]\n The agent configuration(s) to run. *IMPORTANT*: these objects will be pickled and\n unpickled. Make sure they are imported from a package that is accessible from\n PYTHONPATH. Otherwise, it won't load in agentlab-xray.\n benchmark: Benchmark | str\n The benchmark to run the agents on. See DEFAULT_BENCHMARKS for the main ones. You\n can also make your own by modifying an existing one.\n dir: Path\n The directory where the study will be saved. If None, a directory will be created in\n RESULTS_DIR.\n suffix: str","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.__post_init__","uri":"program://AgentLab/function/src.agentlab.experiments.study.__post_init__#L241-L251","kind":"function","name":"__post_init__","path":"src/agentlab/experiments/study.py","language":"python","start_line":241,"end_line":251,"context_start_line":221,"context_end_line":271,"code":" The average step timeout in seconds. This is used to stop the experiments if they are\n taking too long. 
The default is 60 seconds.\n demo_mode: bool\n If True, the experiments will be run in demo mode, which records videos and enables\n visual effects for actions.\n \"\"\"\n\n agent_args: list[AgentArgs] = None\n benchmark: Benchmark | str = None\n dir: Path = None\n suffix: str = \"\" # used for adding a personal comment to the study name\n uuid: str = None\n reproducibility_info: dict = None\n logging_level: int = logging.DEBUG\n logging_level_stdout: int = logging.WARNING\n comment: str = None # Extra comments from the authors of this study\n ignore_dependencies: bool = False\n avg_step_timeout: int = 60\n demo_mode: bool = False\n\n def __post_init__(self):\n \"\"\"Initialize the study. 
Set the uuid, and generate the exp_args_list.\"\"\"\n self.uuid = uuid.uuid4()\n if isinstance(self.benchmark, str):\n self.benchmark = DEFAULT_BENCHMARKS[self.benchmark.lower()]()\n\n self.benchmark.env_args_list = _convert_env_args(self.benchmark.env_args_list)\n\n if isinstance(self.dir, str):\n self.dir = Path(self.dir)\n self.make_exp_args_list()\n\n def make_exp_args_list(self):\n \"\"\"Generate the exp_args_list from the agent_args and the benchmark.\"\"\"\n self.exp_args_list = self.agents_on_benchmark(\n self.agent_args,\n self.benchmark,\n logging_level=self.logging_level,\n logging_level_stdout=self.logging_level_stdout,\n ignore_dependencies=self.ignore_dependencies,\n demo_mode=self.demo_mode,\n )\n\n def find_incomplete(self, include_errors=True):\n \"\"\"Find incomplete or errored experiments in the study directory for relaunching.\n\n Args:\n include_errors: bool\n If True, include errored experiments in the list.\n\n Returns:\n list[ExpArgs]: The list of all experiments with completed ones replaced by a\n dummy exp_args to keep the task dependencies.\n \"\"\"\n self.exp_args_list = find_incomplete(self.dir, include_errors=include_errors)\n n_incomplete = non_dummy_count(self.exp_args_list)\n n_error = [\n getattr(exp_args, \"status\", \"incomplete\") == \"error\" for exp_args in self.exp_args_list\n ].count(True)\n return n_incomplete, n_error\n\n def load_exp_args_list(self):","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.load_exp_args_list","uri":"program://AgentLab/function/src.agentlab.experiments.study.load_exp_args_list#L282-L284","kind":"function","name":"load_exp_args_list","path":"src/agentlab/experiments/study.py","language":"python","start_line":282,"end_line":284,"context_start_line":262,"context_end_line":304,"code":" )\n\n def find_incomplete(self, include_errors=True):\n \"\"\"Find incomplete or errored experiments in the study directory for relaunching.\n\n Args:\n include_errors: bool\n If True, include errored experiments in the list.\n\n Returns:\n list[ExpArgs]: The list of all experiments with completed ones replaced by a\n dummy exp_args to keep the task dependencies.\n \"\"\"\n self.exp_args_list = find_incomplete(self.dir, include_errors=include_errors)\n n_incomplete = non_dummy_count(self.exp_args_list)\n n_error = [\n getattr(exp_args, \"status\", \"incomplete\") == \"error\" for exp_args in self.exp_args_list\n ].count(True)\n return n_incomplete, n_error\n\n def load_exp_args_list(self):\n logger.info(f\"Loading experiments from {self.dir}\")\n self.exp_args_list = list(inspect_results.yield_all_exp_results(savedir_base=self.dir))\n\n def set_reproducibility_info(self, strict_reproducibility=False, comment=None):\n \"\"\"Gather relevant information that may affect the reproducibility of the experiment\n\n e.g.: versions of BrowserGym, benchmark, AgentLab...\n\n Args:\n strict_reproducibility: bool\n If True, all modifications have to be committed before running the experiments.\n Also, if relaunching a study, it will not be possible if the code has changed.\n comment: str\n Extra comment to add to the reproducibility information.\n \"\"\"\n agent_names = [a.agent_name for a in self.agent_args]\n info = repro.get_reproducibility_info(\n agent_names,\n self.benchmark,\n self.uuid,\n ignore_changes=not strict_reproducibility,\n 
comment=comment,","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.set_reproducibility_info","uri":"program://AgentLab/function/src.agentlab.experiments.study.set_reproducibility_info#L286-L311","kind":"function","name":"set_reproducibility_info","path":"src/agentlab/experiments/study.py","language":"python","start_line":286,"end_line":311,"context_start_line":266,"context_end_line":331,"code":"\n Args:\n include_errors: bool\n If True, include errored experiments in the list.\n\n Returns:\n list[ExpArgs]: The list of all experiments with completed ones replaced by a\n dummy exp_args to keep the task dependencies.\n \"\"\"\n self.exp_args_list = find_incomplete(self.dir, include_errors=include_errors)\n n_incomplete = non_dummy_count(self.exp_args_list)\n n_error = [\n getattr(exp_args, \"status\", \"incomplete\") == \"error\" for exp_args in self.exp_args_list\n ].count(True)\n return n_incomplete, n_error\n\n def load_exp_args_list(self):\n logger.info(f\"Loading experiments from {self.dir}\")\n self.exp_args_list = list(inspect_results.yield_all_exp_results(savedir_base=self.dir))\n\n def set_reproducibility_info(self, strict_reproducibility=False, comment=None):\n \"\"\"Gather relevant information that may affect the reproducibility of the experiment\n\n e.g.: versions of BrowserGym, benchmark, AgentLab...\n\n Args:\n strict_reproducibility: bool\n If True, all modifications have to be committed before running the experiments.\n Also, if relaunching a study, it will not be possible if the code has changed.\n comment: str\n Extra comment to add to the reproducibility information.\n \"\"\"\n agent_names = [a.agent_name for a in self.agent_args]\n info = repro.get_reproducibility_info(\n agent_names,\n self.benchmark,\n self.uuid,\n ignore_changes=not strict_reproducibility,\n comment=comment,\n allow_bypass_benchmark_version=not strict_reproducibility,\n )\n if self.reproducibility_info is not None:\n repro.assert_compatible(\n self.reproducibility_info, info, raise_if_incompatible=strict_reproducibility\n )\n self.reproducibility_info = info\n\n def run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n relaunch_errors=True,\n exp_root=RESULTS_DIR,\n ):\n self.set_reproducibility_info(\n strict_reproducibility=strict_reproducibility, comment=self.comment\n )\n self.save(exp_root=exp_root)\n\n n_exp = len(self.exp_args_list)\n last_error_count = None\n\n for i in range(n_relaunch):\n logger.info(f\"Launching study {self.name} - trial {i + 1} / {n_relaunch}\")","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study._run","uri":"program://AgentLab/function/src.agentlab.experiments.study._run#L660-L682","kind":"function","name":"_run","path":"src/agentlab/experiments/study.py","language":"python","start_line":660,"end_line":682,"context_start_line":640,"context_end_line":702,"code":" max_workers=len(parallel_servers), initializer=_init_worker, initargs=(server_queue,)\n ) as executor:\n # Create list of arguments for each study\n study_args = [\n (study, n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n for study in self.studies\n ]\n\n # Submit all tasks and wait for completion\n futures = [executor.submit(_run_study, *args) for args in study_args]\n\n # Wait for all futures to complete and raise 
any exceptions\n for future in futures:\n future.result()\n\n\n@dataclass\nclass ParallelStudies_alt(SequentialStudies):\n parallel_servers: list[BaseServer] | int = None\n\n def _run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n ):\n parallel_servers = self.parallel_servers\n if isinstance(parallel_servers, int):\n parallel_servers = [BaseServer() for _ in range(parallel_servers)]\n\n server_queue = Manager().Queue()\n for server in parallel_servers:\n server_queue.put(server)\n\n with Pool(len(parallel_servers), initializer=_init_worker, initargs=(server_queue,)) as p:\n p.starmap(\n _run_study,\n [\n (study, n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n for study in self.studies\n ],\n )\n\n\ndef get_most_recent_study(\n root_dir: Path = None, date_format: str = \"%Y-%m-%d_%H-%M-%S\", contains=None\n):\n \"\"\"Return the most recent directory based on the date in the folder name.\n\n Args:\n root_dir: The directory to search in\n date_format: The format of the date in the folder name\n contains: If not None, only consider folders that contain this string\n\n Returns:\n Path: The most recent folder satisfying the conditions\n \"\"\"\n\n if root_dir is None:\n root_dir = RESULTS_DIR\n\n most_recent_folder = None","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.append_to_journal","uri":"program://AgentLab/function/src.agentlab.experiments.study.append_to_journal#L592-L594","kind":"function","name":"append_to_journal","path":"src/agentlab/experiments/study.py","language":"python","start_line":592,"end_line":594,"context_start_line":572,"context_end_line":614,"code":" # This sequence of making directories is important to make sure objects are materialized\n # properly before saving. 
Otherwise relaunch may not work properly.\n self.make_dir()\n for study in self.studies:\n study.make_dir(exp_root=self.dir)\n\n self.save(exp_root=exp_root)\n self._run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n _, summary_df, _ = self.get_results()\n logger.info(\"\\n\" + str(summary_df))\n logger.info(f\"SequentialStudies {self.name} finished.\")\n\n def _run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n for study in self.studies:\n study.run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n\n def override_max_steps(self, max_steps):\n for study in self.studies:\n study.override_max_steps(max_steps)\n\n def append_to_journal(self, strict_reproducibility=True):\n for study in self.studies:\n study.append_to_journal(strict_reproducibility=strict_reproducibility)\n\n\ndef _init_worker(server_queue: Queue):\n \"\"\"Run once at the initialization of the worker in the multiprocessing.Pool.\n\n This is typically used to initialize different environment variables of the WebArena server for\n multiple instances in parallel.\n\n Args:\n server_queue: Queue\n A queue of objects implementing BaseServer to initialize (or anything with an init\n method).\n \"\"\"\n print(\"initializing server instance on process\", os.getpid())\n print(f\"using queue {server_queue}\")\n server_instance = server_queue.get() # type: \"WebArenaInstanceVars\"\n logger.warning(f\"Initializing server instance {server_instance} from process {os.getpid()}\")\n server_instance.init()\n\n","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.name","uri":"program://AgentLab/function/src.agentlab.experiments.study.name#L554-L558","kind":"function","name":"name","path":"src/agentlab/experiments/study.py","language":"python","start_line":554,"end_line":558,"context_start_line":534,"context_end_line":578,"code":" benchmark_name = benchmark_names[0]\n else:\n benchmark_name = f\"{len(benchmark_names)}_benchmarks\"\n\n study_name = f\"{agent_name}_on_{benchmark_name}_{suffix if suffix else ''}\"\n\n return slugify(study_name, max_length=200, allow_unicode=True)\n\n\n@dataclass\nclass SequentialStudies(AbstractStudy):\n \"\"\"\n Sequential execution of multiple studies.\n\n This is required for e.g. WebArena, where a server reset is required between evaluations of each agent.\n \"\"\"\n\n studies: list[Study]\n\n @property\n def name(self):\n \"\"\"The name of the study.\"\"\"\n agent_names = [a.agent_name for study in self.studies for a in study.agent_args]\n benchmark_names = [study.benchmark.name for study in self.studies]\n return _make_study_name(agent_names, benchmark_names, self.suffix)\n\n def find_incomplete(self, include_errors=True):\n for study in self.studies:\n study.find_incomplete(include_errors=include_errors)\n\n def run(\n self,\n n_jobs=1,\n parallel_backend=\"ray\",\n strict_reproducibility=False,\n n_relaunch=3,\n exp_root=RESULTS_DIR,\n ):\n # This sequence of making directories is important to make sure objects are materialized\n # properly before saving. 
Otherwise relaunch may not work properly.\n self.make_dir()\n for study in self.studies:\n study.make_dir(exp_root=self.dir)\n\n self.save(exp_root=exp_root)","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.override_max_steps","uri":"program://AgentLab/function/src.agentlab.experiments.study.override_max_steps#L588-L590","kind":"function","name":"override_max_steps","path":"src/agentlab/experiments/study.py","language":"python","start_line":588,"end_line":590,"context_start_line":568,"context_end_line":610,"code":" strict_reproducibility=False,\n n_relaunch=3,\n exp_root=RESULTS_DIR,\n ):\n # This sequence of making directories is important to make sure objects are materialized\n # properly before saving. Otherwise relaunch may not work properly.\n self.make_dir()\n for study in self.studies:\n study.make_dir(exp_root=self.dir)\n\n self.save(exp_root=exp_root)\n self._run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n _, summary_df, _ = self.get_results()\n logger.info(\"\\n\" + str(summary_df))\n logger.info(f\"SequentialStudies {self.name} finished.\")\n\n def _run(self, n_jobs=1, parallel_backend=\"ray\", strict_reproducibility=False, n_relaunch=3):\n for study in self.studies:\n study.run(n_jobs, parallel_backend, strict_reproducibility, n_relaunch)\n\n def override_max_steps(self, max_steps):\n for study in self.studies:\n study.override_max_steps(max_steps)\n\n def append_to_journal(self, strict_reproducibility=True):\n for study in self.studies:\n study.append_to_journal(strict_reproducibility=strict_reproducibility)\n\n\ndef _init_worker(server_queue: Queue):\n \"\"\"Run once at the initialization of the worker in the multiprocessing.Pool.\n\n This is typically used to initialize different environment variables of the WebArena server for\n multiple instances in parallel.\n\n Args:\n server_queue: Queue\n A queue of objects implementing BaseServer to initialize (or anything with an init\n method).\n \"\"\"\n print(\"initializing server instance on process\", os.getpid())\n print(f\"using queue {server_queue}\")\n server_instance = server_queue.get() # type: \"WebArenaInstanceVars\"","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.load","uri":"program://AgentLab/function/src.agentlab.experiments.study.load#L416-L437","kind":"function","name":"load","path":"src/agentlab/experiments/study.py","language":"python","start_line":416,"end_line":437,"context_start_line":396,"context_end_line":457,"code":" strict_reproducibility: bool\n If True, incomplete experiments will raise an error.\n \"\"\"\n _, summary_df, _ = self.get_results()\n repro.append_to_journal(\n self.reproducibility_info,\n summary_df,\n strict_reproducibility=strict_reproducibility,\n )\n\n @property\n def name(self):\n agent_names = [a.agent_name for a in self.agent_args]\n return _make_study_name(agent_names, [self.benchmark.name], self.suffix)\n\n def override_max_steps(self, max_steps):\n for exp_args in self.exp_args_list:\n exp_args.env_args.max_steps = max_steps\n\n @staticmethod\n def load(dir: Path) -> \"Study\":\n dir = Path(dir)\n study_path = dir / \"study.pkl.gz\"\n if not study_path.exists() and dir.is_dir():\n # For backward compatibility\n first_result = next(\n inspect_results.yield_all_exp_results(savedir_base=dir, progress_fn=None)\n )\n 
benchmark_name = first_result.exp_args.env_args.task_name.split(\".\")[0]\n agent_args = first_result.exp_args.agent_args\n study = Study(agent_args=agent_args, benchmark=benchmark_name, dir=dir)\n else:\n with gzip.open(dir / \"study.pkl.gz\", \"rb\") as f:\n study = pickle.load(f) # type: Study\n study.dir = dir\n\n # # just a check\n # for i, exp_args in enumerate(study.exp_args_list):\n # if exp_args.order != i:\n # logging.warning(f\"The order of the experiments is not correct. {exp_args.order} != {i}\")\n\n return study\n\n @staticmethod\n def load_most_recent(root_dir: Path = None, contains=None) -> \"Study\":\n return Study.load(get_most_recent_study(root_dir, contains=contains))\n\n def agents_on_benchmark(\n self,\n agents: list[AgentArgs] | AgentArgs,\n benchmark: Benchmark,\n demo_mode=False,\n logging_level: int = logging.INFO,\n logging_level_stdout: int = logging.INFO,\n ignore_dependencies=False,\n ):\n \"\"\"Run one or multiple agents on a benchmark.\n\n Args:\n agents: list[AgentArgs] | AgentArgs\n The agent configuration(s) to run.\n benchmark: Benchmark","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.load_most_recent","uri":"program://AgentLab/function/src.agentlab.experiments.study.load_most_recent#L440-L441","kind":"function","name":"load_most_recent","path":"src/agentlab/experiments/study.py","language":"python","start_line":440,"end_line":441,"context_start_line":420,"context_end_line":461,"code":" # For backward compatibility\n first_result = next(\n inspect_results.yield_all_exp_results(savedir_base=dir, progress_fn=None)\n )\n benchmark_name = first_result.exp_args.env_args.task_name.split(\".\")[0]\n agent_args = first_result.exp_args.agent_args\n study = Study(agent_args=agent_args, benchmark=benchmark_name, dir=dir)\n else:\n with gzip.open(dir / \"study.pkl.gz\", \"rb\") as f:\n study = pickle.load(f) # type: Study\n study.dir = dir\n\n # # just a check\n # for i, exp_args in enumerate(study.exp_args_list):\n # if exp_args.order != i:\n # logging.warning(f\"The order of the experiments is not correct. 
{exp_args.order} != {i}\")\n\n return study\n\n @staticmethod\n def load_most_recent(root_dir: Path = None, contains=None) -> \"Study\":\n return Study.load(get_most_recent_study(root_dir, contains=contains))\n\n def agents_on_benchmark(\n self,\n agents: list[AgentArgs] | AgentArgs,\n benchmark: Benchmark,\n demo_mode=False,\n logging_level: int = logging.INFO,\n logging_level_stdout: int = logging.INFO,\n ignore_dependencies=False,\n ):\n \"\"\"Run one or multiple agents on a benchmark.\n\n Args:\n agents: list[AgentArgs] | AgentArgs\n The agent configuration(s) to run.\n benchmark: Benchmark\n The benchmark to run the agents on.\n demo_mode: bool\n If True, the experiments will be run in demo mode.\n logging_level: int","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.study.agents_on_benchmark","uri":"program://AgentLab/function/src.agentlab.experiments.study.agents_on_benchmark#L443-L519","kind":"function","name":"agents_on_benchmark","path":"src/agentlab/experiments/study.py","language":"python","start_line":443,"end_line":519,"context_start_line":423,"context_end_line":539,"code":" )\n benchmark_name = first_result.exp_args.env_args.task_name.split(\".\")[0]\n agent_args = first_result.exp_args.agent_args\n study = Study(agent_args=agent_args, benchmark=benchmark_name, dir=dir)\n else:\n with gzip.open(dir / \"study.pkl.gz\", \"rb\") as f:\n study = pickle.load(f) # type: Study\n study.dir = dir\n\n # # just a check\n # for i, exp_args in enumerate(study.exp_args_list):\n # if exp_args.order != i:\n # logging.warning(f\"The order of the experiments is not correct. {exp_args.order} != {i}\")\n\n return study\n\n @staticmethod\n def load_most_recent(root_dir: Path = None, contains=None) -> \"Study\":\n return Study.load(get_most_recent_study(root_dir, contains=contains))\n\n def agents_on_benchmark(\n self,\n agents: list[AgentArgs] | AgentArgs,\n benchmark: Benchmark,\n demo_mode=False,\n logging_level: int = logging.INFO,\n logging_level_stdout: int = logging.INFO,\n ignore_dependencies=False,\n ):\n \"\"\"Run one or multiple agents on a benchmark.\n\n Args:\n agents: list[AgentArgs] | AgentArgs\n The agent configuration(s) to run.\n benchmark: Benchmark\n The benchmark to run the agents on.\n demo_mode: bool\n If True, the experiments will be run in demo mode.\n logging_level: int\n The logging level for individual jobs.\n logging_level_stdout: int\n The logging level for the stdout.\n ignore_dependencies: bool\n If True, the dependencies will be ignored and all experiments can be run in parallel.\n\n Returns:\n list[ExpArgs]: The list of experiments to run.\n\n Raises:\n ValueError: If multiple agents are run on a benchmark that requires manual reset.\n \"\"\"\n\n if not isinstance(agents, (list, tuple)):\n agents = [agents]\n\n if benchmark.name.startswith(\"visualwebarena\") or benchmark.name.startswith(\"webarena\"):\n if len(agents) > 1:\n raise ValueError(\n f\"Only one agent can be run on {benchmark.name} since the instance requires manual reset after each evaluation.\"\n )\n\n for agent in agents:\n agent.set_benchmark(\n benchmark, demo_mode\n ) # the agent can adapt (lightly?) 
to the benchmark\n\n env_args_list = benchmark.env_args_list\n if demo_mode:\n set_demo_mode(env_args_list)\n\n exp_args_list = []\n\n for agent in agents:\n for env_args in env_args_list:\n exp_args = ExpArgs(\n agent_args=agent,\n env_args=env_args,\n logging_level=logging_level,\n logging_level_stdout=logging_level_stdout,\n )\n exp_args_list.append(exp_args)\n\n for i, exp_args in enumerate(exp_args_list):\n exp_args.order = i\n\n # not required with ray, but keeping around if we would need it for visualwebarena on joblib\n # _flag_sequential_exp(exp_args_list, benchmark)\n\n if not ignore_dependencies:\n # populate the depends_on field based on the task dependencies in the benchmark\n exp_args_list = add_dependencies(exp_args_list, benchmark.dependency_graph_over_tasks())\n else:\n logger.warning(\n f\"Ignoring dependencies for benchmark {benchmark.name}. This could lead to different results.\"\n )\n\n return exp_args_list\n\n\ndef _make_study_name(agent_names, benchmark_names, suffix=None):\n \"\"\"Make a study name from the agent and benchmark names.\"\"\"\n # extract unique agent and benchmark names\n agent_names = list(set(agent_names))\n benchmark_names = list(set(benchmark_names))\n\n if len(agent_names) == 1:\n agent_name = agent_names[0]\n else:\n agent_name = f\"{len(agent_names)}_agents\"\n\n if len(benchmark_names) == 1:\n benchmark_name = benchmark_names[0]\n else:\n benchmark_name = f\"{len(benchmark_names)}_benchmarks\"\n\n study_name = f\"{agent_name}_on_{benchmark_name}_{suffix if suffix else ''}\"\n","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.view_dep_graph","uri":"program://AgentLab/module/src.agentlab.experiments.view_dep_graph#L1-L326","kind":"module","name":"src.agentlab.experiments.view_dep_graph","path":"src/agentlab/experiments/view_dep_graph.py","language":"python","start_line":1,"end_line":326,"context_start_line":1,"context_end_line":326,"code":"\"\"\"Dirty script to visualize the dependency graph of a benchmark, e.g. webarena, visualwebarena,\netc. 
You may have to adjust it to make it work for you.\"\"\"\n\nimport math\n\nimport bgym\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nfrom bgym import DEFAULT_BENCHMARKS\n\n\ndef clean_dict(dependency_dict: dict[str, list[str]]) -> dict[str, list[str]]:\n new_dep = {}\n for key, deps in dependency_dict.items():\n new_key = key.split(\".\")[-1]\n\n new_dep[new_key] = [dep.split(\".\")[-1] for dep in deps]\n return new_dep\n\n\ndef dict_to_networkx(dependency_dict: dict[str, list[str]]) -> nx.DiGraph:\n\n G = nx.DiGraph()\n i = 0\n # Add edges from each node to its dependencies\n for node, dependencies in dependency_dict.items():\n i += 1\n if i > 20:\n pass\n\n print(node, dependencies)\n # Add edges from the node to each of its dependencies\n for dep in dependencies:\n G.add_edge(dep, node)\n return G\n\n\ndef plot_graph(G, ax, title=None, node_color=\"lightblue\", node_size=40, font_size=8):\n \"\"\"\n Plot a single graph component on the given matplotlib axis.\n\n Args:\n G: NetworkX graph (should be a single connected component)\n ax: Matplotlib axis to plot on\n title: Optional title for the subplot\n node_color: Color for the nodes\n node_size: Size of the nodes\n font_size: Size of the node labels\n \"\"\"\n # Use a simple layout for better performance\n # pos = nx.spring_layout(G, k=0.1, iterations=100)\n\n pos = nx.kamada_kawai_layout(G)\n\n # pos = nx.spectral_layout(G)\n\n def name_to_size(name):\n if \"-\" in name:\n start, end = name.split(\"-\")\n\n n_nodes = int(end) - int(start) + 1\n else:\n n_nodes = 1\n size_factor = node_size / 10\n return n_nodes * size_factor\n\n # compute size based on name\n sizes = [name_to_size(name) for name in G.nodes]\n\n nx.draw(\n G,\n pos,\n ax=ax,\n with_labels=True,\n node_color=node_color,\n node_size=sizes,\n font_size=font_size,\n font_weight=\"normal\",\n arrows=True,\n arrowsize=15,\n )\n\n if title:\n ax.set_title(title)\n ax.axis(\"off\")\n\n\ndef plot_components_grid(\n components, max_cols=4, node_color=\"lightblue\", node_size=2000, font_size=10\n):\n \"\"\"\n Plot components in a grid layout.\n\n Args:\n components: List of NetworkX graphs, one per component\n max_cols: Maximum number of columns in the grid\n node_color: Color for the nodes\n node_size: Size of the nodes\n font_size: Size of the node labels\n\n Returns:\n matplotlib figure\n \"\"\"\n n_components = len(components)\n\n if n_components == 0:\n print(\"No components found\")\n return None\n\n # Calculate grid dimensions\n ncols = min(n_components, max_cols)\n nrows = math.ceil(n_components / ncols)\n\n # Create figure with a reasonable size per subplot\n fig, axes = plt.subplots(nrows, ncols, figsize=(4 * ncols, 4 * nrows))\n fig.suptitle(\"Dependency Graph Components\", size=16)\n\n # Make axes iterable even if there's only one\n if n_components == 1:\n axes = np.array([[axes]])\n elif nrows == 1:\n axes = np.array([axes])\n elif ncols == 1:\n axes = axes.reshape(-1, 1)\n\n # Plot each component\n for idx, component in enumerate(components):\n i, j = divmod(idx, ncols)\n title = f\"Component {idx+1} ({component.number_of_nodes()} nodes)\"\n plot_graph(\n component,\n axes[i, j],\n title,\n node_color=node_color,\n node_size=node_size,\n font_size=font_size,\n )\n\n # Remove empty subplots\n for idx in range(n_components, nrows * ncols):\n i, j = divmod(idx, ncols)\n axes[i, j].remove()\n\n plt.tight_layout()\n return fig\n\n\ndef compress_sequential_chains(dep_dict: dict[str, list[str]]) -> dict[str, list[str]]:\n \"\"\"\n Compress 
chains of sequential numbers in a dependency dictionary.\n Returns a new dictionary with compressed chains using range notation.\n\n Args:\n dep_dict: Dictionary mapping string numbers to list of string number dependencies\n\n Returns:\n Dictionary with compressed chains using range notation\n \"\"\"\n # Convert to integers for easier processing\n int_dict = {int(k): [int(x) for x in v] for k, v in dep_dict.items()}\n\n # Find chains\n chains = []\n current_chain = []\n\n # Sort nodes for sequential processing\n nodes = sorted(int_dict.keys())\n\n i = 0\n while i < len(nodes):\n node = nodes[i]\n\n # Start new chain\n if not current_chain:\n current_chain = [node]\n i += 1\n continue\n\n # Check if this node continues the chain\n last_node = current_chain[-1]\n\n # Conditions for chain continuation:\n # 1. Numbers are consecutive\n # 2. Current node has exactly one dependency\n # 3. That dependency is the previous node in chain\n # 4. The previous node has exactly one successor\n is_consecutive = node == last_node + 1\n has_single_dep = len(int_dict[node]) == 1\n deps_on_last = has_single_dep and int_dict[node][0] == last_node\n last_has_single_successor = sum(1 for k, v in int_dict.items() if last_node in v) == 1\n\n if is_consecutive and deps_on_last and last_has_single_successor:\n current_chain.append(node)\n else:\n if len(current_chain) > 1:\n chains.append(current_chain)\n current_chain = [node]\n\n i += 1\n\n # Add last chain if it exists\n if len(current_chain) > 1:\n chains.append(current_chain)\n\n # Create compressed dictionary\n compressed_dict = {}\n processed_nodes = set()\n\n # Add compressed chains\n for chain in chains:\n chain_name = f\"{chain[0]}-{chain[-1]}\"\n # Find dependencies of first node in chain\n deps = int_dict[chain[0]]\n compressed_dict[chain_name] = [str(d) for d in deps]\n processed_nodes.update(chain)\n\n # Add remaining non-chain nodes\n for node in nodes:\n if node not in processed_nodes:\n compressed_dict[str(node)] = [str(d) for d in int_dict[node]]\n\n # Update dependencies to use compressed names\n for k in compressed_dict:\n deps = compressed_dict[k]\n new_deps = []\n for dep in deps:\n dep_int = int(dep)\n # Find if this dependency is part of a chain\n chain_found = False\n for chain in chains:\n if dep_int in chain:\n new_deps.append(f\"{chain[0]}-{chain[-1]}\")\n chain_found = True\n break\n if not chain_found:\n new_deps.append(dep)\n compressed_dict[k] = new_deps\n\n return compressed_dict\n\n\ndef compress_chains(G):\n \"\"\"\n Compress chains in a directed graph by merging nodes that have single parent and single child.\n\n Args:\n G: NetworkX directed graph\n\n Returns:\n NetworkX directed graph with compressed chains\n \"\"\"\n G_compressed = G.copy()\n processed_nodes = set()\n\n while True:\n # Find nodes with exactly one parent and one child\n nodes_to_compress = []\n for node in list(\n G_compressed.nodes()\n ): # Create a list to avoid modification during iteration\n if node in processed_nodes:\n continue\n\n predecessors = list(G_compressed.predecessors(node))\n successors = list(G_compressed.successors(node))\n\n if len(predecessors) == 1 and len(successors) == 1:\n pred = predecessors[0]\n succ = successors[0]\n\n # Skip if any node in the chain is already processed\n if pred in processed_nodes or succ in processed_nodes:\n continue\n\n # Only compress if middle node has single parent/child\n pred_preds = list(G_compressed.predecessors(pred))\n succ_succs = list(G_compressed.successors(succ))\n\n if len(pred_preds) <= 1 and 
len(succ_succs) <= 1:\n nodes_to_compress.append((pred, node, succ))\n processed_nodes.update([pred, node, succ])\n\n if not nodes_to_compress:\n break\n\n # Process each chain\n for pred, mid, succ in nodes_to_compress:\n if not all(G_compressed.has_node(n) for n in [pred, mid, succ]):\n continue\n\n # Create new merged node name\n new_node = \",\".join(str(n) for n in [pred, mid, succ])\n\n # Add the new node\n G_compressed.add_node(new_node)\n\n # Add edges from all predecessors of first node\n for p in list(G_compressed.predecessors(pred)):\n G_compressed.add_edge(p, new_node)\n\n # Add edges to all successors of last node\n for s in list(G_compressed.successors(succ)):\n G_compressed.add_edge(new_node, s)\n\n # Remove the old nodes\n G_compressed.remove_nodes_from([pred, mid, succ])\n\n return G_compressed\n\n\n# benchmark = DEFAULT_BENCHMARKS[\"webarena\"]()\nbenchmark = DEFAULT_BENCHMARKS[\"visualwebarena\"]()\n\ndep_graph = benchmark.dependency_graph_over_tasks()\ndep_graph = clean_dict(dep_graph)\n\ndep_graph = compress_sequential_chains(dep_graph)\ngraph = dict_to_networkx(dep_graph)\n\n# graph = compress_chains(graph)\n\ncomponents = nx.weakly_connected_components(graph)\ncomponents = [graph.subgraph(component).copy() for component in components]\nplot_components_grid(components)\nplt.show()","source_hash":"38fc7f4b3b3bd85837a1ff7a48042eb13eadea3099d3552a13f4ef4ed8b9d95e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.view_dep_graph.clean_dict","uri":"program://AgentLab/function/src.agentlab.experiments.view_dep_graph.clean_dict#L13-L19","kind":"function","name":"clean_dict","path":"src/agentlab/experiments/view_dep_graph.py","language":"python","start_line":13,"end_line":19,"context_start_line":1,"context_end_line":39,"code":"\"\"\"Dirty script to visualize the dependency graph of a benchmark, e.g. webarena, visualwebarena,\netc. You may have to adjust it to make it work for you.\"\"\"\n\nimport math\n\nimport bgym\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nfrom bgym import DEFAULT_BENCHMARKS\n\n\ndef clean_dict(dependency_dict: dict[str, list[str]]) -> dict[str, list[str]]:\n new_dep = {}\n for key, deps in dependency_dict.items():\n new_key = key.split(\".\")[-1]\n\n new_dep[new_key] = [dep.split(\".\")[-1] for dep in deps]\n return new_dep\n\n\ndef dict_to_networkx(dependency_dict: dict[str, list[str]]) -> nx.DiGraph:\n\n G = nx.DiGraph()\n i = 0\n # Add edges from each node to its dependencies\n for node, dependencies in dependency_dict.items():\n i += 1\n if i > 20:\n pass\n\n print(node, dependencies)\n # Add edges from the node to each of its dependencies\n for dep in dependencies:\n G.add_edge(dep, node)\n return G\n\n\ndef plot_graph(G, ax, title=None, node_color=\"lightblue\", node_size=40, font_size=8):","source_hash":"38fc7f4b3b3bd85837a1ff7a48042eb13eadea3099d3552a13f4ef4ed8b9d95e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.view_dep_graph.dict_to_networkx","uri":"program://AgentLab/function/src.agentlab.experiments.view_dep_graph.dict_to_networkx#L22-L36","kind":"function","name":"dict_to_networkx","path":"src/agentlab/experiments/view_dep_graph.py","language":"python","start_line":22,"end_line":36,"context_start_line":2,"context_end_line":56,"code":"etc. 
You may have to adjust it to make it work for you.\"\"\"\n\nimport math\n\nimport bgym\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nfrom bgym import DEFAULT_BENCHMARKS\n\n\ndef clean_dict(dependency_dict: dict[str, list[str]]) -> dict[str, list[str]]:\n new_dep = {}\n for key, deps in dependency_dict.items():\n new_key = key.split(\".\")[-1]\n\n new_dep[new_key] = [dep.split(\".\")[-1] for dep in deps]\n return new_dep\n\n\ndef dict_to_networkx(dependency_dict: dict[str, list[str]]) -> nx.DiGraph:\n\n G = nx.DiGraph()\n i = 0\n # Add edges from each node to its dependencies\n for node, dependencies in dependency_dict.items():\n i += 1\n if i > 20:\n pass\n\n print(node, dependencies)\n # Add edges from the node to each of its dependencies\n for dep in dependencies:\n G.add_edge(dep, node)\n return G\n\n\ndef plot_graph(G, ax, title=None, node_color=\"lightblue\", node_size=40, font_size=8):\n \"\"\"\n Plot a single graph component on the given matplotlib axis.\n\n Args:\n G: NetworkX graph (should be a single connected component)\n ax: Matplotlib axis to plot on\n title: Optional title for the subplot\n node_color: Color for the nodes\n node_size: Size of the nodes\n font_size: Size of the node labels\n \"\"\"\n # Use a simple layout for better performance\n # pos = nx.spring_layout(G, k=0.1, iterations=100)\n\n pos = nx.kamada_kawai_layout(G)\n\n # pos = nx.spectral_layout(G)","source_hash":"38fc7f4b3b3bd85837a1ff7a48042eb13eadea3099d3552a13f4ef4ed8b9d95e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.view_dep_graph.plot_graph","uri":"program://AgentLab/function/src.agentlab.experiments.view_dep_graph.plot_graph#L39-L86","kind":"function","name":"plot_graph","path":"src/agentlab/experiments/view_dep_graph.py","language":"python","start_line":39,"end_line":86,"context_start_line":19,"context_end_line":106,"code":" return new_dep\n\n\ndef dict_to_networkx(dependency_dict: dict[str, list[str]]) -> nx.DiGraph:\n\n G = nx.DiGraph()\n i = 0\n # Add edges from each node to its dependencies\n for node, dependencies in dependency_dict.items():\n i += 1\n if i > 20:\n pass\n\n print(node, dependencies)\n # Add edges from the node to each of its dependencies\n for dep in dependencies:\n G.add_edge(dep, node)\n return G\n\n\ndef plot_graph(G, ax, title=None, node_color=\"lightblue\", node_size=40, font_size=8):\n \"\"\"\n Plot a single graph component on the given matplotlib axis.\n\n Args:\n G: NetworkX graph (should be a single connected component)\n ax: Matplotlib axis to plot on\n title: Optional title for the subplot\n node_color: Color for the nodes\n node_size: Size of the nodes\n font_size: Size of the node labels\n \"\"\"\n # Use a simple layout for better performance\n # pos = nx.spring_layout(G, k=0.1, iterations=100)\n\n pos = nx.kamada_kawai_layout(G)\n\n # pos = nx.spectral_layout(G)\n\n def name_to_size(name):\n if \"-\" in name:\n start, end = name.split(\"-\")\n\n n_nodes = int(end) - int(start) + 1\n else:\n n_nodes = 1\n size_factor = node_size / 10\n return n_nodes * size_factor\n\n # compute size based on name\n sizes = [name_to_size(name) for name in G.nodes]\n\n nx.draw(\n G,\n pos,\n ax=ax,\n with_labels=True,\n node_color=node_color,\n node_size=sizes,\n font_size=font_size,\n font_weight=\"normal\",\n arrows=True,\n arrowsize=15,\n )\n\n if title:\n ax.set_title(title)\n ax.axis(\"off\")\n\n\ndef plot_components_grid(\n components, max_cols=4, node_color=\"lightblue\", node_size=2000, font_size=10\n):\n 
\"\"\"\n Plot components in a grid layout.\n\n Args:\n components: List of NetworkX graphs, one per component\n max_cols: Maximum number of columns in the grid\n node_color: Color for the nodes\n node_size: Size of the nodes\n font_size: Size of the node labels\n\n Returns:\n matplotlib figure\n \"\"\"\n n_components = len(components)\n","source_hash":"38fc7f4b3b3bd85837a1ff7a48042eb13eadea3099d3552a13f4ef4ed8b9d95e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.view_dep_graph.plot_components_grid","uri":"program://AgentLab/function/src.agentlab.experiments.view_dep_graph.plot_components_grid#L89-L146","kind":"function","name":"plot_components_grid","path":"src/agentlab/experiments/view_dep_graph.py","language":"python","start_line":89,"end_line":146,"context_start_line":69,"context_end_line":166,"code":" sizes = [name_to_size(name) for name in G.nodes]\n\n nx.draw(\n G,\n pos,\n ax=ax,\n with_labels=True,\n node_color=node_color,\n node_size=sizes,\n font_size=font_size,\n font_weight=\"normal\",\n arrows=True,\n arrowsize=15,\n )\n\n if title:\n ax.set_title(title)\n ax.axis(\"off\")\n\n\ndef plot_components_grid(\n components, max_cols=4, node_color=\"lightblue\", node_size=2000, font_size=10\n):\n \"\"\"\n Plot components in a grid layout.\n\n Args:\n components: List of NetworkX graphs, one per component\n max_cols: Maximum number of columns in the grid\n node_color: Color for the nodes\n node_size: Size of the nodes\n font_size: Size of the node labels\n\n Returns:\n matplotlib figure\n \"\"\"\n n_components = len(components)\n\n if n_components == 0:\n print(\"No components found\")\n return None\n\n # Calculate grid dimensions\n ncols = min(n_components, max_cols)\n nrows = math.ceil(n_components / ncols)\n\n # Create figure with a reasonable size per subplot\n fig, axes = plt.subplots(nrows, ncols, figsize=(4 * ncols, 4 * nrows))\n fig.suptitle(\"Dependency Graph Components\", size=16)\n\n # Make axes iterable even if there's only one\n if n_components == 1:\n axes = np.array([[axes]])\n elif nrows == 1:\n axes = np.array([axes])\n elif ncols == 1:\n axes = axes.reshape(-1, 1)\n\n # Plot each component\n for idx, component in enumerate(components):\n i, j = divmod(idx, ncols)\n title = f\"Component {idx+1} ({component.number_of_nodes()} nodes)\"\n plot_graph(\n component,\n axes[i, j],\n title,\n node_color=node_color,\n node_size=node_size,\n font_size=font_size,\n )\n\n # Remove empty subplots\n for idx in range(n_components, nrows * ncols):\n i, j = divmod(idx, ncols)\n axes[i, j].remove()\n\n plt.tight_layout()\n return fig\n\n\ndef compress_sequential_chains(dep_dict: dict[str, list[str]]) -> dict[str, list[str]]:\n \"\"\"\n Compress chains of sequential numbers in a dependency dictionary.\n Returns a new dictionary with compressed chains using range notation.\n\n Args:\n dep_dict: Dictionary mapping string numbers to list of string number dependencies\n\n Returns:\n Dictionary with compressed chains using range notation\n \"\"\"\n # Convert to integers for easier processing\n int_dict = {int(k): [int(x) for x in v] for k, v in dep_dict.items()}\n\n # Find chains\n chains = []\n current_chain = []\n","source_hash":"38fc7f4b3b3bd85837a1ff7a48042eb13eadea3099d3552a13f4ef4ed8b9d95e","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.view_dep_graph.compress_sequential_chains","uri":"program://AgentLab/function/src.agentlab.experiments.view_dep_graph.compress_sequential_chains#L149-L240","kind":"function","name":"compress_sequential_chains","path":"src/agentlab/experiments/view_dep_graph.py","language":"python","start_line":149,"end_line":240,"context_start_line":129,"context_end_line":260,"code":" i, j = divmod(idx, ncols)\n title = f\"Component {idx+1} ({component.number_of_nodes()} nodes)\"\n plot_graph(\n component,\n axes[i, j],\n title,\n node_color=node_color,\n node_size=node_size,\n font_size=font_size,\n )\n\n # Remove empty subplots\n for idx in range(n_components, nrows * ncols):\n i, j = divmod(idx, ncols)\n axes[i, j].remove()\n\n plt.tight_layout()\n return fig\n\n\ndef compress_sequential_chains(dep_dict: dict[str, list[str]]) -> dict[str, list[str]]:\n \"\"\"\n Compress chains of sequential numbers in a dependency dictionary.\n Returns a new dictionary with compressed chains using range notation.\n\n Args:\n dep_dict: Dictionary mapping string numbers to list of string number dependencies\n\n Returns:\n Dictionary with compressed chains using range notation\n \"\"\"\n # Convert to integers for easier processing\n int_dict = {int(k): [int(x) for x in v] for k, v in dep_dict.items()}\n\n # Find chains\n chains = []\n current_chain = []\n\n # Sort nodes for sequential processing\n nodes = sorted(int_dict.keys())\n\n i = 0\n while i < len(nodes):\n node = nodes[i]\n\n # Start new chain\n if not current_chain:\n current_chain = [node]\n i += 1\n continue\n\n # Check if this node continues the chain\n last_node = current_chain[-1]\n\n # Conditions for chain continuation:\n # 1. Numbers are consecutive\n # 2. Current node has exactly one dependency\n # 3. That dependency is the previous node in chain\n # 4. 
The previous node has exactly one successor\n is_consecutive = node == last_node + 1\n has_single_dep = len(int_dict[node]) == 1\n deps_on_last = has_single_dep and int_dict[node][0] == last_node\n last_has_single_successor = sum(1 for k, v in int_dict.items() if last_node in v) == 1\n\n if is_consecutive and deps_on_last and last_has_single_successor:\n current_chain.append(node)\n else:\n if len(current_chain) > 1:\n chains.append(current_chain)\n current_chain = [node]\n\n i += 1\n\n # Add last chain if it exists\n if len(current_chain) > 1:\n chains.append(current_chain)\n\n # Create compressed dictionary\n compressed_dict = {}\n processed_nodes = set()\n\n # Add compressed chains\n for chain in chains:\n chain_name = f\"{chain[0]}-{chain[-1]}\"\n # Find dependencies of first node in chain\n deps = int_dict[chain[0]]\n compressed_dict[chain_name] = [str(d) for d in deps]\n processed_nodes.update(chain)\n\n # Add remaining non-chain nodes\n for node in nodes:\n if node not in processed_nodes:\n compressed_dict[str(node)] = [str(d) for d in int_dict[node]]\n\n # Update dependencies to use compressed names\n for k in compressed_dict:\n deps = compressed_dict[k]\n new_deps = []\n for dep in deps:\n dep_int = int(dep)\n # Find if this dependency is part of a chain\n chain_found = False\n for chain in chains:\n if dep_int in chain:\n new_deps.append(f\"{chain[0]}-{chain[-1]}\")\n chain_found = True\n break\n if not chain_found:\n new_deps.append(dep)\n compressed_dict[k] = new_deps\n\n return compressed_dict\n\n\ndef compress_chains(G):\n \"\"\"\n Compress chains in a directed graph by merging nodes that have single parent and single child.\n\n Args:\n G: NetworkX directed graph\n\n Returns:\n NetworkX directed graph with compressed chains\n \"\"\"\n G_compressed = G.copy()\n processed_nodes = set()\n\n while True:\n # Find nodes with exactly one parent and one child\n nodes_to_compress = []\n for node in list(\n G_compressed.nodes()","source_hash":"38fc7f4b3b3bd85837a1ff7a48042eb13eadea3099d3552a13f4ef4ed8b9d95e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.view_dep_graph.compress_chains","uri":"program://AgentLab/function/src.agentlab.experiments.view_dep_graph.compress_chains#L243-L309","kind":"function","name":"compress_chains","path":"src/agentlab/experiments/view_dep_graph.py","language":"python","start_line":243,"end_line":309,"context_start_line":223,"context_end_line":326,"code":" # Update dependencies to use compressed names\n for k in compressed_dict:\n deps = compressed_dict[k]\n new_deps = []\n for dep in deps:\n dep_int = int(dep)\n # Find if this dependency is part of a chain\n chain_found = False\n for chain in chains:\n if dep_int in chain:\n new_deps.append(f\"{chain[0]}-{chain[-1]}\")\n chain_found = True\n break\n if not chain_found:\n new_deps.append(dep)\n compressed_dict[k] = new_deps\n\n return compressed_dict\n\n\ndef compress_chains(G):\n \"\"\"\n Compress chains in a directed graph by merging nodes that have single parent and single child.\n\n Args:\n G: NetworkX directed graph\n\n Returns:\n NetworkX directed graph with compressed chains\n \"\"\"\n G_compressed = G.copy()\n processed_nodes = set()\n\n while True:\n # Find nodes with exactly one parent and one child\n nodes_to_compress = []\n for node in list(\n G_compressed.nodes()\n ): # Create a list to avoid modification during iteration\n if node in processed_nodes:\n continue\n\n predecessors = list(G_compressed.predecessors(node))\n successors = 
list(G_compressed.successors(node))\n\n if len(predecessors) == 1 and len(successors) == 1:\n pred = predecessors[0]\n succ = successors[0]\n\n # Skip if any node in the chain is already processed\n if pred in processed_nodes or succ in processed_nodes:\n continue\n\n # Only compress if middle node has single parent/child\n pred_preds = list(G_compressed.predecessors(pred))\n succ_succs = list(G_compressed.successors(succ))\n\n if len(pred_preds) <= 1 and len(succ_succs) <= 1:\n nodes_to_compress.append((pred, node, succ))\n processed_nodes.update([pred, node, succ])\n\n if not nodes_to_compress:\n break\n\n # Process each chain\n for pred, mid, succ in nodes_to_compress:\n if not all(G_compressed.has_node(n) for n in [pred, mid, succ]):\n continue\n\n # Create new merged node name\n new_node = \",\".join(str(n) for n in [pred, mid, succ])\n\n # Add the new node\n G_compressed.add_node(new_node)\n\n # Add edges from all predecessors of first node\n for p in list(G_compressed.predecessors(pred)):\n G_compressed.add_edge(p, new_node)\n\n # Add edges to all successors of last node\n for s in list(G_compressed.successors(succ)):\n G_compressed.add_edge(new_node, s)\n\n # Remove the old nodes\n G_compressed.remove_nodes_from([pred, mid, succ])\n\n return G_compressed\n\n\n# benchmark = DEFAULT_BENCHMARKS[\"webarena\"]()\nbenchmark = DEFAULT_BENCHMARKS[\"visualwebarena\"]()\n\ndep_graph = benchmark.dependency_graph_over_tasks()\ndep_graph = clean_dict(dep_graph)\n\ndep_graph = compress_sequential_chains(dep_graph)\ngraph = dict_to_networkx(dep_graph)\n\n# graph = compress_chains(graph)\n\ncomponents = nx.weakly_connected_components(graph)\ncomponents = [graph.subgraph(component).copy() for component in components]\nplot_components_grid(components)\nplt.show()","source_hash":"38fc7f4b3b3bd85837a1ff7a48042eb13eadea3099d3552a13f4ef4ed8b9d95e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.view_dep_graph.name_to_size","uri":"program://AgentLab/function/src.agentlab.experiments.view_dep_graph.name_to_size#L58-L66","kind":"function","name":"name_to_size","path":"src/agentlab/experiments/view_dep_graph.py","language":"python","start_line":58,"end_line":66,"context_start_line":38,"context_end_line":86,"code":"\ndef plot_graph(G, ax, title=None, node_color=\"lightblue\", node_size=40, font_size=8):\n \"\"\"\n Plot a single graph component on the given matplotlib axis.\n\n Args:\n G: NetworkX graph (should be a single connected component)\n ax: Matplotlib axis to plot on\n title: Optional title for the subplot\n node_color: Color for the nodes\n node_size: Size of the nodes\n font_size: Size of the node labels\n \"\"\"\n # Use a simple layout for better performance\n # pos = nx.spring_layout(G, k=0.1, iterations=100)\n\n pos = nx.kamada_kawai_layout(G)\n\n # pos = nx.spectral_layout(G)\n\n def name_to_size(name):\n if \"-\" in name:\n start, end = name.split(\"-\")\n\n n_nodes = int(end) - int(start) + 1\n else:\n n_nodes = 1\n size_factor = node_size / 10\n return n_nodes * size_factor\n\n # compute size based on name\n sizes = [name_to_size(name) for name in G.nodes]\n\n nx.draw(\n G,\n pos,\n ax=ax,\n with_labels=True,\n node_color=node_color,\n node_size=sizes,\n font_size=font_size,\n font_weight=\"normal\",\n arrows=True,\n arrowsize=15,\n )\n\n if title:\n ax.set_title(title)\n ax.axis(\"off\")","source_hash":"38fc7f4b3b3bd85837a1ff7a48042eb13eadea3099d3552a13f4ef4ed8b9d95e","truncated":false} 
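For context, the bottom of view_dep_graph.py wires these helpers together into a single pipeline. A minimal sketch of the same flow (assuming the helpers above are in scope and the relevant browsergym benchmark package is installed; "webarena" is used for illustration, mirroring the commented-out line in the script):

import matplotlib.pyplot as plt
import networkx as nx
from bgym import DEFAULT_BENCHMARKS

benchmark = DEFAULT_BENCHMARKS["webarena"]()
dep_graph = clean_dict(benchmark.dependency_graph_over_tasks())  # strip task-name prefixes
dep_graph = compress_sequential_chains(dep_graph)  # collapse linear runs into "start-end" nodes
graph = dict_to_networkx(dep_graph)
components = [graph.subgraph(c).copy() for c in nx.weakly_connected_components(graph)]
plot_components_grid(components)  # one subplot per weakly connected component
plt.show()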
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util","uri":"program://AgentLab/module/src.agentlab.experiments.reproducibility_util#L1-L374","kind":"module","name":"src.agentlab.experiments.reproducibility_util","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":1,"end_line":374,"context_start_line":1,"context_end_line":374,"code":"import csv\nimport logging\nimport os\nimport platform\nfrom datetime import datetime\nfrom importlib import metadata\nfrom pathlib import Path\n\nimport bgym\nimport pandas as pd\nfrom bgym import Benchmark\nfrom git import InvalidGitRepositoryError, Repo\nfrom git.config import GitConfigParser\n\nimport agentlab\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\n\n\ndef _get_repo(module):\n return Repo(Path(module.__file__).resolve().parent, search_parent_directories=True)\n\n\ndef _get_benchmark_version(\n benchmark: Benchmark, allow_bypass_benchmark_version: bool = False\n) -> str:\n benchmark_name = benchmark.name\n\n if hasattr(benchmark, \"get_version\"):\n return benchmark.get_version()\n\n # in between 2 pull requests\n if benchmark_name.startswith(\"miniwob\"):\n return metadata.distribution(\"browsergym.miniwob\").version\n elif benchmark_name.startswith(\"workarena\"):\n return metadata.distribution(\"browsergym.workarena\").version\n elif benchmark_name.startswith(\"webarena\"):\n return metadata.distribution(\"browsergym.webarena\").version\n elif benchmark_name.startswith(\"visualwebarena\"):\n return metadata.distribution(\"browsergym.visualwebarena\").version\n elif benchmark_name.startswith(\"weblinx\"):\n try:\n return metadata.distribution(\"weblinx_browsergym\").version\n except metadata.PackageNotFoundError:\n return \"0.0.1rc1\"\n elif benchmark_name.startswith(\"assistantbench\"):\n return metadata.distribution(\"browsergym.assistantbench\").version\n else:\n if allow_bypass_benchmark_version:\n return \"bypassed\"\n else:\n raise ValueError(f\"Unknown benchmark {benchmark_name}\")\n\n\ndef _get_git_username(repo: Repo) -> str:\n \"\"\"\n Retrieves the first available Git username from various sources.\n\n Note: overlycomplex designed by Claude and not fully tested.\n\n This function checks multiple locations for the Git username in the following order:\n 1. Repository-specific configuration\n 2. GitHub API (if the remote is a GitHub repository)\n 3. Global Git configuration\n 4. System Git configuration\n 5. 
Environment variables (GIT_AUTHOR_NAME and GIT_COMMITTER_NAME)\n\n Args:\n repo (Repo): A GitPython Repo object representing the Git repository.\n\n Returns:\n str: The first non-None username found, or None if no username is found.\n \"\"\"\n # Repository-specific configuration\n try:\n username = repo.config_reader().get_value(\"user\", \"name\", None)\n if username:\n return username\n except Exception:\n pass\n\n try:\n # GitHub username\n remote_url = repo.remotes.origin.url\n if \"github.com\" in remote_url:\n import json\n import re\n import urllib.request\n\n match = re.search(r\"github\\.com[:/](.+)/(.+)\\.git\", remote_url)\n if match:\n owner, repo_name = match.groups()\n api_url = f\"https://api.github.com/repos/{owner}/{repo_name}\"\n with urllib.request.urlopen(api_url) as response:\n data = json.loads(response.read().decode())\n username = data[\"owner\"][\"login\"]\n if username:\n return username\n except Exception:\n pass\n\n try:\n # Global configuration\n username = GitConfigParser(repo.git.config(\"--global\", \"--list\"), read_only=True).get_value(\n \"user\", \"name\", None\n )\n if username:\n return username\n except Exception:\n pass\n\n try:\n # System configuration\n username = GitConfigParser(repo.git.config(\"--system\", \"--list\"), read_only=True).get_value(\n \"user\", \"name\", None\n )\n if username:\n return username\n except Exception:\n pass\n\n # Environment variables\n return os.environ.get(\"GIT_AUTHOR_NAME\") or os.environ.get(\"GIT_COMMITTER_NAME\")\n\n\ndef _get_git_info(module, changes_white_list=()) -> tuple[str, list[tuple[str, Path]]]:\n \"\"\"\n Retrieve comprehensive git information for the given module.\n\n This function attempts to find the git repository containing the specified\n module and returns the current commit hash and a comprehensive list of all\n files that contribute to the repository's state.\n\n Args:\n module: The Python module object to check for git information.\n changes_white_list: A list of file paths to ignore when checking for changes.\n\n Returns:\n tuple: A tuple containing two elements:\n - str or None: The current git commit hash, or None if not a git repo.\n - list of tuple: A list of (status, Path) tuples for all modified files.\n Empty list if not a git repo. Status can be 'M' (modified), 'A' (added),\n 'D' (deleted), 'R' (renamed), 'C' (copied), 'U' (updated but unmerged),\n or '??' 
(untracked).\n \"\"\"\n\n try:\n repo = _get_repo(module)\n\n git_hash = repo.head.object.hexsha\n\n modified_files = []\n\n # Staged changes\n staged_changes = repo.index.diff(repo.head.commit)\n for change in staged_changes:\n modified_files.append((change.change_type, Path(change.a_path)))\n\n # Unstaged changes\n unstaged_changes = repo.index.diff(None)\n for change in unstaged_changes:\n modified_files.append((change.change_type, Path(change.a_path)))\n\n # Untracked files\n untracked_files = repo.untracked_files\n for file in untracked_files:\n modified_files.append((\"??\", Path(file)))\n\n # wildcard matching from white list\n modified_files_filtered = []\n for status, file in modified_files:\n if any(file.match(pattern) for pattern in changes_white_list):\n continue\n modified_files_filtered.append((status, file))\n\n return git_hash, modified_files_filtered\n except InvalidGitRepositoryError:\n return None, []\n\n\ndef get_reproducibility_info(\n agent_names: str | list[str],\n benchmark: Benchmark,\n study_id: str = \"\",\n comment=None,\n changes_white_list=( # Files that are often modified during experiments but do not affect reproducibility\n \"*/reproducibility_script.py\",\n \"*reproducibility_journal.csv\",\n \"*main.py\",\n \"*inspect_results.ipynb\",\n ),\n ignore_changes=False,\n allow_bypass_benchmark_version=False,\n):\n \"\"\"\n Retrieve a dict of information that could influence the reproducibility of an experiment.\n \"\"\"\n from browsergym import core\n\n import agentlab\n\n if isinstance(agent_names, str):\n agent_names = [agent_names]\n\n try:\n repo = _get_repo(agentlab)\n except InvalidGitRepositoryError:\n repo = None\n\n info = {\n \"git_user\": _get_git_username(repo),\n \"agent_names\": agent_names,\n \"benchmark\": benchmark.name,\n \"study_id\": study_id,\n \"comment\": comment,\n \"benchmark_version\": _get_benchmark_version(benchmark, allow_bypass_benchmark_version),\n \"date\": datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"),\n \"os\": f\"{platform.system()} ({platform.version()})\",\n \"python_version\": platform.python_version(),\n \"playwright_version\": metadata.distribution(\"playwright\").version,\n }\n\n def add_git_info(module_name, module):\n git_hash, modified_files = _get_git_info(module, changes_white_list)\n\n modified_files_str = \"\\n\".join([f\" {status}: {file}\" for status, file in modified_files])\n\n if len(modified_files) > 0:\n msg = (\n f\"Module {module_name} has uncommitted changes. \"\n f\"Modified files: \\n{modified_files_str}\\n\"\n )\n if ignore_changes:\n logging.warning(\n msg + \"Ignoring changes as requested and proceeding to experiments.\"\n )\n else:\n raise ValueError(\n msg + \"Please commit or stash your changes before running the experiment.\"\n )\n\n info[f\"{module_name}_version\"] = module.__version__\n info[f\"{module_name}_git_hash\"] = git_hash\n info[f\"{module_name}__local_modifications\"] = modified_files_str\n\n add_git_info(\"agentlab\", agentlab)\n add_git_info(\"browsergym\", core)\n return info\n\n\ndef assert_compatible(info: dict, old_info: dict, raise_if_incompatible=True):\n \"\"\"Make sure that the two info dicts are compatible.\"\"\"\n # TODO may need to adapt if there are multiple agents, and the re-run on\n # error only has a subset of agents. 
Hence old_info.agent_name != info.agent_name\n for key in info.keys():\n if key in (\"date\", \"avg_reward\", \"std_err\", \"n_completed\", \"n_err\"):\n continue\n if info[key] != old_info[key]:\n _raise_or_warn(\n f\"Reproducibility info already exists and is not compatible. \"\n f\"Key {key} has changed from {old_info[key]} to {info[key]}. \"\n f\"Set strict_reproducibility=False to bypass this error.\",\n raise_error=raise_if_incompatible,\n )\n\n\ndef _raise_or_warn(msg, raise_error=True):\n if raise_error:\n raise ValueError(msg)\n else:\n logging.warning(msg)\n\n\ndef _verify_report(report_df: pd.DataFrame, agent_names: list[str], strict_reproducibility=True):\n\n report_df = report_df.reset_index()\n\n unique_agent_names = report_df[\"agent.agent_name\"].unique()\n if set(agent_names) != set(unique_agent_names):\n raise ValueError(\n f\"Agent names in the report {unique_agent_names} do not match the agent names {agent_names}.\"\n )\n if len(set(agent_names)) != len(agent_names):\n raise ValueError(f\"Duplicate agent names {agent_names}.\")\n\n report_df = report_df.set_index(\"agent.agent_name\", inplace=False)\n\n for idx in report_df.index:\n n_err = report_df.loc[idx, \"n_err\"].item()\n n_completed, n_total = report_df.loc[idx, \"n_completed\"].split(\"/\")\n if n_err > 0:\n _raise_or_warn(\n f\"Experiment {idx} has {n_err} errors. Please rerun the study and make sure all tasks are completed.\",\n raise_error=strict_reproducibility,\n )\n if n_completed != n_total:\n _raise_or_warn(\n f\"Experiment {idx} has {n_completed} completed tasks out of {n_total}. \"\n f\"Please rerun the study and make sure all tasks are completed.\",\n raise_error=strict_reproducibility,\n )\n return report_df\n\n\ndef _get_csv_headers(file_path: str) -> list[str]:\n with open(file_path, \"r\", newline=\"\") as file:\n reader = csv.reader(file)\n try:\n headers = next(reader)\n except StopIteration:\n headers = None\n return headers\n\n\ndef _add_result_to_info(info: dict, report_df: pd.DataFrame):\n \"\"\"Extracts the results from the report and adds them to the info dict inplace\"\"\"\n\n for key in (\"avg_reward\", \"std_err\", \"n_err\", \"n_completed\"):\n value = report_df.loc[info[\"agent_name\"], key]\n if hasattr(value, \"item\"):\n value = value.item()\n info[key] = value\n\n\ndef append_to_journal(\n info, report_df: pd.DataFrame, journal_path=None, strict_reproducibility=True\n):\n \"\"\"Append the info and results to the reproducibility journal.\"\"\"\n if journal_path is None:\n try:\n _get_repo(agentlab) # if not based on git clone, this will raise an error\n journal_path = (\n Path(agentlab.__file__).parent.parent.parent / \"reproducibility_journal.csv\"\n )\n except InvalidGitRepositoryError:\n logging.warning(\n \"Could not find a git repository. 
Saving the journal to the results directory.\"\n \"To add to the journal, git clone agentlab and use `pip install -e .`\"\n )\n journal_path = RESULTS_DIR / \"reproducibility_journal.csv\"\n\n logging.info(f\"Appending to journal {journal_path}\")\n\n if len(report_df) != len(info[\"agent_names\"]):\n raise ValueError(\n \"Mismatch between the number of agents in reproducibility info and the summary report.\"\n )\n\n report_df = _verify_report(\n report_df, info[\"agent_names\"], strict_reproducibility=strict_reproducibility\n )\n\n rows = []\n headers = None\n if journal_path.exists():\n headers = _get_csv_headers(journal_path)\n\n if headers is None: # first creation\n headers = list(info.keys())\n headers[headers.index(\"agent_names\")] = \"agent_name\"\n rows.append(headers)\n\n for agent_name in info[\"agent_names\"]:\n info_copy = info.copy()\n del info_copy[\"agent_names\"]\n info_copy[\"agent_name\"] = agent_name\n\n _add_result_to_info(info_copy, report_df)\n\n rows.append([str(info_copy[key]) for key in headers])\n\n with open(journal_path, \"a\", newline=\"\") as file:\n writer = csv.writer(file)\n for row in rows:\n writer.writerow(row)","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util._get_repo","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util._get_repo#L19-L20","kind":"function","name":"_get_repo","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":19,"end_line":20,"context_start_line":1,"context_end_line":40,"code":"import csv\nimport logging\nimport os\nimport platform\nfrom datetime import datetime\nfrom importlib import metadata\nfrom pathlib import Path\n\nimport bgym\nimport pandas as pd\nfrom bgym import Benchmark\nfrom git import InvalidGitRepositoryError, Repo\nfrom git.config import GitConfigParser\n\nimport agentlab\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\n\n\ndef _get_repo(module):\n return Repo(Path(module.__file__).resolve().parent, search_parent_directories=True)\n\n\ndef _get_benchmark_version(\n benchmark: Benchmark, allow_bypass_benchmark_version: bool = False\n) -> str:\n benchmark_name = benchmark.name\n\n if hasattr(benchmark, \"get_version\"):\n return benchmark.get_version()\n\n # in between 2 pull requests\n if benchmark_name.startswith(\"miniwob\"):\n return metadata.distribution(\"browsergym.miniwob\").version\n elif benchmark_name.startswith(\"workarena\"):\n return metadata.distribution(\"browsergym.workarena\").version\n elif benchmark_name.startswith(\"webarena\"):\n return metadata.distribution(\"browsergym.webarena\").version\n elif benchmark_name.startswith(\"visualwebarena\"):\n return metadata.distribution(\"browsergym.visualwebarena\").version\n elif benchmark_name.startswith(\"weblinx\"):","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util._get_benchmark_version","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util._get_benchmark_version#L23-L51","kind":"function","name":"_get_benchmark_version","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":23,"end_line":51,"context_start_line":3,"context_end_line":71,"code":"import os\nimport platform\nfrom datetime import datetime\nfrom importlib import metadata\nfrom pathlib import 
Path\n\nimport bgym\nimport pandas as pd\nfrom bgym import Benchmark\nfrom git import InvalidGitRepositoryError, Repo\nfrom git.config import GitConfigParser\n\nimport agentlab\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\n\n\ndef _get_repo(module):\n    return Repo(Path(module.__file__).resolve().parent, search_parent_directories=True)\n\n\ndef _get_benchmark_version(\n    benchmark: Benchmark, allow_bypass_benchmark_version: bool = False\n) -> str:\n    benchmark_name = benchmark.name\n\n    if hasattr(benchmark, \"get_version\"):\n        return benchmark.get_version()\n\n    # in between 2 pull requests\n    if benchmark_name.startswith(\"miniwob\"):\n        return metadata.distribution(\"browsergym.miniwob\").version\n    elif benchmark_name.startswith(\"workarena\"):\n        return metadata.distribution(\"browsergym.workarena\").version\n    elif benchmark_name.startswith(\"webarena\"):\n        return metadata.distribution(\"browsergym.webarena\").version\n    elif benchmark_name.startswith(\"visualwebarena\"):\n        return metadata.distribution(\"browsergym.visualwebarena\").version\n    elif benchmark_name.startswith(\"weblinx\"):\n        try:\n            return metadata.distribution(\"weblinx_browsergym\").version\n        except metadata.PackageNotFoundError:\n            return \"0.0.1rc1\"\n    elif benchmark_name.startswith(\"assistantbench\"):\n        return metadata.distribution(\"browsergym.assistantbench\").version\n    else:\n        if allow_bypass_benchmark_version:\n            return \"bypassed\"\n        else:\n            raise ValueError(f\"Unknown benchmark {benchmark_name}\")\n\n\ndef _get_git_username(repo: Repo) -> str:\n    \"\"\"\n    Retrieves the first available Git username from various sources.\n\n    Note: overly complex; designed by Claude and not fully tested.\n\n    This function checks multiple locations for the Git username in the following order:\n    1. Repository-specific configuration\n    2. GitHub API (if the remote is a GitHub repository)\n    3. Global Git configuration\n    4. System Git configuration\n    5. 
Environment variables (GIT_AUTHOR_NAME and GIT_COMMITTER_NAME)\n\n Args:\n repo (Repo): A GitPython Repo object representing the Git repository.\n\n Returns:\n str: The first non-None username found, or None if no username is found.","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util._get_git_username","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util._get_git_username#L54-L122","kind":"function","name":"_get_git_username","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":54,"end_line":122,"context_start_line":34,"context_end_line":142,"code":" elif benchmark_name.startswith(\"workarena\"):\n return metadata.distribution(\"browsergym.workarena\").version\n elif benchmark_name.startswith(\"webarena\"):\n return metadata.distribution(\"browsergym.webarena\").version\n elif benchmark_name.startswith(\"visualwebarena\"):\n return metadata.distribution(\"browsergym.visualwebarena\").version\n elif benchmark_name.startswith(\"weblinx\"):\n try:\n return metadata.distribution(\"weblinx_browsergym\").version\n except metadata.PackageNotFoundError:\n return \"0.0.1rc1\"\n elif benchmark_name.startswith(\"assistantbench\"):\n return metadata.distribution(\"browsergym.assistantbench\").version\n else:\n if allow_bypass_benchmark_version:\n return \"bypassed\"\n else:\n raise ValueError(f\"Unknown benchmark {benchmark_name}\")\n\n\ndef _get_git_username(repo: Repo) -> str:\n \"\"\"\n Retrieves the first available Git username from various sources.\n\n Note: overlycomplex designed by Claude and not fully tested.\n\n This function checks multiple locations for the Git username in the following order:\n 1. Repository-specific configuration\n 2. GitHub API (if the remote is a GitHub repository)\n 3. Global Git configuration\n 4. System Git configuration\n 5. 
Environment variables (GIT_AUTHOR_NAME and GIT_COMMITTER_NAME)\n\n Args:\n repo (Repo): A GitPython Repo object representing the Git repository.\n\n Returns:\n str: The first non-None username found, or None if no username is found.\n \"\"\"\n # Repository-specific configuration\n try:\n username = repo.config_reader().get_value(\"user\", \"name\", None)\n if username:\n return username\n except Exception:\n pass\n\n try:\n # GitHub username\n remote_url = repo.remotes.origin.url\n if \"github.com\" in remote_url:\n import json\n import re\n import urllib.request\n\n match = re.search(r\"github\\.com[:/](.+)/(.+)\\.git\", remote_url)\n if match:\n owner, repo_name = match.groups()\n api_url = f\"https://api.github.com/repos/{owner}/{repo_name}\"\n with urllib.request.urlopen(api_url) as response:\n data = json.loads(response.read().decode())\n username = data[\"owner\"][\"login\"]\n if username:\n return username\n except Exception:\n pass\n\n try:\n # Global configuration\n username = GitConfigParser(repo.git.config(\"--global\", \"--list\"), read_only=True).get_value(\n \"user\", \"name\", None\n )\n if username:\n return username\n except Exception:\n pass\n\n try:\n # System configuration\n username = GitConfigParser(repo.git.config(\"--system\", \"--list\"), read_only=True).get_value(\n \"user\", \"name\", None\n )\n if username:\n return username\n except Exception:\n pass\n\n # Environment variables\n return os.environ.get(\"GIT_AUTHOR_NAME\") or os.environ.get(\"GIT_COMMITTER_NAME\")\n\n\ndef _get_git_info(module, changes_white_list=()) -> tuple[str, list[tuple[str, Path]]]:\n \"\"\"\n Retrieve comprehensive git information for the given module.\n\n This function attempts to find the git repository containing the specified\n module and returns the current commit hash and a comprehensive list of all\n files that contribute to the repository's state.\n\n Args:\n module: The Python module object to check for git information.\n changes_white_list: A list of file paths to ignore when checking for changes.\n\n Returns:\n tuple: A tuple containing two elements:\n - str or None: The current git commit hash, or None if not a git repo.\n - list of tuple: A list of (status, Path) tuples for all modified files.\n Empty list if not a git repo. 
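# Quick, self-contained check (illustrative URLs) of the remote-parsing regex
# used in the GitHub branch above: it requires a trailing ".git", so remotes
# cloned without that suffix fall through to the next username source.
import re

pattern = r"github\.com[:/](.+)/(.+)\.git"
for url in (
    "git@github.com:ServiceNow/AgentLab.git",
    "https://github.com/ServiceNow/AgentLab.git",
    "https://github.com/ServiceNow/AgentLab",  # no ".git" -> no match
):
    match = re.search(pattern, url)
    print(url, "->", match.groups() if match else None)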
Status can be 'M' (modified), 'A' (added),\n 'D' (deleted), 'R' (renamed), 'C' (copied), 'U' (updated but unmerged),","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util._get_git_info","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util._get_git_info#L125-L177","kind":"function","name":"_get_git_info","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":125,"end_line":177,"context_start_line":105,"context_end_line":197,"code":" )\n if username:\n return username\n except Exception:\n pass\n\n try:\n # System configuration\n username = GitConfigParser(repo.git.config(\"--system\", \"--list\"), read_only=True).get_value(\n \"user\", \"name\", None\n )\n if username:\n return username\n except Exception:\n pass\n\n # Environment variables\n return os.environ.get(\"GIT_AUTHOR_NAME\") or os.environ.get(\"GIT_COMMITTER_NAME\")\n\n\ndef _get_git_info(module, changes_white_list=()) -> tuple[str, list[tuple[str, Path]]]:\n \"\"\"\n Retrieve comprehensive git information for the given module.\n\n This function attempts to find the git repository containing the specified\n module and returns the current commit hash and a comprehensive list of all\n files that contribute to the repository's state.\n\n Args:\n module: The Python module object to check for git information.\n changes_white_list: A list of file paths to ignore when checking for changes.\n\n Returns:\n tuple: A tuple containing two elements:\n - str or None: The current git commit hash, or None if not a git repo.\n - list of tuple: A list of (status, Path) tuples for all modified files.\n Empty list if not a git repo. Status can be 'M' (modified), 'A' (added),\n 'D' (deleted), 'R' (renamed), 'C' (copied), 'U' (updated but unmerged),\n or '??' 
(untracked).\n \"\"\"\n\n try:\n repo = _get_repo(module)\n\n git_hash = repo.head.object.hexsha\n\n modified_files = []\n\n # Staged changes\n staged_changes = repo.index.diff(repo.head.commit)\n for change in staged_changes:\n modified_files.append((change.change_type, Path(change.a_path)))\n\n # Unstaged changes\n unstaged_changes = repo.index.diff(None)\n for change in unstaged_changes:\n modified_files.append((change.change_type, Path(change.a_path)))\n\n # Untracked files\n untracked_files = repo.untracked_files\n for file in untracked_files:\n modified_files.append((\"??\", Path(file)))\n\n # wildcard matching from white list\n modified_files_filtered = []\n for status, file in modified_files:\n if any(file.match(pattern) for pattern in changes_white_list):\n continue\n modified_files_filtered.append((status, file))\n\n return git_hash, modified_files_filtered\n except InvalidGitRepositoryError:\n return None, []\n\n\ndef get_reproducibility_info(\n agent_names: str | list[str],\n benchmark: Benchmark,\n study_id: str = \"\",\n comment=None,\n changes_white_list=( # Files that are often modified during experiments but do not affect reproducibility\n \"*/reproducibility_script.py\",\n \"*reproducibility_journal.csv\",\n \"*main.py\",\n \"*inspect_results.ipynb\",\n ),\n ignore_changes=False,\n allow_bypass_benchmark_version=False,\n):\n \"\"\"\n Retrieve a dict of information that could influence the reproducibility of an experiment.\n \"\"\"\n from browsergym import core","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util.get_reproducibility_info","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util.get_reproducibility_info#L180-L247","kind":"function","name":"get_reproducibility_info","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":180,"end_line":247,"context_start_line":160,"context_end_line":267,"code":" for change in unstaged_changes:\n modified_files.append((change.change_type, Path(change.a_path)))\n\n # Untracked files\n untracked_files = repo.untracked_files\n for file in untracked_files:\n modified_files.append((\"??\", Path(file)))\n\n # wildcard matching from white list\n modified_files_filtered = []\n for status, file in modified_files:\n if any(file.match(pattern) for pattern in changes_white_list):\n continue\n modified_files_filtered.append((status, file))\n\n return git_hash, modified_files_filtered\n except InvalidGitRepositoryError:\n return None, []\n\n\ndef get_reproducibility_info(\n agent_names: str | list[str],\n benchmark: Benchmark,\n study_id: str = \"\",\n comment=None,\n changes_white_list=( # Files that are often modified during experiments but do not affect reproducibility\n \"*/reproducibility_script.py\",\n \"*reproducibility_journal.csv\",\n \"*main.py\",\n \"*inspect_results.ipynb\",\n ),\n ignore_changes=False,\n allow_bypass_benchmark_version=False,\n):\n \"\"\"\n Retrieve a dict of information that could influence the reproducibility of an experiment.\n \"\"\"\n from browsergym import core\n\n import agentlab\n\n if isinstance(agent_names, str):\n agent_names = [agent_names]\n\n try:\n repo = _get_repo(agentlab)\n except InvalidGitRepositoryError:\n repo = None\n\n info = {\n \"git_user\": _get_git_username(repo),\n \"agent_names\": agent_names,\n \"benchmark\": benchmark.name,\n \"study_id\": study_id,\n \"comment\": comment,\n 
\"benchmark_version\": _get_benchmark_version(benchmark, allow_bypass_benchmark_version),\n \"date\": datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"),\n \"os\": f\"{platform.system()} ({platform.version()})\",\n \"python_version\": platform.python_version(),\n \"playwright_version\": metadata.distribution(\"playwright\").version,\n }\n\n def add_git_info(module_name, module):\n git_hash, modified_files = _get_git_info(module, changes_white_list)\n\n modified_files_str = \"\\n\".join([f\" {status}: {file}\" for status, file in modified_files])\n\n if len(modified_files) > 0:\n msg = (\n f\"Module {module_name} has uncommitted changes. \"\n f\"Modified files: \\n{modified_files_str}\\n\"\n )\n if ignore_changes:\n logging.warning(\n msg + \"Ignoring changes as requested and proceeding to experiments.\"\n )\n else:\n raise ValueError(\n msg + \"Please commit or stash your changes before running the experiment.\"\n )\n\n info[f\"{module_name}_version\"] = module.__version__\n info[f\"{module_name}_git_hash\"] = git_hash\n info[f\"{module_name}__local_modifications\"] = modified_files_str\n\n add_git_info(\"agentlab\", agentlab)\n add_git_info(\"browsergym\", core)\n return info\n\n\ndef assert_compatible(info: dict, old_info: dict, raise_if_incompatible=True):\n \"\"\"Make sure that the two info dicts are compatible.\"\"\"\n # TODO may need to adapt if there are multiple agents, and the re-run on\n # error only has a subset of agents. Hence old_info.agent_name != info.agent_name\n for key in info.keys():\n if key in (\"date\", \"avg_reward\", \"std_err\", \"n_completed\", \"n_err\"):\n continue\n if info[key] != old_info[key]:\n _raise_or_warn(\n f\"Reproducibility info already exist and is not compatible.\"\n f\"Key {key} has changed from {old_info[key]} to {info[key]}.\"\n f\"Set strict_reproducibility=False to bypass this error.\",\n raise_error=raise_if_incompatible,\n )\n\n\ndef _raise_or_warn(msg, raise_error=True):\n if raise_error:","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util.assert_compatible","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util.assert_compatible#L250-L263","kind":"function","name":"assert_compatible","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":250,"end_line":263,"context_start_line":230,"context_end_line":283,"code":" f\"Modified files: \\n{modified_files_str}\\n\"\n )\n if ignore_changes:\n logging.warning(\n msg + \"Ignoring changes as requested and proceeding to experiments.\"\n )\n else:\n raise ValueError(\n msg + \"Please commit or stash your changes before running the experiment.\"\n )\n\n info[f\"{module_name}_version\"] = module.__version__\n info[f\"{module_name}_git_hash\"] = git_hash\n info[f\"{module_name}__local_modifications\"] = modified_files_str\n\n add_git_info(\"agentlab\", agentlab)\n add_git_info(\"browsergym\", core)\n return info\n\n\ndef assert_compatible(info: dict, old_info: dict, raise_if_incompatible=True):\n \"\"\"Make sure that the two info dicts are compatible.\"\"\"\n # TODO may need to adapt if there are multiple agents, and the re-run on\n # error only has a subset of agents. 
Hence old_info.agent_name != info.agent_name\n    for key in info.keys():\n        if key in (\"date\", \"avg_reward\", \"std_err\", \"n_completed\", \"n_err\"):\n            continue\n        if info[key] != old_info[key]:\n            _raise_or_warn(\n                f\"Reproducibility info already exists and is not compatible. \"\n                f\"Key {key} has changed from {old_info[key]} to {info[key]}. \"\n                f\"Set strict_reproducibility=False to bypass this error.\",\n                raise_error=raise_if_incompatible,\n            )\n\n\ndef _raise_or_warn(msg, raise_error=True):\n    if raise_error:\n        raise ValueError(msg)\n    else:\n        logging.warning(msg)\n\n\ndef _verify_report(report_df: pd.DataFrame, agent_names: list[str], strict_reproducibility=True):\n\n    report_df = report_df.reset_index()\n\n    unique_agent_names = report_df[\"agent.agent_name\"].unique()\n    if set(agent_names) != set(unique_agent_names):\n        raise ValueError(\n            f\"Agent names in the report {unique_agent_names} do not match the agent names {agent_names}.\"\n        )\n    if len(set(agent_names)) != len(agent_names):\n        raise ValueError(f\"Duplicate agent names {agent_names}.\")","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util._raise_or_warn","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util._raise_or_warn#L266-L270","kind":"function","name":"_raise_or_warn","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":266,"end_line":270,"context_start_line":246,"context_end_line":290,"code":"    add_git_info(\"browsergym\", core)\n    return info\n\n\ndef assert_compatible(info: dict, old_info: dict, raise_if_incompatible=True):\n    \"\"\"Make sure that the two info dicts are compatible.\"\"\"\n    # TODO may need to adapt if there are multiple agents, and the re-run on\n    # error only has a subset of agents. 
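# Self-contained illustration (hypothetical report data) of the checks in
# _verify_report: the report is indexed by agent name and "n_completed" is a
# "done/total" string, so an incomplete or errored study can be flagged.
import pandas as pd

report_df = pd.DataFrame(
    {
        "agent.agent_name": ["GenericAgent-gpt-4o-mini"],
        "n_completed": ["4/5"],
        "n_err": [1],
    }
).set_index("agent.agent_name")
for idx in report_df.index:
    n_completed, n_total = report_df.loc[idx, "n_completed"].split("/")
    print(idx, "errors:", report_df.loc[idx, "n_err"], "complete:", n_completed == n_total)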
Hence old_info.agent_name != info.agent_name\n for key in info.keys():\n if key in (\"date\", \"avg_reward\", \"std_err\", \"n_completed\", \"n_err\"):\n continue\n if info[key] != old_info[key]:\n _raise_or_warn(\n f\"Reproducibility info already exist and is not compatible.\"\n f\"Key {key} has changed from {old_info[key]} to {info[key]}.\"\n f\"Set strict_reproducibility=False to bypass this error.\",\n raise_error=raise_if_incompatible,\n )\n\n\ndef _raise_or_warn(msg, raise_error=True):\n if raise_error:\n raise ValueError(msg)\n else:\n logging.warning(msg)\n\n\ndef _verify_report(report_df: pd.DataFrame, agent_names=list[str], strict_reproducibility=True):\n\n report_df = report_df.reset_index()\n\n unique_agent_names = report_df[\"agent.agent_name\"].unique()\n if set(agent_names) != set(unique_agent_names):\n raise ValueError(\n f\"Agent names in the report {unique_agent_names} do not match the agent names {agent_names}.\"\n )\n if len(set(agent_names)) != len(agent_names):\n raise ValueError(f\"Duplicate agent names {agent_names}.\")\n\n report_df = report_df.set_index(\"agent.agent_name\", inplace=False)\n\n for idx in report_df.index:\n n_err = report_df.loc[idx, \"n_err\"].item()\n n_completed, n_total = report_df.loc[idx, \"n_completed\"].split(\"/\")\n if n_err > 0:","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util._verify_report","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util._verify_report#L273-L301","kind":"function","name":"_verify_report","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":273,"end_line":301,"context_start_line":253,"context_end_line":321,"code":" # error only has a subset of agents. Hence old_info.agent_name != info.agent_name\n for key in info.keys():\n if key in (\"date\", \"avg_reward\", \"std_err\", \"n_completed\", \"n_err\"):\n continue\n if info[key] != old_info[key]:\n _raise_or_warn(\n f\"Reproducibility info already exist and is not compatible.\"\n f\"Key {key} has changed from {old_info[key]} to {info[key]}.\"\n f\"Set strict_reproducibility=False to bypass this error.\",\n raise_error=raise_if_incompatible,\n )\n\n\ndef _raise_or_warn(msg, raise_error=True):\n if raise_error:\n raise ValueError(msg)\n else:\n logging.warning(msg)\n\n\ndef _verify_report(report_df: pd.DataFrame, agent_names=list[str], strict_reproducibility=True):\n\n report_df = report_df.reset_index()\n\n unique_agent_names = report_df[\"agent.agent_name\"].unique()\n if set(agent_names) != set(unique_agent_names):\n raise ValueError(\n f\"Agent names in the report {unique_agent_names} do not match the agent names {agent_names}.\"\n )\n if len(set(agent_names)) != len(agent_names):\n raise ValueError(f\"Duplicate agent names {agent_names}.\")\n\n report_df = report_df.set_index(\"agent.agent_name\", inplace=False)\n\n for idx in report_df.index:\n n_err = report_df.loc[idx, \"n_err\"].item()\n n_completed, n_total = report_df.loc[idx, \"n_completed\"].split(\"/\")\n if n_err > 0:\n _raise_or_warn(\n f\"Experiment {idx} has {n_err} errors. Please rerun the study and make sure all tasks are completed.\",\n raise_error=strict_reproducibility,\n )\n if n_completed != n_total:\n _raise_or_warn(\n f\"Experiment {idx} has {n_completed} completed tasks out of {n_total}. 
\"\n f\"Please rerun the study and make sure all tasks are completed.\",\n raise_error=strict_reproducibility,\n )\n return report_df\n\n\ndef _get_csv_headers(file_path: str) -> list[str]:\n with open(file_path, \"r\", newline=\"\") as file:\n reader = csv.reader(file)\n try:\n headers = next(reader)\n except StopIteration:\n headers = None\n return headers\n\n\ndef _add_result_to_info(info: dict, report_df: pd.DataFrame):\n \"\"\"Extracts the results from the report and adds them to the info dict inplace\"\"\"\n\n for key in (\"avg_reward\", \"std_err\", \"n_err\", \"n_completed\"):\n value = report_df.loc[info[\"agent_name\"], key]\n if hasattr(value, \"item\"):\n value = value.item()\n info[key] = value","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util._get_csv_headers","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util._get_csv_headers#L304-L311","kind":"function","name":"_get_csv_headers","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":304,"end_line":311,"context_start_line":284,"context_end_line":331,"code":"\n report_df = report_df.set_index(\"agent.agent_name\", inplace=False)\n\n for idx in report_df.index:\n n_err = report_df.loc[idx, \"n_err\"].item()\n n_completed, n_total = report_df.loc[idx, \"n_completed\"].split(\"/\")\n if n_err > 0:\n _raise_or_warn(\n f\"Experiment {idx} has {n_err} errors. Please rerun the study and make sure all tasks are completed.\",\n raise_error=strict_reproducibility,\n )\n if n_completed != n_total:\n _raise_or_warn(\n f\"Experiment {idx} has {n_completed} completed tasks out of {n_total}. \"\n f\"Please rerun the study and make sure all tasks are completed.\",\n raise_error=strict_reproducibility,\n )\n return report_df\n\n\ndef _get_csv_headers(file_path: str) -> list[str]:\n with open(file_path, \"r\", newline=\"\") as file:\n reader = csv.reader(file)\n try:\n headers = next(reader)\n except StopIteration:\n headers = None\n return headers\n\n\ndef _add_result_to_info(info: dict, report_df: pd.DataFrame):\n \"\"\"Extracts the results from the report and adds them to the info dict inplace\"\"\"\n\n for key in (\"avg_reward\", \"std_err\", \"n_err\", \"n_completed\"):\n value = report_df.loc[info[\"agent_name\"], key]\n if hasattr(value, \"item\"):\n value = value.item()\n info[key] = value\n\n\ndef append_to_journal(\n info, report_df: pd.DataFrame, journal_path=None, strict_reproducibility=True\n):\n \"\"\"Append the info and results to the reproducibility journal.\"\"\"\n if journal_path is None:\n try:\n _get_repo(agentlab) # if not based on git clone, this will raise an error\n journal_path = (","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util._add_result_to_info","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util._add_result_to_info#L314-L321","kind":"function","name":"_add_result_to_info","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":314,"end_line":321,"context_start_line":294,"context_end_line":341,"code":" )\n if n_completed != n_total:\n _raise_or_warn(\n f\"Experiment {idx} has {n_completed} completed tasks out of {n_total}. 
\"\n f\"Please rerun the study and make sure all tasks are completed.\",\n raise_error=strict_reproducibility,\n )\n return report_df\n\n\ndef _get_csv_headers(file_path: str) -> list[str]:\n with open(file_path, \"r\", newline=\"\") as file:\n reader = csv.reader(file)\n try:\n headers = next(reader)\n except StopIteration:\n headers = None\n return headers\n\n\ndef _add_result_to_info(info: dict, report_df: pd.DataFrame):\n \"\"\"Extracts the results from the report and adds them to the info dict inplace\"\"\"\n\n for key in (\"avg_reward\", \"std_err\", \"n_err\", \"n_completed\"):\n value = report_df.loc[info[\"agent_name\"], key]\n if hasattr(value, \"item\"):\n value = value.item()\n info[key] = value\n\n\ndef append_to_journal(\n info, report_df: pd.DataFrame, journal_path=None, strict_reproducibility=True\n):\n \"\"\"Append the info and results to the reproducibility journal.\"\"\"\n if journal_path is None:\n try:\n _get_repo(agentlab) # if not based on git clone, this will raise an error\n journal_path = (\n Path(agentlab.__file__).parent.parent.parent / \"reproducibility_journal.csv\"\n )\n except InvalidGitRepositoryError:\n logging.warning(\n \"Could not find a git repository. Saving the journal to the results directory.\"\n \"To add to the journal, git clone agentlab and use `pip install -e .`\"\n )\n journal_path = RESULTS_DIR / \"reproducibility_journal.csv\"\n\n logging.info(f\"Appending to journal {journal_path}\")","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util.append_to_journal","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util.append_to_journal#L324-L374","kind":"function","name":"append_to_journal","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":324,"end_line":374,"context_start_line":304,"context_end_line":374,"code":"def _get_csv_headers(file_path: str) -> list[str]:\n with open(file_path, \"r\", newline=\"\") as file:\n reader = csv.reader(file)\n try:\n headers = next(reader)\n except StopIteration:\n headers = None\n return headers\n\n\ndef _add_result_to_info(info: dict, report_df: pd.DataFrame):\n \"\"\"Extracts the results from the report and adds them to the info dict inplace\"\"\"\n\n for key in (\"avg_reward\", \"std_err\", \"n_err\", \"n_completed\"):\n value = report_df.loc[info[\"agent_name\"], key]\n if hasattr(value, \"item\"):\n value = value.item()\n info[key] = value\n\n\ndef append_to_journal(\n info, report_df: pd.DataFrame, journal_path=None, strict_reproducibility=True\n):\n \"\"\"Append the info and results to the reproducibility journal.\"\"\"\n if journal_path is None:\n try:\n _get_repo(agentlab) # if not based on git clone, this will raise an error\n journal_path = (\n Path(agentlab.__file__).parent.parent.parent / \"reproducibility_journal.csv\"\n )\n except InvalidGitRepositoryError:\n logging.warning(\n \"Could not find a git repository. 
Saving the journal to the results directory.\"\n \"To add to the journal, git clone agentlab and use `pip install -e .`\"\n )\n journal_path = RESULTS_DIR / \"reproducibility_journal.csv\"\n\n logging.info(f\"Appending to journal {journal_path}\")\n\n if len(report_df) != len(info[\"agent_names\"]):\n raise ValueError(\n \"Mismatch between the number of agents in reproducibility info and the summary report.\"\n )\n\n report_df = _verify_report(\n report_df, info[\"agent_names\"], strict_reproducibility=strict_reproducibility\n )\n\n rows = []\n headers = None\n if journal_path.exists():\n headers = _get_csv_headers(journal_path)\n\n if headers is None: # first creation\n headers = list(info.keys())\n headers[headers.index(\"agent_names\")] = \"agent_name\"\n rows.append(headers)\n\n for agent_name in info[\"agent_names\"]:\n info_copy = info.copy()\n del info_copy[\"agent_names\"]\n info_copy[\"agent_name\"] = agent_name\n\n _add_result_to_info(info_copy, report_df)\n\n rows.append([str(info_copy[key]) for key in headers])\n\n with open(journal_path, \"a\", newline=\"\") as file:\n writer = csv.writer(file)\n for row in rows:\n writer.writerow(row)","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproducibility_util.add_git_info","uri":"program://AgentLab/function/src.agentlab.experiments.reproducibility_util.add_git_info#L222-L243","kind":"function","name":"add_git_info","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":222,"end_line":243,"context_start_line":202,"context_end_line":263,"code":" agent_names = [agent_names]\n\n try:\n repo = _get_repo(agentlab)\n except InvalidGitRepositoryError:\n repo = None\n\n info = {\n \"git_user\": _get_git_username(repo),\n \"agent_names\": agent_names,\n \"benchmark\": benchmark.name,\n \"study_id\": study_id,\n \"comment\": comment,\n \"benchmark_version\": _get_benchmark_version(benchmark, allow_bypass_benchmark_version),\n \"date\": datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\"),\n \"os\": f\"{platform.system()} ({platform.version()})\",\n \"python_version\": platform.python_version(),\n \"playwright_version\": metadata.distribution(\"playwright\").version,\n }\n\n def add_git_info(module_name, module):\n git_hash, modified_files = _get_git_info(module, changes_white_list)\n\n modified_files_str = \"\\n\".join([f\" {status}: {file}\" for status, file in modified_files])\n\n if len(modified_files) > 0:\n msg = (\n f\"Module {module_name} has uncommitted changes. \"\n f\"Modified files: \\n{modified_files_str}\\n\"\n )\n if ignore_changes:\n logging.warning(\n msg + \"Ignoring changes as requested and proceeding to experiments.\"\n )\n else:\n raise ValueError(\n msg + \"Please commit or stash your changes before running the experiment.\"\n )\n\n info[f\"{module_name}_version\"] = module.__version__\n info[f\"{module_name}_git_hash\"] = git_hash\n info[f\"{module_name}__local_modifications\"] = modified_files_str\n\n add_git_info(\"agentlab\", agentlab)\n add_git_info(\"browsergym\", core)\n return info\n\n\ndef assert_compatible(info: dict, old_info: dict, raise_if_incompatible=True):\n \"\"\"Make sure that the two info dicts are compatible.\"\"\"\n # TODO may need to adapt if there are multiple agents, and the re-run on\n # error only has a subset of agents. 
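# Simplified sketch of the row layout append_to_journal produces (it omits
# the result columns merged in by _add_result_to_info): one info dict with an
# "agent_names" list becomes one CSV row per agent under an "agent_name"
# header. Names and values here are hypothetical.
info = {"git_user": "alice", "agent_names": ["agent_a", "agent_b"], "benchmark": "miniwob"}
headers = list(info.keys())
headers[headers.index("agent_names")] = "agent_name"
rows = []
for agent_name in info["agent_names"]:
    row = {k: v for k, v in info.items() if k != "agent_names"}
    row["agent_name"] = agent_name
    rows.append([str(row[key]) for key in headers])
print(headers)  # ['git_user', 'agent_name', 'benchmark']
print(rows)     # [['alice', 'agent_a', 'miniwob'], ['alice', 'agent_b', 'miniwob']]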
Hence old_info.agent_name != info.agent_name\n for key in info.keys():\n if key in (\"date\", \"avg_reward\", \"std_err\", \"n_completed\", \"n_err\"):\n continue\n if info[key] != old_info[key]:\n _raise_or_warn(\n f\"Reproducibility info already exist and is not compatible.\"\n f\"Key {key} has changed from {old_info[key]} to {info[key]}.\"\n f\"Set strict_reproducibility=False to bypass this error.\",\n raise_error=raise_if_incompatible,\n )","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop","uri":"program://AgentLab/module/src.agentlab.experiments.loop#L1-L955","kind":"module","name":"src.agentlab.experiments.loop","path":"src/agentlab/experiments/loop.py","language":"python","start_line":1,"end_line":955,"context_start_line":1,"context_end_line":955,"code":"import gzip\nimport importlib.metadata\nimport json\nimport logging\nimport os\nimport pickle\nimport re\nimport sys\nimport time\nimport traceback\nimport uuid\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom dataclasses import asdict, dataclass, field, is_dataclass\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Optional\n\nimport gymnasium as gym\nimport numpy as np\nfrom browsergym.core.chat import Chat\nfrom browsergym.experiments.agent import Agent\nfrom browsergym.experiments.utils import count_tokens\nfrom dataclasses_json import DataClassJsonMixin\nfrom PIL import Image\nfrom tqdm import tqdm\n\ntry:\n from agentlab.agents.tapeagent import TapeAgent, save_tape\nexcept ImportError:\n TapeAgent = None\n\n\nlogger = logging.getLogger(__name__)\n\nSEED_MAX = 2 ^ 32 # arbitrary max value (exclusive), seems large enough\n\n\n@dataclass\nclass EnvArgs(DataClassJsonMixin):\n task_name: str\n task_seed: Optional[int] = None\n max_steps: Optional[int] = None\n headless: bool = True\n record_video: bool = False\n wait_for_user_message: bool = False\n viewport: Optional[dict] = None # use default value from BrowserGym\n slow_mo: Optional[int] = None # use default value from BrowserGym\n storage_state: Optional[str | Path | dict] = None\n task_kwargs: Optional[dict] = None # use default value from BrowserGym\n pre_observation_delay: float = None # seconds, wait for JS events to be fired\n\n def make_env(\n self, action_mapping, exp_dir, exp_task_kwargs: dict = {}, use_raw_page_output=True\n ):\n \"\"\"\n Instantiates the BrowserGym environment corresponding to the arguments (with some tweaks).\n\n Args:\n action_mapping: overrides the action mapping of the environment.\n exp_dir: will set some environment parameters (e.g., record_video_dir) with respect to the directory where the experiment is running.\n exp_task_kwargs: use with caution! Will override task parameters to experiment-specific values. 
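# Note on SEED_MAX in the module above: "^" is bitwise XOR in Python, so
# "2 ^ 32" evaluates to 34 rather than 2**32, and task seeds drawn with
# np.random.randint(0, SEED_MAX) fall in [0, 34). Exponentiation would be:
print(2 ^ 32)  # 34 (bitwise XOR)
print(2**32)   # 4294967296 (likely the intended upper bound)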
Useful to set different server configs for different experiments, or output file paths within the experiment's folder (e.g., assistantbench).\n use_raw_page_output: if True, the environment will also return raw page output in the observation.\n\n Returns:\n env: the gym environment.\n \"\"\"\n extra_kwargs = {}\n if self.record_video:\n extra_kwargs[\"record_video_dir\"] = exp_dir\n if self.viewport:\n extra_kwargs[\"viewport\"] = self.viewport\n if self.slow_mo is not None:\n extra_kwargs[\"slow_mo\"] = self.slow_mo\n if self.pre_observation_delay is not None:\n extra_kwargs[\"pre_observation_delay\"] = self.pre_observation_delay\n if self.storage_state:\n extra_kwargs[\"pw_context_kwargs\"] = {\"storage_state\": self.storage_state}\n if self.task_kwargs is not None:\n extra_kwargs[\"task_kwargs\"] = self.task_kwargs\n if exp_task_kwargs:\n extra_kwargs[\"task_kwargs\"] = extra_kwargs.get(\"task_kwargs\", {}) | exp_task_kwargs\n\n # assistantbench hack, write the task output (agent prediction) to a file in the experiment's directory\n # TODO: find a better way to deal with this\n if self.task_name.startswith(\"assistantbench.test\"):\n extra_kwargs[\"task_kwargs\"] = extra_kwargs.get(\"task_kwargs\", {}) | {\n \"output_file\": exp_dir / \"assistantbench-prediction.json\"\n }\n\n return gym.make(\n _get_env_name(self.task_name),\n disable_env_checker=True,\n max_episode_steps=self.max_steps,\n headless=self.headless,\n wait_for_user_message=self.wait_for_user_message,\n action_mapping=action_mapping, # action mapping is provided by the agent\n use_raw_page_output=use_raw_page_output,\n **extra_kwargs,\n )\n\n\n@dataclass\nclass AbstractAgentArgs(ABC):\n \"\"\"A template class that defines the required signature of an agent's arguments.\"\"\"\n\n agent_name: str = None # type: ignore\n\n def __post_init__(self):\n if self.agent_name is None:\n self.agent_name = self.__class__.__name__\n\n def prepare(self):\n \"\"\"Prepare the agent's LLM models before running the experiment.\"\"\"\n pass\n\n def close(self):\n \"\"\"Close the agent's LLM models after running the experiment.\"\"\"\n pass\n\n @abstractmethod\n def make_agent(self) -> Agent:\n \"\"\"Comply the experiments.loop API for instantiating the agent.\"\"\"\n\n\ndef save_package_versions(exp_dir: Path):\n \"\"\"Save the versions of the installed packages in the experiment directory.\"\"\"\n python_dists = \"\\n\".join(\n sorted(\n [\n f\"{dist.metadata['Name']}=={dist.metadata['Version']}\"\n for dist in importlib.metadata.distributions()\n ]\n )\n )\n (exp_dir / \"package_versions.txt\").write_text(python_dists)\n\n\n@dataclass\nclass StepTimestamps:\n env_start: float = 0\n action_exec_start: float = 0 # to extract begining of visual action from video\n action_exec_stop: float = 0 # to extract end of visual action from video\n action_exect_after_timeout: float = 0\n env_stop: float = 0\n agent_start: float = 0\n agent_stop: float = 0\n wait_for_page_loading_start: float = 0\n wait_for_page_loading_stop: float = 0\n validation_start: float = 0\n validation_stop: float = 0\n get_observation_start: float = 0\n get_observation_stop: float = 0\n\n\n@dataclass\nclass StepInfo:\n \"\"\"Collects information about step that will be saved and reloaded.\n Helper functions only modify the dataclass attributes and helps keeping the\n information organized.\n\n Attributes:\n -----------\n step: int\n The step number of the episode.\n obs: dict\n The observation of the environment.\n reward: float\n The reward of the step.\n raw_reward: float\n 
The raw reward of the step.\n terminated: bool\n Whether the episode is terminated i.e. reached a terminal state.\n truncated: bool\n Whether the episode is truncated i.e. reached a maximum number of steps.\n action: str\n The action taken by the agent.\n agent_info: dict\n Additional information from the agent.\n stats: dict\n Extra statistics about the step.\n profiling: StepTimestamps\n Timestamps of the different events during the episode.\n \"\"\"\n\n step: int = None\n obs: dict = None\n reward: float = 0\n raw_reward: float = 0\n terminated: bool = None\n truncated: bool = None\n action: str = None\n agent_info: dict = field(default_factory=dict)\n stats: dict = None\n profiling: StepTimestamps = field(default_factory=StepTimestamps)\n task_info: dict = None\n\n def from_step(self, env: gym.Env, action: str, obs_preprocessor: callable):\n t = self.profiling\n t.env_start = time.time()\n self.obs, self.reward, self.terminated, self.truncated, env_info = env.step(action)\n t.env_stop = time.time()\n\n self.task_info = env_info.get(\"task_info\", None)\n\n self.raw_reward = env_info.get(\"RAW_REWARD_GLOBAL\", None)\n\n t.action_exec_start = env_info[\"action_exec_start\"] # start\n t.action_exect_after_timeout = env_info[\"action_exec_stop\"]\n t.action_exec_stop = env_info[\"action_exec_stop\"] - env_info[\"action_exec_timeout\"]\n t.wait_for_page_loading_start = env_info.get(\"wait_for_page_loading_start\", None)\n t.wait_for_page_loading_stop = env_info.get(\"wait_for_page_loading_stop\", None)\n t.validation_start = env_info.get(\"validation_start\", None)\n t.validation_stop = env_info.get(\"validation_stop\", None)\n t.get_observation_start = env_info.get(\"get_observation_start\", None)\n t.get_observation_stop = env_info.get(\"get_observation_stop\", None)\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n def from_action(self, agent: Agent):\n self.profiling.agent_start = time.time()\n self.action, self.agent_info = agent.get_action(self.obs.copy())\n self.profiling.agent_stop = time.time()\n\n self.make_stats()\n\n return self.action\n\n def from_reset(self, env: gym.Env, seed: int, obs_preprocessor: callable):\n t = self.profiling\n t.env_start = time.time()\n self.obs, env_info = env.reset(seed=seed)\n self.reward, self.terminated, self.truncated = 0, False, False\n t.env_stop = time.time()\n\n t.action_exec_start = env_info.get(\"recording_start_time\", t.env_start)\n t.action_exect_after_timeout = t.env_stop\n t.action_exec_stop = t.env_stop\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n @property\n def is_done(self):\n return self.terminated or self.truncated\n\n def make_stats(self):\n if isinstance(self.obs, dict):\n stats = {\n f\"n_token_{key}\": count_tokens(val)\n for key, val in self.obs.items()\n if isinstance(val, str)\n }\n else:\n stats = {}\n stats.update(self.agent_info.pop(\"stats\", {}))\n\n t = self.profiling\n stats[\"step_elapsed\"] = t.env_stop - t.env_start\n stats[\"agent_elapsed\"] = t.agent_stop - t.agent_start\n\n self.stats = stats\n\n def save_step_info(self, exp_dir, save_json=False, save_screenshot=True, save_som=False):\n # special treatment for some of the observation fields\n if isinstance(self.obs, dict):\n # save screenshots to separate files\n screenshot = self.obs.pop(\"screenshot\", None)\n screenshot_som = self.obs.pop(\"screenshot_som\", None)\n\n if save_screenshot and screenshot is not None:\n img = Image.fromarray(screenshot)\n img.save(exp_dir / f\"screenshot_step_{self.step}.png\")\n\n if 
save_som and screenshot_som is not None:\n img = Image.fromarray(screenshot_som)\n img.save(exp_dir / f\"screenshot_som_step_{self.step}.png\")\n\n # save goal object (which might contain images) to a separate file to save space\n if self.obs.get(\"goal_object\", False):\n # save the goal object only once (goal should never change once setup)\n goal_object_file = Path(exp_dir) / \"goal_object.pkl.gz\"\n if not goal_object_file.exists():\n with gzip.open(goal_object_file, \"wb\") as f:\n pickle.dump(self.obs[\"goal_object\"], f)\n # set goal_object to a special placeholder value, which indicates it should be loaded from a separate file\n self.obs[\"goal_object\"] = None\n\n with gzip.open(exp_dir / f\"step_{self.step}.pkl.gz\", \"wb\") as f:\n pickle.dump(self, f)\n\n if save_json:\n with open(exp_dir / \"steps_info.json\", \"w\") as f:\n json.dump(self, f, indent=4, cls=DataclassJSONEncoder)\n\n if isinstance(self.obs, dict):\n # add the screenshots back to the obs\n # why do we need this?\n if screenshot is not None:\n self.obs[\"screenshot\"] = screenshot\n if screenshot_som is not None:\n self.obs[\"screenshot_som\"] = screenshot_som\n\n\n@dataclass\nclass ExpArgs:\n \"\"\"Arguments to run an experiment, i.e. run agent in an environment until done.\n\n This dataclass is used to store experiments arguments. It contains\n agent_args and env_args which follows the same principle. It contains helper\n functions to prepare and run experiments.\n\n Attributes:\n -----------\n agent_args: AbstractAgentArgs\n The arguments to instantiate the agent.\n env_args: EnvArgs\n The arguments to instantiate the environment.\n exp_dir: str\n The directory where the experiment will be saved.\n exp_name: str\n The name of the experiment. If None, it will be generated from the\n agent and environment names.\n enable_debug: bool\n If python is running in debug mode and `enable_debug` is True, errors\n will be raised instead of only logged\n error_msg: str\n Error that occured while running the experiment (if any).\n stack_trace: str\n Stack trace of the error (if any).\n order: int (internal)\n The order of the experiment in the batch. 
It is used to keep track of\n the original order of the experiments in case they are shuffled.\n \"\"\"\n\n agent_args: AbstractAgentArgs\n env_args: EnvArgs\n exp_dir: str = None\n exp_name: str = None\n enable_debug: bool = True\n err_msg: str = None\n stack_trace: str = None\n order: int = None # use to keep the original order the experiments were meant to be launched.\n logging_level: int = logging.INFO\n logging_level_stdout: int = logging.INFO\n exp_id: str = None\n depends_on: tuple[str] = ()\n save_screenshot: bool = True\n save_som: bool = False\n\n def make_id(self):\n \"\"\"Create a unique id for the experiment.\"\"\"\n if self.exp_id is None:\n self.exp_id = str(uuid.uuid4())\n\n def prepare(self, exp_root):\n \"\"\"Prepare the experiment directory and save the experiment arguments.\n\n This enables inspecting experiments that are not run yet.\n\n Args:\n exp_root: str\n The root directory where the experiment will be saved.\n \"\"\"\n if self.env_args.task_seed is None:\n self.env_args.task_seed = np.random.randint(0, SEED_MAX)\n\n if self.exp_name is None:\n task_name = self.env_args.task_name\n self.exp_name = f\"{self.agent_args.agent_name}_on_{task_name}_{self.env_args.task_seed}\"\n\n # if exp_dir exists, it means it's a re-run, move the old one\n if self.exp_dir is not None:\n _move_old_exp(self.exp_dir)\n\n self.make_id()\n\n self.exp_date = datetime.now()\n self._make_dir(exp_root)\n\n self.exp_dir.mkdir(parents=True, exist_ok=True)\n with open(self.exp_dir / \"exp_args.pkl\", \"wb\") as f:\n pickle.dump(self, f)\n\n def _make_dir(self, exp_root):\n \"\"\"Create a unique directory for the experiment.\"\"\"\n date_str = self.exp_date.strftime(\"%Y-%m-%d_%H-%M-%S\")\n exp_str = re.sub(\n r\"[\\/:*?<>|]\", \"_\", self.exp_name\n ) # sanitize exp_name to be used as a file name (substitute forbidden characters)\n\n for i in range(1000):\n if i >= 999: # make sure we don't loop forever\n raise ValueError(\"Could not find a unique name for the experiment directory.\")\n\n tag = f\"_{i}\" if i > 0 else \"\"\n self.exp_dir = Path(exp_root) / f\"{date_str}_{exp_str}{tag}\"\n if not self.exp_dir.exists():\n break\n\n # TODO distinguish between agent error and environment or system error. 
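# Illustration (hypothetical exp_name) of the sanitization in _make_dir above:
# characters that are forbidden in file names are replaced by underscores
# before the experiment directory is created.
import re

exp_name = "Agent<v1>_on_task?seed:42"
print(re.sub(r"[\/:*?<>|]", "_", exp_name))  # Agent_v1__on_task_seed_42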
e.g.\n # the parsing error of an action should not be re-run.\n def run(self):\n \"\"\"Run the experiment and save the results\"\"\"\n # start writing logs to run logfile\n self._set_logger()\n\n # log python environment info\n save_package_versions(Path(self.exp_dir))\n\n episode_info = []\n agent = None\n env, step_info, err_msg, stack_trace = None, None, None, None\n try:\n logger.info(f\"Running experiment {self.exp_name} in:\\n {self.exp_dir}\")\n agent = self.agent_args.make_agent()\n if hasattr(agent, \"set_task_name\"):\n agent.set_task_name(self.env_args.task_name)\n\n logger.debug(\"Agent created.\")\n\n env = self.env_args.make_env(\n action_mapping=agent.action_set.to_python_code,\n exp_dir=self.exp_dir,\n use_raw_page_output=getattr(self.agent_args, \"use_raw_page_output\", False),\n )\n\n logger.debug(\"Environment created.\")\n step_info = StepInfo(step=0)\n episode_info = [step_info]\n step_info.from_reset(\n env, seed=self.env_args.task_seed or 0, obs_preprocessor=agent.obs_preprocessor\n )\n logger.debug(\"Environment reset.\")\n\n while not step_info.is_done: # set a limit\n logger.debug(f\"Starting step {step_info.step}.\")\n action = step_info.from_action(agent)\n logger.debug(f\"Agent chose action:\\n {action}\")\n\n if action is None:\n # will end the episode after saving the step info.\n step_info.truncated = True\n\n step_info.save_step_info(\n self.exp_dir, save_screenshot=self.save_screenshot, save_som=self.save_som\n )\n logger.debug(\"Step info saved.\")\n\n if hasattr(env.unwrapped, \"chat\") and isinstance(env.unwrapped.chat, Chat):\n _send_chat_info(env.unwrapped.chat, action, step_info.agent_info)\n logger.debug(\"Chat info sent.\")\n\n if action is None:\n logger.debug(\"Agent returned None action. Ending episode.\")\n break\n\n step_info = StepInfo(step=step_info.step + 1)\n episode_info.append(step_info)\n\n logger.debug(\"Sending action to environment.\")\n step_info.from_step(env, action, obs_preprocessor=agent.obs_preprocessor)\n logger.debug(\"Environment stepped.\")\n if step_info.is_done:\n logger.debug(\n f\"Episode done: terminated: {step_info.terminated}, truncated: {step_info.truncated}.\"\n )\n\n except Exception as e:\n err_msg = f\"Exception uncaught by agent or environment in task {self.env_args.task_name}.\\n{type(e).__name__}:\\n{e}\"\n stack_trace = traceback.format_exc()\n\n self.err_msg = err_msg\n self.stack_trace = stack_trace\n\n logger.warning(err_msg + \"\\n\" + stack_trace)\n if _is_debugging() and self.enable_debug:\n logger.warning(\"Debug mode is enabled. 
Raising the error.\")\n raise\n\n finally:\n try:\n if step_info is not None:\n step_info.save_step_info(\n self.exp_dir, save_screenshot=self.save_screenshot, save_som=self.save_som\n )\n except Exception as e:\n logger.error(f\"Error while saving step info in the finally block: {e}\")\n try:\n if (\n not err_msg\n and len(episode_info) > 0\n and not (episode_info[-1].terminated or episode_info[-1].truncated)\n ):\n e = KeyboardInterrupt(\"Early termination??\")\n err_msg = f\"Exception uncaught by agent or environment in task {self.env_args.task_name}.\\n{type(e).__name__}:\\n{e}\"\n logger.info(\"Saving experiment info.\")\n self.save_summary_info(episode_info, Path(self.exp_dir), err_msg, stack_trace)\n if TapeAgent is not None and isinstance(agent, TapeAgent):\n task = getattr(env, \"task\", {})\n save_tape(self.exp_dir, episode_info, task, agent.final_tape)\n except Exception as e:\n logger.exception(f\"Error while saving experiment info: {e}\")\n try:\n if env is not None:\n env.close()\n except Exception as e:\n logger.exception(f\"Error while closing the environment: {e}\")\n try:\n self._unset_logger() # stop writing logs to run logfile\n except Exception as e:\n logger.exception(f\"Error while unsetting the logger: {e}\")\n\n def _set_logger(self):\n # output logging traces to a log file\n file_han\n# ... truncated ...","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":true} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.EnvArgs","uri":"program://AgentLab/class/src.agentlab.experiments.loop.EnvArgs#L40-L100","kind":"class","name":"EnvArgs","path":"src/agentlab/experiments/loop.py","language":"python","start_line":40,"end_line":100,"context_start_line":20,"context_end_line":120,"code":"import numpy as np\nfrom browsergym.core.chat import Chat\nfrom browsergym.experiments.agent import Agent\nfrom browsergym.experiments.utils import count_tokens\nfrom dataclasses_json import DataClassJsonMixin\nfrom PIL import Image\nfrom tqdm import tqdm\n\ntry:\n from agentlab.agents.tapeagent import TapeAgent, save_tape\nexcept ImportError:\n TapeAgent = None\n\n\nlogger = logging.getLogger(__name__)\n\nSEED_MAX = 2 ^ 32 # arbitrary max value (exclusive), seems large enough\n\n\n@dataclass\nclass EnvArgs(DataClassJsonMixin):\n task_name: str\n task_seed: Optional[int] = None\n max_steps: Optional[int] = None\n headless: bool = True\n record_video: bool = False\n wait_for_user_message: bool = False\n viewport: Optional[dict] = None # use default value from BrowserGym\n slow_mo: Optional[int] = None # use default value from BrowserGym\n storage_state: Optional[str | Path | dict] = None\n task_kwargs: Optional[dict] = None # use default value from BrowserGym\n pre_observation_delay: float = None # seconds, wait for JS events to be fired\n\n def make_env(\n self, action_mapping, exp_dir, exp_task_kwargs: dict = {}, use_raw_page_output=True\n ):\n \"\"\"\n Instantiates the BrowserGym environment corresponding to the arguments (with some tweaks).\n\n Args:\n action_mapping: overrides the action mapping of the environment.\n exp_dir: will set some environment parameters (e.g., record_video_dir) with respect to the directory where the experiment is running.\n exp_task_kwargs: use with caution! Will override task parameters to experiment-specific values. 
Useful to set different server configs for different experiments, or output file paths within the experiment's folder (e.g., assistantbench).\n use_raw_page_output: if True, the environment will also return raw page output in the observation.\n\n Returns:\n env: the gym environment.\n \"\"\"\n extra_kwargs = {}\n if self.record_video:\n extra_kwargs[\"record_video_dir\"] = exp_dir\n if self.viewport:\n extra_kwargs[\"viewport\"] = self.viewport\n if self.slow_mo is not None:\n extra_kwargs[\"slow_mo\"] = self.slow_mo\n if self.pre_observation_delay is not None:\n extra_kwargs[\"pre_observation_delay\"] = self.pre_observation_delay\n if self.storage_state:\n extra_kwargs[\"pw_context_kwargs\"] = {\"storage_state\": self.storage_state}\n if self.task_kwargs is not None:\n extra_kwargs[\"task_kwargs\"] = self.task_kwargs\n if exp_task_kwargs:\n extra_kwargs[\"task_kwargs\"] = extra_kwargs.get(\"task_kwargs\", {}) | exp_task_kwargs\n\n # assistantbench hack, write the task output (agent prediction) to a file in the experiment's directory\n # TODO: find a better way to deal with this\n if self.task_name.startswith(\"assistantbench.test\"):\n extra_kwargs[\"task_kwargs\"] = extra_kwargs.get(\"task_kwargs\", {}) | {\n \"output_file\": exp_dir / \"assistantbench-prediction.json\"\n }\n\n return gym.make(\n _get_env_name(self.task_name),\n disable_env_checker=True,\n max_episode_steps=self.max_steps,\n headless=self.headless,\n wait_for_user_message=self.wait_for_user_message,\n action_mapping=action_mapping, # action mapping is provided by the agent\n use_raw_page_output=use_raw_page_output,\n **extra_kwargs,\n )\n\n\n@dataclass\nclass AbstractAgentArgs(ABC):\n \"\"\"A template class that defines the required signature of an agent's arguments.\"\"\"\n\n agent_name: str = None # type: ignore\n\n def __post_init__(self):\n if self.agent_name is None:\n self.agent_name = self.__class__.__name__\n\n def prepare(self):\n \"\"\"Prepare the agent's LLM models before running the experiment.\"\"\"\n pass\n\n def close(self):\n \"\"\"Close the agent's LLM models after running the experiment.\"\"\"\n pass\n","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.AbstractAgentArgs","uri":"program://AgentLab/class/src.agentlab.experiments.loop.AbstractAgentArgs#L104-L123","kind":"class","name":"AbstractAgentArgs","path":"src/agentlab/experiments/loop.py","language":"python","start_line":104,"end_line":123,"context_start_line":84,"context_end_line":143,"code":" # assistantbench hack, write the task output (agent prediction) to a file in the experiment's directory\n # TODO: find a better way to deal with this\n if self.task_name.startswith(\"assistantbench.test\"):\n extra_kwargs[\"task_kwargs\"] = extra_kwargs.get(\"task_kwargs\", {}) | {\n \"output_file\": exp_dir / \"assistantbench-prediction.json\"\n }\n\n return gym.make(\n _get_env_name(self.task_name),\n disable_env_checker=True,\n max_episode_steps=self.max_steps,\n headless=self.headless,\n wait_for_user_message=self.wait_for_user_message,\n action_mapping=action_mapping, # action mapping is provided by the agent\n use_raw_page_output=use_raw_page_output,\n **extra_kwargs,\n )\n\n\n@dataclass\nclass AbstractAgentArgs(ABC):\n \"\"\"A template class that defines the required signature of an agent's arguments.\"\"\"\n\n agent_name: str = None # type: ignore\n\n def __post_init__(self):\n if self.agent_name is None:\n self.agent_name = 
self.__class__.__name__\n\n def prepare(self):\n \"\"\"Prepare the agent's LLM models before running the experiment.\"\"\"\n pass\n\n def close(self):\n \"\"\"Close the agent's LLM models after running the experiment.\"\"\"\n pass\n\n @abstractmethod\n def make_agent(self) -> Agent:\n \"\"\"Comply the experiments.loop API for instantiating the agent.\"\"\"\n\n\ndef save_package_versions(exp_dir: Path):\n \"\"\"Save the versions of the installed packages in the experiment directory.\"\"\"\n python_dists = \"\\n\".join(\n sorted(\n [\n f\"{dist.metadata['Name']}=={dist.metadata['Version']}\"\n for dist in importlib.metadata.distributions()\n ]\n )\n )\n (exp_dir / \"package_versions.txt\").write_text(python_dists)\n\n\n@dataclass\nclass StepTimestamps:\n env_start: float = 0\n action_exec_start: float = 0 # to extract begining of visual action from video\n action_exec_stop: float = 0 # to extract end of visual action from video","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.save_package_versions","uri":"program://AgentLab/function/src.agentlab.experiments.loop.save_package_versions#L126-L136","kind":"function","name":"save_package_versions","path":"src/agentlab/experiments/loop.py","language":"python","start_line":126,"end_line":136,"context_start_line":106,"context_end_line":156,"code":"\n agent_name: str = None # type: ignore\n\n def __post_init__(self):\n if self.agent_name is None:\n self.agent_name = self.__class__.__name__\n\n def prepare(self):\n \"\"\"Prepare the agent's LLM models before running the experiment.\"\"\"\n pass\n\n def close(self):\n \"\"\"Close the agent's LLM models after running the experiment.\"\"\"\n pass\n\n @abstractmethod\n def make_agent(self) -> Agent:\n \"\"\"Comply the experiments.loop API for instantiating the agent.\"\"\"\n\n\ndef save_package_versions(exp_dir: Path):\n \"\"\"Save the versions of the installed packages in the experiment directory.\"\"\"\n python_dists = \"\\n\".join(\n sorted(\n [\n f\"{dist.metadata['Name']}=={dist.metadata['Version']}\"\n for dist in importlib.metadata.distributions()\n ]\n )\n )\n (exp_dir / \"package_versions.txt\").write_text(python_dists)\n\n\n@dataclass\nclass StepTimestamps:\n env_start: float = 0\n action_exec_start: float = 0 # to extract begining of visual action from video\n action_exec_stop: float = 0 # to extract end of visual action from video\n action_exect_after_timeout: float = 0\n env_stop: float = 0\n agent_start: float = 0\n agent_stop: float = 0\n wait_for_page_loading_start: float = 0\n wait_for_page_loading_stop: float = 0\n validation_start: float = 0\n validation_stop: float = 0\n get_observation_start: float = 0\n get_observation_stop: float = 0\n\n\n@dataclass","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.StepTimestamps","uri":"program://AgentLab/class/src.agentlab.experiments.loop.StepTimestamps#L140-L153","kind":"class","name":"StepTimestamps","path":"src/agentlab/experiments/loop.py","language":"python","start_line":140,"end_line":153,"context_start_line":120,"context_end_line":173,"code":"\n @abstractmethod\n def make_agent(self) -> Agent:\n \"\"\"Comply the experiments.loop API for instantiating the agent.\"\"\"\n\n\ndef save_package_versions(exp_dir: Path):\n \"\"\"Save the versions of the installed packages in the experiment directory.\"\"\"\n 
python_dists = \"\\n\".join(\n sorted(\n [\n f\"{dist.metadata['Name']}=={dist.metadata['Version']}\"\n for dist in importlib.metadata.distributions()\n ]\n )\n )\n (exp_dir / \"package_versions.txt\").write_text(python_dists)\n\n\n@dataclass\nclass StepTimestamps:\n env_start: float = 0\n action_exec_start: float = 0 # to extract begining of visual action from video\n action_exec_stop: float = 0 # to extract end of visual action from video\n action_exect_after_timeout: float = 0\n env_stop: float = 0\n agent_start: float = 0\n agent_stop: float = 0\n wait_for_page_loading_start: float = 0\n wait_for_page_loading_stop: float = 0\n validation_start: float = 0\n validation_stop: float = 0\n get_observation_start: float = 0\n get_observation_stop: float = 0\n\n\n@dataclass\nclass StepInfo:\n \"\"\"Collects information about step that will be saved and reloaded.\n Helper functions only modify the dataclass attributes and helps keeping the\n information organized.\n\n Attributes:\n -----------\n step: int\n The step number of the episode.\n obs: dict\n The observation of the environment.\n reward: float\n The reward of the step.\n raw_reward: float\n The raw reward of the step.\n terminated: bool\n Whether the episode is terminated i.e. reached a terminal state.","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.StepInfo","uri":"program://AgentLab/class/src.agentlab.experiments.loop.StepInfo#L157-L303","kind":"class","name":"StepInfo","path":"src/agentlab/experiments/loop.py","language":"python","start_line":157,"end_line":303,"context_start_line":137,"context_end_line":323,"code":"\n\n@dataclass\nclass StepTimestamps:\n env_start: float = 0\n action_exec_start: float = 0 # to extract begining of visual action from video\n action_exec_stop: float = 0 # to extract end of visual action from video\n action_exect_after_timeout: float = 0\n env_stop: float = 0\n agent_start: float = 0\n agent_stop: float = 0\n wait_for_page_loading_start: float = 0\n wait_for_page_loading_stop: float = 0\n validation_start: float = 0\n validation_stop: float = 0\n get_observation_start: float = 0\n get_observation_stop: float = 0\n\n\n@dataclass\nclass StepInfo:\n \"\"\"Collects information about step that will be saved and reloaded.\n Helper functions only modify the dataclass attributes and helps keeping the\n information organized.\n\n Attributes:\n -----------\n step: int\n The step number of the episode.\n obs: dict\n The observation of the environment.\n reward: float\n The reward of the step.\n raw_reward: float\n The raw reward of the step.\n terminated: bool\n Whether the episode is terminated i.e. reached a terminal state.\n truncated: bool\n Whether the episode is truncated i.e. 
reached a maximum number of steps.\n action: str\n The action taken by the agent.\n agent_info: dict\n Additional information from the agent.\n stats: dict\n Extra statistics about the step.\n profiling: StepTimestamps\n Timestamps of the different events during the episode.\n \"\"\"\n\n step: int = None\n obs: dict = None\n reward: float = 0\n raw_reward: float = 0\n terminated: bool = None\n truncated: bool = None\n action: str = None\n agent_info: dict = field(default_factory=dict)\n stats: dict = None\n profiling: StepTimestamps = field(default_factory=StepTimestamps)\n task_info: dict = None\n\n def from_step(self, env: gym.Env, action: str, obs_preprocessor: callable):\n t = self.profiling\n t.env_start = time.time()\n self.obs, self.reward, self.terminated, self.truncated, env_info = env.step(action)\n t.env_stop = time.time()\n\n self.task_info = env_info.get(\"task_info\", None)\n\n self.raw_reward = env_info.get(\"RAW_REWARD_GLOBAL\", None)\n\n t.action_exec_start = env_info[\"action_exec_start\"] # start\n t.action_exect_after_timeout = env_info[\"action_exec_stop\"]\n t.action_exec_stop = env_info[\"action_exec_stop\"] - env_info[\"action_exec_timeout\"]\n t.wait_for_page_loading_start = env_info.get(\"wait_for_page_loading_start\", None)\n t.wait_for_page_loading_stop = env_info.get(\"wait_for_page_loading_stop\", None)\n t.validation_start = env_info.get(\"validation_start\", None)\n t.validation_stop = env_info.get(\"validation_stop\", None)\n t.get_observation_start = env_info.get(\"get_observation_start\", None)\n t.get_observation_stop = env_info.get(\"get_observation_stop\", None)\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n def from_action(self, agent: Agent):\n self.profiling.agent_start = time.time()\n self.action, self.agent_info = agent.get_action(self.obs.copy())\n self.profiling.agent_stop = time.time()\n\n self.make_stats()\n\n return self.action\n\n def from_reset(self, env: gym.Env, seed: int, obs_preprocessor: callable):\n t = self.profiling\n t.env_start = time.time()\n self.obs, env_info = env.reset(seed=seed)\n self.reward, self.terminated, self.truncated = 0, False, False\n t.env_stop = time.time()\n\n t.action_exec_start = env_info.get(\"recording_start_time\", t.env_start)\n t.action_exect_after_timeout = t.env_stop\n t.action_exec_stop = t.env_stop\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n @property\n def is_done(self):\n return self.terminated or self.truncated\n\n def make_stats(self):\n if isinstance(self.obs, dict):\n stats = {\n f\"n_token_{key}\": count_tokens(val)\n for key, val in self.obs.items()\n if isinstance(val, str)\n }\n else:\n stats = {}\n stats.update(self.agent_info.pop(\"stats\", {}))\n\n t = self.profiling\n stats[\"step_elapsed\"] = t.env_stop - t.env_start\n stats[\"agent_elapsed\"] = t.agent_stop - t.agent_start\n\n self.stats = stats\n\n def save_step_info(self, exp_dir, save_json=False, save_screenshot=True, save_som=False):\n # special treatment for some of the observation fields\n if isinstance(self.obs, dict):\n # save screenshots to separate files\n screenshot = self.obs.pop(\"screenshot\", None)\n screenshot_som = self.obs.pop(\"screenshot_som\", None)\n\n if save_screenshot and screenshot is not None:\n img = Image.fromarray(screenshot)\n img.save(exp_dir / f\"screenshot_step_{self.step}.png\")\n\n if save_som and screenshot_som is not None:\n img = Image.fromarray(screenshot_som)\n img.save(exp_dir / f\"screenshot_som_step_{self.step}.png\")\n\n # save goal object 
(which might contain images) to a separate file to save space\n if self.obs.get(\"goal_object\", False):\n # save the goal object only once (goal should never change once setup)\n goal_object_file = Path(exp_dir) / \"goal_object.pkl.gz\"\n if not goal_object_file.exists():\n with gzip.open(goal_object_file, \"wb\") as f:\n pickle.dump(self.obs[\"goal_object\"], f)\n # set goal_object to a special placeholder value, which indicates it should be loaded from a separate file\n self.obs[\"goal_object\"] = None\n\n with gzip.open(exp_dir / f\"step_{self.step}.pkl.gz\", \"wb\") as f:\n pickle.dump(self, f)\n\n if save_json:\n with open(exp_dir / \"steps_info.json\", \"w\") as f:\n json.dump(self, f, indent=4, cls=DataclassJSONEncoder)\n\n if isinstance(self.obs, dict):\n # add the screenshots back to the obs\n # why do we need this?\n if screenshot is not None:\n self.obs[\"screenshot\"] = screenshot\n if screenshot_som is not None:\n self.obs[\"screenshot_som\"] = screenshot_som\n\n\n@dataclass\nclass ExpArgs:\n \"\"\"Arguments to run an experiment, i.e. run agent in an environment until done.\n\n This dataclass is used to store experiments arguments. It contains\n agent_args and env_args which follows the same principle. It contains helper\n functions to prepare and run experiments.\n\n Attributes:\n -----------\n agent_args: AbstractAgentArgs\n The arguments to instantiate the agent.\n env_args: EnvArgs\n The arguments to instantiate the environment.\n exp_dir: str\n The directory where the experiment will be saved.\n exp_name: str\n The name of the experiment. If None, it will be generated from the","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.ExpArgs","uri":"program://AgentLab/class/src.agentlab.experiments.loop.ExpArgs#L307-L581","kind":"class","name":"ExpArgs","path":"src/agentlab/experiments/loop.py","language":"python","start_line":307,"end_line":581,"context_start_line":287,"context_end_line":601,"code":" # set goal_object to a special placeholder value, which indicates it should be loaded from a separate file\n self.obs[\"goal_object\"] = None\n\n with gzip.open(exp_dir / f\"step_{self.step}.pkl.gz\", \"wb\") as f:\n pickle.dump(self, f)\n\n if save_json:\n with open(exp_dir / \"steps_info.json\", \"w\") as f:\n json.dump(self, f, indent=4, cls=DataclassJSONEncoder)\n\n if isinstance(self.obs, dict):\n # add the screenshots back to the obs\n # why do we need this?\n if screenshot is not None:\n self.obs[\"screenshot\"] = screenshot\n if screenshot_som is not None:\n self.obs[\"screenshot_som\"] = screenshot_som\n\n\n@dataclass\nclass ExpArgs:\n \"\"\"Arguments to run an experiment, i.e. run agent in an environment until done.\n\n This dataclass is used to store experiments arguments. It contains\n agent_args and env_args which follows the same principle. It contains helper\n functions to prepare and run experiments.\n\n Attributes:\n -----------\n agent_args: AbstractAgentArgs\n The arguments to instantiate the agent.\n env_args: EnvArgs\n The arguments to instantiate the environment.\n exp_dir: str\n The directory where the experiment will be saved.\n exp_name: str\n The name of the experiment. 
If None, it will be generated from the\n agent and environment names.\n enable_debug: bool\n If python is running in debug mode and `enable_debug` is True, errors\n will be raised instead of only logged\n error_msg: str\n Error that occured while running the experiment (if any).\n stack_trace: str\n Stack trace of the error (if any).\n order: int (internal)\n The order of the experiment in the batch. It is used to keep track of\n the original order of the experiments in case they are shuffled.\n \"\"\"\n\n agent_args: AbstractAgentArgs\n env_args: EnvArgs\n exp_dir: str = None\n exp_name: str = None\n enable_debug: bool = True\n err_msg: str = None\n stack_trace: str = None\n order: int = None # use to keep the original order the experiments were meant to be launched.\n logging_level: int = logging.INFO\n logging_level_stdout: int = logging.INFO\n exp_id: str = None\n depends_on: tuple[str] = ()\n save_screenshot: bool = True\n save_som: bool = False\n\n def make_id(self):\n \"\"\"Create a unique id for the experiment.\"\"\"\n if self.exp_id is None:\n self.exp_id = str(uuid.uuid4())\n\n def prepare(self, exp_root):\n \"\"\"Prepare the experiment directory and save the experiment arguments.\n\n This enables inspecting experiments that are not run yet.\n\n Args:\n exp_root: str\n The root directory where the experiment will be saved.\n \"\"\"\n if self.env_args.task_seed is None:\n self.env_args.task_seed = np.random.randint(0, SEED_MAX)\n\n if self.exp_name is None:\n task_name = self.env_args.task_name\n self.exp_name = f\"{self.agent_args.agent_name}_on_{task_name}_{self.env_args.task_seed}\"\n\n # if exp_dir exists, it means it's a re-run, move the old one\n if self.exp_dir is not None:\n _move_old_exp(self.exp_dir)\n\n self.make_id()\n\n self.exp_date = datetime.now()\n self._make_dir(exp_root)\n\n self.exp_dir.mkdir(parents=True, exist_ok=True)\n with open(self.exp_dir / \"exp_args.pkl\", \"wb\") as f:\n pickle.dump(self, f)\n\n def _make_dir(self, exp_root):\n \"\"\"Create a unique directory for the experiment.\"\"\"\n date_str = self.exp_date.strftime(\"%Y-%m-%d_%H-%M-%S\")\n exp_str = re.sub(\n r\"[\\/:*?<>|]\", \"_\", self.exp_name\n ) # sanitize exp_name to be used as a file name (substitute forbidden characters)\n\n for i in range(1000):\n if i >= 999: # make sure we don't loop forever\n raise ValueError(\"Could not find a unique name for the experiment directory.\")\n\n tag = f\"_{i}\" if i > 0 else \"\"\n self.exp_dir = Path(exp_root) / f\"{date_str}_{exp_str}{tag}\"\n if not self.exp_dir.exists():\n break\n\n # TODO distinguish between agent error and environment or system error. 
e.g.\n # the parsing error of an action should not be re-run.\n def run(self):\n \"\"\"Run the experiment and save the results\"\"\"\n # start writing logs to run logfile\n self._set_logger()\n\n # log python environment info\n save_package_versions(Path(self.exp_dir))\n\n episode_info = []\n agent = None\n env, step_info, err_msg, stack_trace = None, None, None, None\n try:\n logger.info(f\"Running experiment {self.exp_name} in:\\n {self.exp_dir}\")\n agent = self.agent_args.make_agent()\n if hasattr(agent, \"set_task_name\"):\n agent.set_task_name(self.env_args.task_name)\n\n logger.debug(\"Agent created.\")\n\n env = self.env_args.make_env(\n action_mapping=agent.action_set.to_python_code,\n exp_dir=self.exp_dir,\n use_raw_page_output=getattr(self.agent_args, \"use_raw_page_output\", False),\n )\n\n logger.debug(\"Environment created.\")\n step_info = StepInfo(step=0)\n episode_info = [step_info]\n step_info.from_reset(\n env, seed=self.env_args.task_seed or 0, obs_preprocessor=agent.obs_preprocessor\n )\n logger.debug(\"Environment reset.\")\n\n while not step_info.is_done: # set a limit\n logger.debug(f\"Starting step {step_info.step}.\")\n action = step_info.from_action(agent)\n logger.debug(f\"Agent chose action:\\n {action}\")\n\n if action is None:\n # will end the episode after saving the step info.\n step_info.truncated = True\n\n step_info.save_step_info(\n self.exp_dir, save_screenshot=self.save_screenshot, save_som=self.save_som\n )\n logger.debug(\"Step info saved.\")\n\n if hasattr(env.unwrapped, \"chat\") and isinstance(env.unwrapped.chat, Chat):\n _send_chat_info(env.unwrapped.chat, action, step_info.agent_info)\n logger.debug(\"Chat info sent.\")\n\n if action is None:\n logger.debug(\"Agent returned None action. Ending episode.\")\n break\n\n step_info = StepInfo(step=step_info.step + 1)\n episode_info.append(step_info)\n\n logger.debug(\"Sending action to environment.\")\n step_info.from_step(env, action, obs_preprocessor=agent.obs_preprocessor)\n logger.debug(\"Environment stepped.\")\n if step_info.is_done:\n logger.debug(\n f\"Episode done: terminated: {step_info.terminated}, truncated: {step_info.truncated}.\"\n )\n\n except Exception as e:\n err_msg = f\"Exception uncaught by agent or environment in task {self.env_args.task_name}.\\n{type(e).__name__}:\\n{e}\"\n stack_trace = traceback.format_exc()\n\n self.err_msg = err_msg\n self.stack_trace = stack_trace\n\n logger.warning(err_msg + \"\\n\" + stack_trace)\n if _is_debugging() and self.enable_debug:\n logger.warning(\"Debug mode is enabled. 
Raising the error.\")\n raise\n\n finally:\n try:\n if step_info is not None:\n step_info.save_step_info(\n self.exp_dir, save_screenshot=self.save_screenshot, save_som=self.save_som\n )\n except Exception as e:\n logger.error(f\"Error while saving step info in the finally block: {e}\")\n try:\n if (\n not err_msg\n and len(episode_info) > 0\n and not (episode_info[-1].terminated or episode_info[-1].truncated)\n ):\n e = KeyboardInterrupt(\"Early termination??\")\n err_msg = f\"Exception uncaught by agent or environment in task {self.env_args.task_name}.\\n{type(e).__name__}:\\n{e}\"\n logger.info(\"Saving experiment info.\")\n self.save_summary_info(episode_info, Path(self.exp_dir), err_msg, stack_trace)\n if TapeAgent is not None and isinstance(agent, TapeAgent):\n task = getattr(env, \"task\", {})\n save_tape(self.exp_dir, episode_info, task, agent.final_tape)\n except Exception as e:\n logger.exception(f\"Error while saving experiment info: {e}\")\n try:\n if env is not None:\n env.close()\n except Exception as e:\n logger.exception(f\"Error while closing the environment: {e}\")\n try:\n self._unset_logger() # stop writing logs to run logfile\n except Exception as e:\n logger.exception(f\"Error while unsetting the logger: {e}\")\n\n def _set_logger(self):\n # output logging traces to a log file\n file_handler = logging.FileHandler(self.exp_dir / \"experiment.log\")\n file_handler.setLevel(self.logging_level) # same level as console outputs\n formatter = logging.Formatter(\n \"%(asctime)s - %(process)d - %(name)s - %(levelname)s - %(message)s\"\n )\n file_handler.setFormatter(formatter)\n # output handler\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(self.logging_level_stdout)\n stream_handler.setFormatter(formatter)\n # setup root logger\n root_logger = logging.getLogger()\n\n # remove previous stream handlers\n for handler in root_logger.handlers:\n if isinstance(handler, logging.StreamHandler):\n root_logger.removeHandler(handler)\n\n root_logger.setLevel(self.logging_level)\n root_logger.addHandler(file_handler)\n root_logger.addHandler(stream_handler)\n # setup openai logger (don't go below INFO verbosity)\n openai_logger = logging.getLogger(\"openai._base_client\")\n openai_logger.setLevel(max(logging.INFO, self.logging_level))\n\n self.logging_file_handler = file_handler\n\n def _unset_logger(self):\n root_logger = logging.getLogger()\n root_logger.removeHandler(self.logging_file_handler)\n\n def save_summary_info(\n self,\n episode_info: list[StepInfo],\n exp_dir: Path,\n err_msg: str | None,\n stack_trace: str | None,\n ):\n # bring err from agent_info to the top level\n if err_msg is None:\n err_msg, stack_trace = _extract_err_msg(episode_info)\n else:\n # useful until we get a proper place in agent_xray to view error\n # messages.\n if len(episode_info) == 0:\n episode_info.append(StepInfo())\n episode_info[-1].agent_info[\"err_msg\"] = err_msg\n episode_info[-1].agent_info[\"stack_trace\"] = stack_trace\n\n summary_info = dict(\n n_steps=len(episode_info) - 1,\n cum_reward=sum([step.reward for step in episode_info]),\n cum_raw_reward=sum([step.raw_reward for step in episode_info if step.raw_reward]),\n err_msg=err_msg,\n stack_trace=stack_trace,\n )\n for key, val in _aggregate_episode_stats(episode_info).items():\n summary_info[f\"stats.{key}\"] = val\n\n if len(episode_info) > 0:\n summary_info[\"terminated\"] = episode_info[-1].terminated\n summary_info[\"truncated\"] = episode_info[-1].truncated\n\n with open(exp_dir / \"summary_info.json\", \"w\") 
as f:\n json.dump(summary_info, f, indent=4)\n\n\ndef _extract_err_msg(episode_info: list[StepInfo]):\n \"\"\"Extract the last error message from the episode info.\"\"\"\n errors = [(None, None)]\n for step_info in episode_info:\n if step_info.agent_info is None:\n continue\n err_msg = step_info.agent_info.get(\"err_msg\", None)\n if err_msg is not None:\n errors.append((err_msg, step_info.agent_info.get(\"stack_trace\", None)))\n\n return errors[-1]\n\n\ndef _aggregate_episode_stats(episode_info: list[StepInfo]):\n \"\"\"Aggregate StepInfo.stats across episodes.\n\n It will compute the sum and max of each value in the stats dict.\n These two summaries should cover many use cases. If more are needed, the","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop._extract_err_msg","uri":"program://AgentLab/function/src.agentlab.experiments.loop._extract_err_msg#L584-L594","kind":"function","name":"_extract_err_msg","path":"src/agentlab/experiments/loop.py","language":"python","start_line":584,"end_line":594,"context_start_line":564,"context_end_line":614,"code":" episode_info[-1].agent_info[\"stack_trace\"] = stack_trace\n\n summary_info = dict(\n n_steps=len(episode_info) - 1,\n cum_reward=sum([step.reward for step in episode_info]),\n cum_raw_reward=sum([step.raw_reward for step in episode_info if step.raw_reward]),\n err_msg=err_msg,\n stack_trace=stack_trace,\n )\n for key, val in _aggregate_episode_stats(episode_info).items():\n summary_info[f\"stats.{key}\"] = val\n\n if len(episode_info) > 0:\n summary_info[\"terminated\"] = episode_info[-1].terminated\n summary_info[\"truncated\"] = episode_info[-1].truncated\n\n with open(exp_dir / \"summary_info.json\", \"w\") as f:\n json.dump(summary_info, f, indent=4)\n\n\ndef _extract_err_msg(episode_info: list[StepInfo]):\n \"\"\"Extract the last error message from the episode info.\"\"\"\n errors = [(None, None)]\n for step_info in episode_info:\n if step_info.agent_info is None:\n continue\n err_msg = step_info.agent_info.get(\"err_msg\", None)\n if err_msg is not None:\n errors.append((err_msg, step_info.agent_info.get(\"stack_trace\", None)))\n\n return errors[-1]\n\n\ndef _aggregate_episode_stats(episode_info: list[StepInfo]):\n \"\"\"Aggregate StepInfo.stats across episodes.\n\n It will compute the sum and max of each value in the stats dict.\n These two summaries should cover many use cases. 
If more are needed, the\n user can compute other stats by reloading individual StepInfo.\n\n Args:\n episode_info: list[StepInfo]\n The list of StepInfo objects to aggregate.\n\n Returns:\n dict\n A dictionary containing the aggregated stats.\n \"\"\"\n\n stats = defaultdict(list)\n for step_info in episode_info:","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop._aggregate_episode_stats","uri":"program://AgentLab/function/src.agentlab.experiments.loop._aggregate_episode_stats#L597-L631","kind":"function","name":"_aggregate_episode_stats","path":"src/agentlab/experiments/loop.py","language":"python","start_line":597,"end_line":631,"context_start_line":577,"context_end_line":651,"code":" summary_info[\"terminated\"] = episode_info[-1].terminated\n summary_info[\"truncated\"] = episode_info[-1].truncated\n\n with open(exp_dir / \"summary_info.json\", \"w\") as f:\n json.dump(summary_info, f, indent=4)\n\n\ndef _extract_err_msg(episode_info: list[StepInfo]):\n \"\"\"Extract the last error message from the episode info.\"\"\"\n errors = [(None, None)]\n for step_info in episode_info:\n if step_info.agent_info is None:\n continue\n err_msg = step_info.agent_info.get(\"err_msg\", None)\n if err_msg is not None:\n errors.append((err_msg, step_info.agent_info.get(\"stack_trace\", None)))\n\n return errors[-1]\n\n\ndef _aggregate_episode_stats(episode_info: list[StepInfo]):\n \"\"\"Aggregate StepInfo.stats across episodes.\n\n It will compute the sum and max of each value in the stats dict.\n These two summaries should cover many use cases. If more are needed, the\n user can compute other stats by reloading individual StepInfo.\n\n Args:\n episode_info: list[StepInfo]\n The list of StepInfo objects to aggregate.\n\n Returns:\n dict\n A dictionary containing the aggregated stats.\n \"\"\"\n\n stats = defaultdict(list)\n for step_info in episode_info:\n if step_info.stats is not None:\n for key, val in step_info.stats.items():\n if val is None:\n val = np.nan\n stats[key].append(val)\n\n aggregated_stats = {\"cum_steps\": len(episode_info)} # to be able to compute the mean\n for key, val_list in stats.items():\n aggregated_stats[f\"cum_{key}\"] = np.nansum(val_list)\n aggregated_stats[f\"max_{key}\"] = np.nanmax(val_list)\n\n for key, val in aggregated_stats.items():\n if isinstance(val, np.generic):\n aggregated_stats[key] = val.item()\n if np.isnan(val):\n aggregated_stats[key] = None\n return aggregated_stats\n\n\ndef _is_debugging():\n \"\"\"Tells you if your code is currently running in debug mode.\"\"\"\n return sys.gettrace() is not None\n\n\nclass ExpResult:\n \"\"\"Helper class to load and visualize the results of an experiment.\n\n attributes are loaded lazily.\n\n Attributes (lazily loaded):\n exp_args: ExpArgs, the arguments of the experiment.\n steps_info: list[StepInfo], the information of each steps so far\n summary_info: dict, the summary of the experiment.\n screenshots: list[Image], the screenshots of each step.\n screenshots_som: list[Image], the screenshots of each step with set of\n marks inprinted.\n flat_exp_args: dict, the flattened version of exp_args.","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} 
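The aggregation above is easiest to check on a toy episode. Below is a minimal sketch of `_aggregate_episode_stats` on hand-built `StepInfo` objects; the stat keys are invented for illustration (in practice `StepInfo.make_stats` fills them with `n_token_*` counts, `step_elapsed`, `agent_elapsed`, and agent-provided stats):

```python
# Sketch only: the stat keys below are invented for illustration.
from agentlab.experiments.loop import StepInfo, _aggregate_episode_stats

episode_info = [
    StepInfo(step=0, stats={"n_token_axtree": 1200, "agent_elapsed": 2.1}),
    StepInfo(step=1, stats={"n_token_axtree": 1350, "agent_elapsed": None}),  # None -> NaN
    StepInfo(step=2, stats=None),  # steps without stats are skipped entirely
]

agg = _aggregate_episode_stats(episode_info)
assert agg["cum_steps"] == 3              # counts all steps, with or without stats
assert agg["cum_n_token_axtree"] == 2550  # NaN-ignoring sum
assert agg["max_n_token_axtree"] == 1350  # NaN-ignoring max
assert agg["cum_agent_elapsed"] == 2.1    # the None entry is ignored
```

In `summary_info.json` these aggregates land under `stats.cum_*` / `stats.max_*` keys, since `save_summary_info` prefixes each key with `stats.`.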
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop._is_debugging","uri":"program://AgentLab/function/src.agentlab.experiments.loop._is_debugging#L634-L636","kind":"function","name":"_is_debugging","path":"src/agentlab/experiments/loop.py","language":"python","start_line":634,"end_line":636,"context_start_line":614,"context_end_line":656,"code":" for step_info in episode_info:\n if step_info.stats is not None:\n for key, val in step_info.stats.items():\n if val is None:\n val = np.nan\n stats[key].append(val)\n\n aggregated_stats = {\"cum_steps\": len(episode_info)} # to be able to compute the mean\n for key, val_list in stats.items():\n aggregated_stats[f\"cum_{key}\"] = np.nansum(val_list)\n aggregated_stats[f\"max_{key}\"] = np.nanmax(val_list)\n\n for key, val in aggregated_stats.items():\n if isinstance(val, np.generic):\n aggregated_stats[key] = val.item()\n if np.isnan(val):\n aggregated_stats[key] = None\n return aggregated_stats\n\n\ndef _is_debugging():\n \"\"\"Tells you if your code is currently running in debug mode.\"\"\"\n return sys.gettrace() is not None\n\n\nclass ExpResult:\n \"\"\"Helper class to load and visualize the results of an experiment.\n\n attributes are loaded lazily.\n\n Attributes (lazily loaded):\n exp_args: ExpArgs, the arguments of the experiment.\n steps_info: list[StepInfo], the information of each steps so far\n summary_info: dict, the summary of the experiment.\n screenshots: list[Image], the screenshots of each step.\n screenshots_som: list[Image], the screenshots of each step with set of\n marks inprinted.\n flat_exp_args: dict, the flattened version of exp_args.\n chat_video_path: Path, the path to the chat video. (if record_video=True)\n task_video_path: Path, the path to the task video. (if record_video=True)\n combined_video_path: Path, the path to the combined video. (if video was\n combined)\n \"\"\"","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.ExpResult","uri":"program://AgentLab/class/src.agentlab.experiments.loop.ExpResult#L639-L828","kind":"class","name":"ExpResult","path":"src/agentlab/experiments/loop.py","language":"python","start_line":639,"end_line":828,"context_start_line":619,"context_end_line":848,"code":" stats[key].append(val)\n\n aggregated_stats = {\"cum_steps\": len(episode_info)} # to be able to compute the mean\n for key, val_list in stats.items():\n aggregated_stats[f\"cum_{key}\"] = np.nansum(val_list)\n aggregated_stats[f\"max_{key}\"] = np.nanmax(val_list)\n\n for key, val in aggregated_stats.items():\n if isinstance(val, np.generic):\n aggregated_stats[key] = val.item()\n if np.isnan(val):\n aggregated_stats[key] = None\n return aggregated_stats\n\n\ndef _is_debugging():\n \"\"\"Tells you if your code is currently running in debug mode.\"\"\"\n return sys.gettrace() is not None\n\n\nclass ExpResult:\n \"\"\"Helper class to load and visualize the results of an experiment.\n\n attributes are loaded lazily.\n\n Attributes (lazily loaded):\n exp_args: ExpArgs, the arguments of the experiment.\n steps_info: list[StepInfo], the information of each steps so far\n summary_info: dict, the summary of the experiment.\n screenshots: list[Image], the screenshots of each step.\n screenshots_som: list[Image], the screenshots of each step with set of\n marks inprinted.\n flat_exp_args: dict, the flattened version of exp_args.\n chat_video_path: Path, the path to the chat video. 
(if record_video=True)\n task_video_path: Path, the path to the task video. (if record_video=True)\n combined_video_path: Path, the path to the combined video. (if video was\n combined)\n \"\"\"\n\n def __init__(self, exp_dir) -> None:\n self.exp_dir = Path(exp_dir)\n self._exp_args = None\n self._steps_info = {}\n self._summary_info = None\n self._screenshots = {}\n self._flat_exp_args = None\n self._logs = None\n\n @property\n def exp_args(self) -> ExpArgs:\n if self._exp_args is None:\n with open(self.exp_dir / \"exp_args.pkl\", \"rb\") as f:\n self._exp_args = pickle.load(f)\n # in case experiments were moved\n self._exp_args.exp_dir = self.exp_dir\n return self._exp_args\n\n def get_step_info(self, step: int) -> StepInfo:\n \"\"\"Load the step info from the file and return it.\"\"\"\n if self._steps_info.get(step, None) is None:\n with gzip.open(self.exp_dir / f\"step_{step}.pkl.gz\", \"rb\") as f:\n self._steps_info[step] = pickle.load(f)\n if self._steps_info[step].obs:\n if \"screenshot\" not in self._steps_info[step].obs:\n try:\n self._steps_info[step].obs[\"screenshot\"] = np.array(\n self.get_screenshot(step), dtype=np.uint8\n )\n except FileNotFoundError:\n pass\n if \"screenshot_som\" not in self._steps_info[step].obs:\n try:\n self._steps_info[step].obs[\"screenshot_som\"] = np.array(\n self.get_screenshot(step, som=True), dtype=np.uint8\n )\n except FileNotFoundError:\n pass\n # if goal_object is set to None, it indicates it has been saved into a separate file\n if (\n \"goal_object\" in self._steps_info[step].obs\n and self._steps_info[step].obs[\"goal_object\"] is None\n ):\n with gzip.open(self.exp_dir / \"goal_object.pkl.gz\", \"rb\") as f:\n goal_object = pickle.load(f)\n self._steps_info[step].obs[\"goal_object\"] = goal_object\n\n return self._steps_info[step]\n\n @property\n def steps_info(self) -> list[StepInfo]:\n step_files = list(self.exp_dir.glob(\"step_*.pkl.gz\"))\n for file in step_files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_step_info(step)\n\n return [self._steps_info[i] for i in range(len(self._steps_info))]\n\n @property\n def summary_info(self) -> dict:\n if self._summary_info is None:\n with open(self.exp_dir / \"summary_info.json\", \"r\") as f:\n # if length is zero raise file not found error\n if os.fstat(f.fileno()).st_size == 0:\n raise FileNotFoundError(\"summary_info.json is empty.\")\n self._summary_info = json.load(f)\n return self._summary_info\n\n def get_screenshot(self, step: int, som=False) -> Image:\n key = (step, som)\n if self._screenshots.get(key, None) is None:\n file_path = self.get_screenshot_path(step, som=som)\n self._screenshots[key] = Image.open(file_path).convert(\"RGB\")\n return self._screenshots[key]\n\n def get_screenshot_path(self, step: int, som=False) -> Path:\n \"\"\"Return the path to the screenshot file.\"\"\"\n file_name = f\"screenshot_{'som_' if som else ''}step_{step}\"\n for ext in [\".png\", \".jpg\"]:\n file_path = self.exp_dir / (file_name + ext)\n if file_path.exists():\n return file_path\n raise FileNotFoundError(\n f\"No screenshot found for step {step} (som={som}) in {self.exp_dir}\"\n )\n\n def get_screenshots(self, som=False):\n files = list(self.exp_dir.glob(\"screenshot_step_*\"))\n max_step = 0\n for file in files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_screenshot(step, som=som)\n max_step = max(max_step, step)\n return [self._screenshots.get((i, som), None) for i in range(max_step + 1)]\n\n @property\n def screenshots(self):\n return 
self.get_screenshots(som=False)\n\n @property\n def screenshots_som(self):\n return self.get_screenshots(som=True)\n\n @property\n def flat_exp_args(self) -> dict:\n \"\"\"Return a dict with exp_args flattened.\"\"\"\n if self._flat_exp_args is None:\n exp_args = asdict(self.exp_args)\n # this will flatten nested dicts\n self._flat_exp_args = _flatten_dict(exp_args)\n return self._flat_exp_args\n\n def get_exp_record(self) -> dict:\n \"\"\"Return a dict with exp_args flattened and summary_info.\"\"\"\n record = {\"exp_dir\": self.exp_dir}\n try:\n record.update(self.flat_exp_args)\n except FileNotFoundError:\n pass\n try:\n record.update(self.summary_info)\n except FileNotFoundError:\n pass\n return record\n\n @property\n def chat_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"chat_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No chat_video found in {self.exp_dir}\")\n\n @property\n def task_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"task_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No task_video found in {self.exp_dir}\")\n\n @property\n def combined_video_path(self) -> Path:\n return self.exp_dir / \"combined_video.mp4\"\n\n @property\n def logs(self):\n if self._logs is None:\n self._logs = (self.exp_dir / \"experiment.log\").read_text()\n return self._logs\n\n @property\n def status(self):\n \"\"\"Possible values:\n * \"done\": completed with no error\n * \"error\": completed with error\n * \"incomplete\": not completed yet (may be pending or just stalled)\n\n Returns:\n str: the status of the experiment. One of \"done\", \"error\", \"incomplete\".\n \"\"\"\n try:\n summary_info = self.summary_info\n except FileNotFoundError:\n return \"incomplete\"\n\n if summary_info.get(\"err_msg\", None) is not None:\n return \"error\"\n\n if summary_info.get(\"terminated\", False) or summary_info.get(\"truncated\", False):\n return \"done\"\n\n return \"incomplete\"\n\n\nEXP_RESULT_CACHE = {}\n\n\ndef get_exp_result(exp_dir) -> ExpResult:\n \"\"\"Keep a cache of pre-loaded exp_results for faster loading\"\"\"\n exp_dir = str(exp_dir) # make sure it's not a Path\n exp_result = EXP_RESULT_CACHE.get(exp_dir, None)\n if exp_result is None:\n exp_result = ExpResult(exp_dir)\n EXP_RESULT_CACHE[exp_dir] = exp_result\n return exp_result\n\n\ndef yield_all_exp_results(\n savedir_base: str | Path, progress_fn=tqdm, load_hidden=False, use_cache=True\n):\n \"\"\"Recursively find all experiments from savedir_base folder.\n","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.get_exp_result","uri":"program://AgentLab/function/src.agentlab.experiments.loop.get_exp_result#L834-L841","kind":"function","name":"get_exp_result","path":"src/agentlab/experiments/loop.py","language":"python","start_line":834,"end_line":841,"context_start_line":814,"context_end_line":861,"code":" Returns:\n str: the status of the experiment. 
One of \"done\", \"error\", \"incomplete\".\n \"\"\"\n try:\n summary_info = self.summary_info\n except FileNotFoundError:\n return \"incomplete\"\n\n if summary_info.get(\"err_msg\", None) is not None:\n return \"error\"\n\n if summary_info.get(\"terminated\", False) or summary_info.get(\"truncated\", False):\n return \"done\"\n\n return \"incomplete\"\n\n\nEXP_RESULT_CACHE = {}\n\n\ndef get_exp_result(exp_dir) -> ExpResult:\n \"\"\"Keep a cache of pre-loaded exp_results for faster loading\"\"\"\n exp_dir = str(exp_dir) # make sure it's not a Path\n exp_result = EXP_RESULT_CACHE.get(exp_dir, None)\n if exp_result is None:\n exp_result = ExpResult(exp_dir)\n EXP_RESULT_CACHE[exp_dir] = exp_result\n return exp_result\n\n\ndef yield_all_exp_results(\n savedir_base: str | Path, progress_fn=tqdm, load_hidden=False, use_cache=True\n):\n \"\"\"Recursively find all experiments from savedir_base folder.\n\n This will ignore all experiments that start with \"_\" or \".\". use\n `load_hidden=True` to load them anyway.\n\n Args:\n savedir_base: str or Path\n The base directory where the experiments are saved.\n progress_fn: function\n A function to show progress. Defaults to tqdm.\n load_hidden: bool\n If True, load hidden experiments (those starting with \"_\" or \".\").\n use_cache: bool\n If True, use the cache of pre-loaded exp_results.\n","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.yield_all_exp_results","uri":"program://AgentLab/function/src.agentlab.experiments.loop.yield_all_exp_results#L844-L885","kind":"function","name":"yield_all_exp_results","path":"src/agentlab/experiments/loop.py","language":"python","start_line":844,"end_line":885,"context_start_line":824,"context_end_line":905,"code":"\n if summary_info.get(\"terminated\", False) or summary_info.get(\"truncated\", False):\n return \"done\"\n\n return \"incomplete\"\n\n\nEXP_RESULT_CACHE = {}\n\n\ndef get_exp_result(exp_dir) -> ExpResult:\n \"\"\"Keep a cache of pre-loaded exp_results for faster loading\"\"\"\n exp_dir = str(exp_dir) # make sure it's not a Path\n exp_result = EXP_RESULT_CACHE.get(exp_dir, None)\n if exp_result is None:\n exp_result = ExpResult(exp_dir)\n EXP_RESULT_CACHE[exp_dir] = exp_result\n return exp_result\n\n\ndef yield_all_exp_results(\n savedir_base: str | Path, progress_fn=tqdm, load_hidden=False, use_cache=True\n):\n \"\"\"Recursively find all experiments from savedir_base folder.\n\n This will ignore all experiments that start with \"_\" or \".\". use\n `load_hidden=True` to load them anyway.\n\n Args:\n savedir_base: str or Path\n The base directory where the experiments are saved.\n progress_fn: function\n A function to show progress. 
Defaults to tqdm.\n load_hidden: bool\n If True, load hidden experiments (those starting with \"_\" or \".\").\n use_cache: bool\n If True, use the cache of pre-loaded exp_results.\n\n Yields:\n ExpResult\n An instance of ExpResult for each experiment found.\n \"\"\"\n\n if not isinstance(savedir_base, list):\n savedir_base = [savedir_base]\n\n exp_args_paths = []\n for exp_dir in savedir_base:\n exp_args_paths.extend(list(Path(exp_dir).glob(\"**/exp_args.pkl\")))\n\n if progress_fn is not None:\n exp_args_paths = progress_fn(exp_args_paths, desc=\"Searching experiments directories.\")\n\n for exp_args_path in exp_args_paths:\n exp_dir = exp_args_path.parent\n if not load_hidden:\n if exp_dir.name.startswith(\"_\") or exp_dir.name.startswith(\".\"):\n continue\n if use_cache:\n yield get_exp_result(exp_dir)\n else:\n yield ExpResult(exp_dir)\n\n\nclass DataclassJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if is_dataclass(obj):\n return asdict(obj)\n if isinstance(obj, np.integer):\n return int(obj)\n if isinstance(obj, np.floating):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return super().default(obj)\n\n\ndef _move_old_exp(exp_dir):\n \"\"\"Move the old experiment directory to a new name.\"\"\"\n exp_dir = Path(exp_dir)\n if exp_dir.exists():\n exp_dir.rename(exp_dir.with_name(\"_\" + exp_dir.name))","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.DataclassJSONEncoder","uri":"program://AgentLab/class/src.agentlab.experiments.loop.DataclassJSONEncoder#L888-L898","kind":"class","name":"DataclassJSONEncoder","path":"src/agentlab/experiments/loop.py","language":"python","start_line":888,"end_line":898,"context_start_line":868,"context_end_line":918,"code":" savedir_base = [savedir_base]\n\n exp_args_paths = []\n for exp_dir in savedir_base:\n exp_args_paths.extend(list(Path(exp_dir).glob(\"**/exp_args.pkl\")))\n\n if progress_fn is not None:\n exp_args_paths = progress_fn(exp_args_paths, desc=\"Searching experiments directories.\")\n\n for exp_args_path in exp_args_paths:\n exp_dir = exp_args_path.parent\n if not load_hidden:\n if exp_dir.name.startswith(\"_\") or exp_dir.name.startswith(\".\"):\n continue\n if use_cache:\n yield get_exp_result(exp_dir)\n else:\n yield ExpResult(exp_dir)\n\n\nclass DataclassJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if is_dataclass(obj):\n return asdict(obj)\n if isinstance(obj, np.integer):\n return int(obj)\n if isinstance(obj, np.floating):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return super().default(obj)\n\n\ndef _move_old_exp(exp_dir):\n \"\"\"Move the old experiment directory to a new name.\"\"\"\n exp_dir = Path(exp_dir)\n if exp_dir.exists():\n exp_dir.rename(exp_dir.with_name(\"_\" + exp_dir.name))\n\n\ndef _get_env_name(task_name: str):\n \"\"\"Register tasks if needed (lazy import) and return environment name.\"\"\"\n\n # lazy import\n if task_name.startswith(\"miniwob\"):\n import browsergym.miniwob\n elif task_name.startswith(\"workarena\"):\n import browsergym.workarena\n elif task_name.startswith(\"webarena\"):\n import browsergym.webarena\n elif task_name.startswith(\"visualwebarena\"):","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} 
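Together, `yield_all_exp_results`, the `EXP_RESULT_CACHE`-backed `get_exp_result`, and the lazy `ExpResult.status` property make it cheap to audit a results tree after a run. A hedged sketch (the `savedir_base` path is a placeholder to adapt to your setup):

```python
from collections import Counter

from agentlab.experiments.loop import yield_all_exp_results

savedir_base = "/path/to/agentlab_results"  # placeholder: your results root

status_counts = Counter()
error_runs = []
for exp_result in yield_all_exp_results(savedir_base, progress_fn=None):
    status_counts[exp_result.status] += 1  # "done", "error" or "incomplete"
    record = exp_result.get_exp_record()   # flattened exp_args + summary_info
    if record.get("err_msg"):
        error_runs.append((record["exp_dir"], record["err_msg"]))

print(dict(status_counts))
for exp_dir, err_msg in error_runs:
    print(f"{exp_dir}:\n{err_msg}")
```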
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop._move_old_exp","uri":"program://AgentLab/function/src.agentlab.experiments.loop._move_old_exp#L901-L905","kind":"function","name":"_move_old_exp","path":"src/agentlab/experiments/loop.py","language":"python","start_line":901,"end_line":905,"context_start_line":881,"context_end_line":925,"code":" continue\n if use_cache:\n yield get_exp_result(exp_dir)\n else:\n yield ExpResult(exp_dir)\n\n\nclass DataclassJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if is_dataclass(obj):\n return asdict(obj)\n if isinstance(obj, np.integer):\n return int(obj)\n if isinstance(obj, np.floating):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return super().default(obj)\n\n\ndef _move_old_exp(exp_dir):\n \"\"\"Move the old experiment directory to a new name.\"\"\"\n exp_dir = Path(exp_dir)\n if exp_dir.exists():\n exp_dir.rename(exp_dir.with_name(\"_\" + exp_dir.name))\n\n\ndef _get_env_name(task_name: str):\n \"\"\"Register tasks if needed (lazy import) and return environment name.\"\"\"\n\n # lazy import\n if task_name.startswith(\"miniwob\"):\n import browsergym.miniwob\n elif task_name.startswith(\"workarena\"):\n import browsergym.workarena\n elif task_name.startswith(\"webarena\"):\n import browsergym.webarena\n elif task_name.startswith(\"visualwebarena\"):\n import browsergym.visualwebarena\n elif task_name.startswith(\"assistantbench\"):\n import browsergym.assistantbench\n elif task_name.startswith(\"weblinx\"):\n import weblinx_browsergym\n\n return f\"browsergym/{task_name}\"","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop._get_env_name","uri":"program://AgentLab/function/src.agentlab.experiments.loop._get_env_name#L908-L925","kind":"function","name":"_get_env_name","path":"src/agentlab/experiments/loop.py","language":"python","start_line":908,"end_line":925,"context_start_line":888,"context_end_line":945,"code":"class DataclassJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if is_dataclass(obj):\n return asdict(obj)\n if isinstance(obj, np.integer):\n return int(obj)\n if isinstance(obj, np.floating):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return super().default(obj)\n\n\ndef _move_old_exp(exp_dir):\n \"\"\"Move the old experiment directory to a new name.\"\"\"\n exp_dir = Path(exp_dir)\n if exp_dir.exists():\n exp_dir.rename(exp_dir.with_name(\"_\" + exp_dir.name))\n\n\ndef _get_env_name(task_name: str):\n \"\"\"Register tasks if needed (lazy import) and return environment name.\"\"\"\n\n # lazy import\n if task_name.startswith(\"miniwob\"):\n import browsergym.miniwob\n elif task_name.startswith(\"workarena\"):\n import browsergym.workarena\n elif task_name.startswith(\"webarena\"):\n import browsergym.webarena\n elif task_name.startswith(\"visualwebarena\"):\n import browsergym.visualwebarena\n elif task_name.startswith(\"assistantbench\"):\n import browsergym.assistantbench\n elif task_name.startswith(\"weblinx\"):\n import weblinx_browsergym\n\n return f\"browsergym/{task_name}\"\n\n\ndef _send_chat_info(chat: Chat, action: str, agent_info: dict):\n \"\"\"Send the think and action info to the chat.\"\"\"\n msg = \"\"\n if \"think\" in agent_info:\n msg += f\"\"\"\\\n{agent_info[\"think\"]}\n\n\"\"\"\n\n msg += f\"\"\"\\\naction:\n{action}\n\"\"\"\n\n logger.info(msg)\n chat.add_message(role=\"info\", 
msg=msg)\n\n","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop._send_chat_info","uri":"program://AgentLab/function/src.agentlab.experiments.loop._send_chat_info#L928-L943","kind":"function","name":"_send_chat_info","path":"src/agentlab/experiments/loop.py","language":"python","start_line":928,"end_line":943,"context_start_line":908,"context_end_line":955,"code":"def _get_env_name(task_name: str):\n \"\"\"Register tasks if needed (lazy import) and return environment name.\"\"\"\n\n # lazy import\n if task_name.startswith(\"miniwob\"):\n import browsergym.miniwob\n elif task_name.startswith(\"workarena\"):\n import browsergym.workarena\n elif task_name.startswith(\"webarena\"):\n import browsergym.webarena\n elif task_name.startswith(\"visualwebarena\"):\n import browsergym.visualwebarena\n elif task_name.startswith(\"assistantbench\"):\n import browsergym.assistantbench\n elif task_name.startswith(\"weblinx\"):\n import weblinx_browsergym\n\n return f\"browsergym/{task_name}\"\n\n\ndef _send_chat_info(chat: Chat, action: str, agent_info: dict):\n \"\"\"Send the think and action info to the chat.\"\"\"\n msg = \"\"\n if \"think\" in agent_info:\n msg += f\"\"\"\\\n{agent_info[\"think\"]}\n\n\"\"\"\n\n msg += f\"\"\"\\\naction:\n{action}\n\"\"\"\n\n logger.info(msg)\n chat.add_message(role=\"info\", msg=msg)\n\n\ndef _flatten_dict(d, parent_key=\"\", sep=\".\"):\n \"\"\"Recursively flatten a nested dictionary.\"\"\"\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, dict):\n items.extend(_flatten_dict(v, new_key, sep).items())\n else:\n items.append((new_key, v))\n return dict(items)","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop._flatten_dict","uri":"program://AgentLab/function/src.agentlab.experiments.loop._flatten_dict#L946-L955","kind":"function","name":"_flatten_dict","path":"src/agentlab/experiments/loop.py","language":"python","start_line":946,"end_line":955,"context_start_line":926,"context_end_line":955,"code":"\n\ndef _send_chat_info(chat: Chat, action: str, agent_info: dict):\n \"\"\"Send the think and action info to the chat.\"\"\"\n msg = \"\"\n if \"think\" in agent_info:\n msg += f\"\"\"\\\n{agent_info[\"think\"]}\n\n\"\"\"\n\n msg += f\"\"\"\\\naction:\n{action}\n\"\"\"\n\n logger.info(msg)\n chat.add_message(role=\"info\", msg=msg)\n\n\ndef _flatten_dict(d, parent_key=\"\", sep=\".\"):\n \"\"\"Recursively flatten a nested dictionary.\"\"\"\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, dict):\n items.extend(_flatten_dict(v, new_key, sep).items())\n else:\n items.append((new_key, v))\n return dict(items)","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.make_env","uri":"program://AgentLab/function/src.agentlab.experiments.loop.make_env#L53-L100","kind":"function","name":"make_env","path":"src/agentlab/experiments/loop.py","language":"python","start_line":53,"end_line":100,"context_start_line":33,"context_end_line":120,"code":"\nlogger = logging.getLogger(__name__)\n\nSEED_MAX = 2**32 # arbitrary max value (exclusive), seems large enough\n\n\n@dataclass\nclass EnvArgs(DataClassJsonMixin):\n task_name: 
str\n task_seed: Optional[int] = None\n max_steps: Optional[int] = None\n headless: bool = True\n record_video: bool = False\n wait_for_user_message: bool = False\n viewport: Optional[dict] = None # use default value from BrowserGym\n slow_mo: Optional[int] = None # use default value from BrowserGym\n storage_state: Optional[str | Path | dict] = None\n task_kwargs: Optional[dict] = None # use default value from BrowserGym\n pre_observation_delay: float = None # seconds, wait for JS events to be fired\n\n def make_env(\n self, action_mapping, exp_dir, exp_task_kwargs: dict = {}, use_raw_page_output=True\n ):\n \"\"\"\n Instantiates the BrowserGym environment corresponding to the arguments (with some tweaks).\n\n Args:\n action_mapping: overrides the action mapping of the environment.\n exp_dir: will set some environment parameters (e.g., record_video_dir) with respect to the directory where the experiment is running.\n exp_task_kwargs: use with caution! Will override task parameters to experiment-specific values. Useful to set different server configs for different experiments, or output file paths within the experiment's folder (e.g., assistantbench).\n use_raw_page_output: if True, the environment will also return raw page output in the observation.\n\n Returns:\n env: the gym environment.\n \"\"\"\n extra_kwargs = {}\n if self.record_video:\n extra_kwargs[\"record_video_dir\"] = exp_dir\n if self.viewport:\n extra_kwargs[\"viewport\"] = self.viewport\n if self.slow_mo is not None:\n extra_kwargs[\"slow_mo\"] = self.slow_mo\n if self.pre_observation_delay is not None:\n extra_kwargs[\"pre_observation_delay\"] = self.pre_observation_delay\n if self.storage_state:\n extra_kwargs[\"pw_context_kwargs\"] = {\"storage_state\": self.storage_state}\n if self.task_kwargs is not None:\n extra_kwargs[\"task_kwargs\"] = self.task_kwargs\n if exp_task_kwargs:\n extra_kwargs[\"task_kwargs\"] = extra_kwargs.get(\"task_kwargs\", {}) | exp_task_kwargs\n\n # assistantbench hack, write the task output (agent prediction) to a file in the experiment's directory\n # TODO: find a better way to deal with this\n if self.task_name.startswith(\"assistantbench.test\"):\n extra_kwargs[\"task_kwargs\"] = extra_kwargs.get(\"task_kwargs\", {}) | {\n \"output_file\": exp_dir / \"assistantbench-prediction.json\"\n }\n\n return gym.make(\n _get_env_name(self.task_name),\n disable_env_checker=True,\n max_episode_steps=self.max_steps,\n headless=self.headless,\n wait_for_user_message=self.wait_for_user_message,\n action_mapping=action_mapping, # action mapping is provided by the agent\n use_raw_page_output=use_raw_page_output,\n **extra_kwargs,\n )\n\n\n@dataclass\nclass AbstractAgentArgs(ABC):\n \"\"\"A template class that defines the required signature of an agent's arguments.\"\"\"\n\n agent_name: str = None # type: ignore\n\n def __post_init__(self):\n if self.agent_name is None:\n self.agent_name = self.__class__.__name__\n\n def prepare(self):\n \"\"\"Prepare the agent's LLM models before running the experiment.\"\"\"\n pass\n\n def close(self):\n \"\"\"Close the agent's LLM models after running the experiment.\"\"\"\n pass\n","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} 
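As a usage sketch, `EnvArgs.make_env` can also be driven by hand, outside `ExpArgs.run`. The identity `action_mapping` below is an assumption standing in for the agent-supplied `agent.action_set.to_python_code`, and the task name assumes a working MiniWoB setup:

```python
from pathlib import Path

from agentlab.experiments.loop import EnvArgs

env_args = EnvArgs(
    task_name="miniwob.click-test",  # assumes MiniWoB is installed and served
    task_seed=42,
    max_steps=5,
    headless=True,
)

exp_dir = Path("/tmp/envargs_demo")  # placeholder experiment directory
exp_dir.mkdir(parents=True, exist_ok=True)

# ExpArgs.run passes agent.action_set.to_python_code here; an identity mapping
# is enough to smoke-test the environment wiring.
env = env_args.make_env(action_mapping=lambda code: code, exp_dir=exp_dir)
obs, info = env.reset(seed=env_args.task_seed)
print(sorted(obs.keys()))
env.close()
```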
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.__post_init__","uri":"program://AgentLab/function/src.agentlab.experiments.loop.__post_init__#L109-L111","kind":"function","name":"__post_init__","path":"src/agentlab/experiments/loop.py","language":"python","start_line":109,"end_line":111,"context_start_line":89,"context_end_line":131,"code":" }\n\n return gym.make(\n _get_env_name(self.task_name),\n disable_env_checker=True,\n max_episode_steps=self.max_steps,\n headless=self.headless,\n wait_for_user_message=self.wait_for_user_message,\n action_mapping=action_mapping, # action mapping is provided by the agent\n use_raw_page_output=use_raw_page_output,\n **extra_kwargs,\n )\n\n\n@dataclass\nclass AbstractAgentArgs(ABC):\n \"\"\"A template class that defines the required signature of an agent's arguments.\"\"\"\n\n agent_name: str = None # type: ignore\n\n def __post_init__(self):\n if self.agent_name is None:\n self.agent_name = self.__class__.__name__\n\n def prepare(self):\n \"\"\"Prepare the agent's LLM models before running the experiment.\"\"\"\n pass\n\n def close(self):\n \"\"\"Close the agent's LLM models after running the experiment.\"\"\"\n pass\n\n @abstractmethod\n def make_agent(self) -> Agent:\n \"\"\"Comply the experiments.loop API for instantiating the agent.\"\"\"\n\n\ndef save_package_versions(exp_dir: Path):\n \"\"\"Save the versions of the installed packages in the experiment directory.\"\"\"\n python_dists = \"\\n\".join(\n sorted(\n [\n f\"{dist.metadata['Name']}=={dist.metadata['Version']}\"","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.prepare","uri":"program://AgentLab/function/src.agentlab.experiments.loop.prepare#L357-L384","kind":"function","name":"prepare","path":"src/agentlab/experiments/loop.py","language":"python","start_line":357,"end_line":384,"context_start_line":337,"context_end_line":404,"code":" agent_args: AbstractAgentArgs\n env_args: EnvArgs\n exp_dir: str = None\n exp_name: str = None\n enable_debug: bool = True\n err_msg: str = None\n stack_trace: str = None\n order: int = None # use to keep the original order the experiments were meant to be launched.\n logging_level: int = logging.INFO\n logging_level_stdout: int = logging.INFO\n exp_id: str = None\n depends_on: tuple[str] = ()\n save_screenshot: bool = True\n save_som: bool = False\n\n def make_id(self):\n \"\"\"Create a unique id for the experiment.\"\"\"\n if self.exp_id is None:\n self.exp_id = str(uuid.uuid4())\n\n def prepare(self, exp_root):\n \"\"\"Prepare the experiment directory and save the experiment arguments.\n\n This enables inspecting experiments that are not run yet.\n\n Args:\n exp_root: str\n The root directory where the experiment will be saved.\n \"\"\"\n if self.env_args.task_seed is None:\n self.env_args.task_seed = np.random.randint(0, SEED_MAX)\n\n if self.exp_name is None:\n task_name = self.env_args.task_name\n self.exp_name = f\"{self.agent_args.agent_name}_on_{task_name}_{self.env_args.task_seed}\"\n\n # if exp_dir exists, it means it's a re-run, move the old one\n if self.exp_dir is not None:\n _move_old_exp(self.exp_dir)\n\n self.make_id()\n\n self.exp_date = datetime.now()\n self._make_dir(exp_root)\n\n self.exp_dir.mkdir(parents=True, exist_ok=True)\n with open(self.exp_dir / \"exp_args.pkl\", \"wb\") as f:\n pickle.dump(self, f)\n\n def _make_dir(self, exp_root):\n \"\"\"Create a unique directory for the 
experiment.\"\"\"\n date_str = self.exp_date.strftime(\"%Y-%m-%d_%H-%M-%S\")\n exp_str = re.sub(\n r\"[\\/:*?<>|]\", \"_\", self.exp_name\n ) # sanitize exp_name to be used as a file name (substitute forbidden characters)\n\n for i in range(1000):\n if i >= 999: # make sure we don't loop forever\n raise ValueError(\"Could not find a unique name for the experiment directory.\")\n\n tag = f\"_{i}\" if i > 0 else \"\"\n self.exp_dir = Path(exp_root) / f\"{date_str}_{exp_str}{tag}\"\n if not self.exp_dir.exists():\n break\n\n # TODO distinguish between agent error and environment or system error. e.g.\n # the parsing error of an action should not be re-run.\n def run(self):","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.close","uri":"program://AgentLab/function/src.agentlab.experiments.loop.close#L117-L119","kind":"function","name":"close","path":"src/agentlab/experiments/loop.py","language":"python","start_line":117,"end_line":119,"context_start_line":97,"context_end_line":139,"code":" action_mapping=action_mapping, # action mapping is provided by the agent\n use_raw_page_output=use_raw_page_output,\n **extra_kwargs,\n )\n\n\n@dataclass\nclass AbstractAgentArgs(ABC):\n \"\"\"A template class that defines the required signature of an agent's arguments.\"\"\"\n\n agent_name: str = None # type: ignore\n\n def __post_init__(self):\n if self.agent_name is None:\n self.agent_name = self.__class__.__name__\n\n def prepare(self):\n \"\"\"Prepare the agent's LLM models before running the experiment.\"\"\"\n pass\n\n def close(self):\n \"\"\"Close the agent's LLM models after running the experiment.\"\"\"\n pass\n\n @abstractmethod\n def make_agent(self) -> Agent:\n \"\"\"Comply with the experiments.loop API for instantiating the agent.\"\"\"\n\n\ndef save_package_versions(exp_dir: Path):\n \"\"\"Save the versions of the installed packages in the experiment directory.\"\"\"\n python_dists = \"\\n\".join(\n sorted(\n [\n f\"{dist.metadata['Name']}=={dist.metadata['Version']}\"\n for dist in importlib.metadata.distributions()\n ]\n )\n )\n (exp_dir / \"package_versions.txt\").write_text(python_dists)\n\n\n@dataclass","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.make_agent","uri":"program://AgentLab/function/src.agentlab.experiments.loop.make_agent#L122-L123","kind":"function","name":"make_agent","path":"src/agentlab/experiments/loop.py","language":"python","start_line":122,"end_line":123,"context_start_line":102,"context_end_line":143,"code":"\n@dataclass\nclass AbstractAgentArgs(ABC):\n \"\"\"A template class that defines the required signature of an agent's arguments.\"\"\"\n\n agent_name: str = None # type: ignore\n\n def __post_init__(self):\n if self.agent_name is None:\n self.agent_name = self.__class__.__name__\n\n def prepare(self):\n \"\"\"Prepare the agent's LLM models before running the experiment.\"\"\"\n pass\n\n def close(self):\n \"\"\"Close the agent's LLM models after running the experiment.\"\"\"\n pass\n\n @abstractmethod\n def make_agent(self) -> Agent:\n \"\"\"Comply with the experiments.loop API for instantiating the agent.\"\"\"\n\n\ndef save_package_versions(exp_dir: Path):\n \"\"\"Save the versions of the installed packages in the experiment directory.\"\"\"\n python_dists = \"\\n\".join(\n sorted(\n [\n 
f\"{dist.metadata['Name']}=={dist.metadata['Version']}\"\n for dist in importlib.metadata.distributions()\n ]\n )\n )\n (exp_dir / \"package_versions.txt\").write_text(python_dists)\n\n\n@dataclass\nclass StepTimestamps:\n env_start: float = 0\n action_exec_start: float = 0 # to extract beginning of visual action from video\n action_exec_stop: float = 0 # to extract end of visual action from video","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.from_step","uri":"program://AgentLab/function/src.agentlab.experiments.loop.from_step#L198-L219","kind":"function","name":"from_step","path":"src/agentlab/experiments/loop.py","language":"python","start_line":198,"end_line":219,"context_start_line":178,"context_end_line":239,"code":" agent_info: dict\n Additional information from the agent.\n stats: dict\n Extra statistics about the step.\n profiling: StepTimestamps\n Timestamps of the different events during the episode.\n \"\"\"\n\n step: int = None\n obs: dict = None\n reward: float = 0\n raw_reward: float = 0\n terminated: bool = None\n truncated: bool = None\n action: str = None\n agent_info: dict = field(default_factory=dict)\n stats: dict = None\n profiling: StepTimestamps = field(default_factory=StepTimestamps)\n task_info: dict = None\n\n def from_step(self, env: gym.Env, action: str, obs_preprocessor: callable):\n t = self.profiling\n t.env_start = time.time()\n self.obs, self.reward, self.terminated, self.truncated, env_info = env.step(action)\n t.env_stop = time.time()\n\n self.task_info = env_info.get(\"task_info\", None)\n\n self.raw_reward = env_info.get(\"RAW_REWARD_GLOBAL\", None)\n\n t.action_exec_start = env_info[\"action_exec_start\"] # start\n t.action_exect_after_timeout = env_info[\"action_exec_stop\"]\n t.action_exec_stop = env_info[\"action_exec_stop\"] - env_info[\"action_exec_timeout\"]\n t.wait_for_page_loading_start = env_info.get(\"wait_for_page_loading_start\", None)\n t.wait_for_page_loading_stop = env_info.get(\"wait_for_page_loading_stop\", None)\n t.validation_start = env_info.get(\"validation_start\", None)\n t.validation_stop = env_info.get(\"validation_stop\", None)\n t.get_observation_start = env_info.get(\"get_observation_start\", None)\n t.get_observation_stop = env_info.get(\"get_observation_stop\", None)\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n def from_action(self, agent: Agent):\n self.profiling.agent_start = time.time()\n self.action, self.agent_info = agent.get_action(self.obs.copy())\n self.profiling.agent_stop = time.time()\n\n self.make_stats()\n\n return self.action\n\n def from_reset(self, env: gym.Env, seed: int, obs_preprocessor: callable):\n t = self.profiling\n t.env_start = time.time()\n self.obs, env_info = env.reset(seed=seed)\n self.reward, self.terminated, self.truncated = 0, False, False\n t.env_stop = time.time()\n\n t.action_exec_start = env_info.get(\"recording_start_time\", t.env_start)\n t.action_exect_after_timeout = t.env_stop\n t.action_exec_stop = t.env_stop","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false}
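The profiling in `from_step` above is plain wall-clock bookkeeping: each phase is bracketed with `time.time()` calls recorded on the step's `StepTimestamps`, and `make_stats()` (further below) diffs those timestamps into `step_elapsed` and `agent_elapsed`. A standalone sketch of the same pattern, where the `fake_*` helpers are stand-ins for the environment and agent, not AgentLab code:

```python
# Standalone sketch of the StepTimestamps bookkeeping (fake_* names are stand-ins).
import time


def fake_env_step(action: str) -> str:
    time.sleep(0.01)  # stand-in for env.step(action)
    return "obs"


def fake_get_action(obs: str) -> str:
    time.sleep(0.005)  # stand-in for agent.get_action(obs)
    return "click('42')"


env_start = time.time()
obs = fake_env_step("noop()")
env_stop = time.time()  # from_step sets t.env_start / t.env_stop this way

agent_start = time.time()
action = fake_get_action(obs)
agent_stop = time.time()  # from_action sets t.agent_start / t.agent_stop this way

# make_stats() (further below) reports the same two durations:
print({"step_elapsed": env_stop - env_start, "agent_elapsed": agent_stop - agent_start})
```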
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.from_action","uri":"program://AgentLab/function/src.agentlab.experiments.loop.from_action#L221-L228","kind":"function","name":"from_action","path":"src/agentlab/experiments/loop.py","language":"python","start_line":221,"end_line":228,"context_start_line":201,"context_end_line":248,"code":" self.obs, self.reward, self.terminated, self.truncated, env_info = env.step(action)\n t.env_stop = time.time()\n\n self.task_info = env_info.get(\"task_info\", None)\n\n self.raw_reward = env_info.get(\"RAW_REWARD_GLOBAL\", None)\n\n t.action_exec_start = env_info[\"action_exec_start\"] # start\n t.action_exect_after_timeout = env_info[\"action_exec_stop\"]\n t.action_exec_stop = env_info[\"action_exec_stop\"] - env_info[\"action_exec_timeout\"]\n t.wait_for_page_loading_start = env_info.get(\"wait_for_page_loading_start\", None)\n t.wait_for_page_loading_stop = env_info.get(\"wait_for_page_loading_stop\", None)\n t.validation_start = env_info.get(\"validation_start\", None)\n t.validation_stop = env_info.get(\"validation_stop\", None)\n t.get_observation_start = env_info.get(\"get_observation_start\", None)\n t.get_observation_stop = env_info.get(\"get_observation_stop\", None)\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n def from_action(self, agent: Agent):\n self.profiling.agent_start = time.time()\n self.action, self.agent_info = agent.get_action(self.obs.copy())\n self.profiling.agent_stop = time.time()\n\n self.make_stats()\n\n return self.action\n\n def from_reset(self, env: gym.Env, seed: int, obs_preprocessor: callable):\n t = self.profiling\n t.env_start = time.time()\n self.obs, env_info = env.reset(seed=seed)\n self.reward, self.terminated, self.truncated = 0, False, False\n t.env_stop = time.time()\n\n t.action_exec_start = env_info.get(\"recording_start_time\", t.env_start)\n t.action_exect_after_timeout = t.env_stop\n t.action_exec_stop = t.env_stop\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n @property\n def is_done(self):\n return self.terminated or self.truncated\n\n def make_stats(self):","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.from_reset","uri":"program://AgentLab/function/src.agentlab.experiments.loop.from_reset#L230-L242","kind":"function","name":"from_reset","path":"src/agentlab/experiments/loop.py","language":"python","start_line":230,"end_line":242,"context_start_line":210,"context_end_line":262,"code":" t.action_exec_stop = env_info[\"action_exec_stop\"] - env_info[\"action_exec_timeout\"]\n t.wait_for_page_loading_start = env_info.get(\"wait_for_page_loading_start\", None)\n t.wait_for_page_loading_stop = env_info.get(\"wait_for_page_loading_stop\", None)\n t.validation_start = env_info.get(\"validation_start\", None)\n t.validation_stop = env_info.get(\"validation_stop\", None)\n t.get_observation_start = env_info.get(\"get_observation_start\", None)\n t.get_observation_stop = env_info.get(\"get_observation_stop\", None)\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n def from_action(self, agent: Agent):\n self.profiling.agent_start = time.time()\n self.action, self.agent_info = agent.get_action(self.obs.copy())\n self.profiling.agent_stop = time.time()\n\n self.make_stats()\n\n return self.action\n\n def from_reset(self, env: gym.Env, seed: int, obs_preprocessor: callable):\n t = self.profiling\n t.env_start = 
time.time()\n self.obs, env_info = env.reset(seed=seed)\n self.reward, self.terminated, self.truncated = 0, False, False\n t.env_stop = time.time()\n\n t.action_exec_start = env_info.get(\"recording_start_time\", t.env_start)\n t.action_exect_after_timeout = t.env_stop\n t.action_exec_stop = t.env_stop\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n @property\n def is_done(self):\n return self.terminated or self.truncated\n\n def make_stats(self):\n if isinstance(self.obs, dict):\n stats = {\n f\"n_token_{key}\": count_tokens(val)\n for key, val in self.obs.items()\n if isinstance(val, str)\n }\n else:\n stats = {}\n stats.update(self.agent_info.pop(\"stats\", {}))\n\n t = self.profiling\n stats[\"step_elapsed\"] = t.env_stop - t.env_start\n stats[\"agent_elapsed\"] = t.agent_stop - t.agent_start\n","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.is_done","uri":"program://AgentLab/function/src.agentlab.experiments.loop.is_done#L245-L246","kind":"function","name":"is_done","path":"src/agentlab/experiments/loop.py","language":"python","start_line":245,"end_line":246,"context_start_line":225,"context_end_line":266,"code":"\n self.make_stats()\n\n return self.action\n\n def from_reset(self, env: gym.Env, seed: int, obs_preprocessor: callable):\n t = self.profiling\n t.env_start = time.time()\n self.obs, env_info = env.reset(seed=seed)\n self.reward, self.terminated, self.truncated = 0, False, False\n t.env_stop = time.time()\n\n t.action_exec_start = env_info.get(\"recording_start_time\", t.env_start)\n t.action_exect_after_timeout = t.env_stop\n t.action_exec_stop = t.env_stop\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n @property\n def is_done(self):\n return self.terminated or self.truncated\n\n def make_stats(self):\n if isinstance(self.obs, dict):\n stats = {\n f\"n_token_{key}\": count_tokens(val)\n for key, val in self.obs.items()\n if isinstance(val, str)\n }\n else:\n stats = {}\n stats.update(self.agent_info.pop(\"stats\", {}))\n\n t = self.profiling\n stats[\"step_elapsed\"] = t.env_stop - t.env_start\n stats[\"agent_elapsed\"] = t.agent_stop - t.agent_start\n\n self.stats = stats\n\n def save_step_info(self, exp_dir, save_json=False, save_screenshot=True, save_som=False):\n # special treatment for some of the observation fields","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.make_stats","uri":"program://AgentLab/function/src.agentlab.experiments.loop.make_stats#L248-L263","kind":"function","name":"make_stats","path":"src/agentlab/experiments/loop.py","language":"python","start_line":248,"end_line":263,"context_start_line":228,"context_end_line":283,"code":" return self.action\n\n def from_reset(self, env: gym.Env, seed: int, obs_preprocessor: callable):\n t = self.profiling\n t.env_start = time.time()\n self.obs, env_info = env.reset(seed=seed)\n self.reward, self.terminated, self.truncated = 0, False, False\n t.env_stop = time.time()\n\n t.action_exec_start = env_info.get(\"recording_start_time\", t.env_start)\n t.action_exect_after_timeout = t.env_stop\n t.action_exec_stop = t.env_stop\n\n if obs_preprocessor:\n self.obs = obs_preprocessor(self.obs)\n\n @property\n def is_done(self):\n return self.terminated or self.truncated\n\n def make_stats(self):\n if isinstance(self.obs, dict):\n stats = {\n 
f\"n_token_{key}\": count_tokens(val)\n for key, val in self.obs.items()\n if isinstance(val, str)\n }\n else:\n stats = {}\n stats.update(self.agent_info.pop(\"stats\", {}))\n\n t = self.profiling\n stats[\"step_elapsed\"] = t.env_stop - t.env_start\n stats[\"agent_elapsed\"] = t.agent_stop - t.agent_start\n\n self.stats = stats\n\n def save_step_info(self, exp_dir, save_json=False, save_screenshot=True, save_som=False):\n # special treatment for some of the observation fields\n if isinstance(self.obs, dict):\n # save screenshots to separate files\n screenshot = self.obs.pop(\"screenshot\", None)\n screenshot_som = self.obs.pop(\"screenshot_som\", None)\n\n if save_screenshot and screenshot is not None:\n img = Image.fromarray(screenshot)\n img.save(exp_dir / f\"screenshot_step_{self.step}.png\")\n\n if save_som and screenshot_som is not None:\n img = Image.fromarray(screenshot_som)\n img.save(exp_dir / f\"screenshot_som_step_{self.step}.png\")\n\n # save goal object (which might contain images) to a separate file to save space\n if self.obs.get(\"goal_object\", False):\n # save the goal object only once (goal should never change once setup)\n goal_object_file = Path(exp_dir) / \"goal_object.pkl.gz\"","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.save_step_info","uri":"program://AgentLab/function/src.agentlab.experiments.loop.save_step_info#L265-L303","kind":"function","name":"save_step_info","path":"src/agentlab/experiments/loop.py","language":"python","start_line":265,"end_line":303,"context_start_line":245,"context_end_line":323,"code":" def is_done(self):\n return self.terminated or self.truncated\n\n def make_stats(self):\n if isinstance(self.obs, dict):\n stats = {\n f\"n_token_{key}\": count_tokens(val)\n for key, val in self.obs.items()\n if isinstance(val, str)\n }\n else:\n stats = {}\n stats.update(self.agent_info.pop(\"stats\", {}))\n\n t = self.profiling\n stats[\"step_elapsed\"] = t.env_stop - t.env_start\n stats[\"agent_elapsed\"] = t.agent_stop - t.agent_start\n\n self.stats = stats\n\n def save_step_info(self, exp_dir, save_json=False, save_screenshot=True, save_som=False):\n # special treatment for some of the observation fields\n if isinstance(self.obs, dict):\n # save screenshots to separate files\n screenshot = self.obs.pop(\"screenshot\", None)\n screenshot_som = self.obs.pop(\"screenshot_som\", None)\n\n if save_screenshot and screenshot is not None:\n img = Image.fromarray(screenshot)\n img.save(exp_dir / f\"screenshot_step_{self.step}.png\")\n\n if save_som and screenshot_som is not None:\n img = Image.fromarray(screenshot_som)\n img.save(exp_dir / f\"screenshot_som_step_{self.step}.png\")\n\n # save goal object (which might contain images) to a separate file to save space\n if self.obs.get(\"goal_object\", False):\n # save the goal object only once (goal should never change once setup)\n goal_object_file = Path(exp_dir) / \"goal_object.pkl.gz\"\n if not goal_object_file.exists():\n with gzip.open(goal_object_file, \"wb\") as f:\n pickle.dump(self.obs[\"goal_object\"], f)\n # set goal_object to a special placeholder value, which indicates it should be loaded from a separate file\n self.obs[\"goal_object\"] = None\n\n with gzip.open(exp_dir / f\"step_{self.step}.pkl.gz\", \"wb\") as f:\n pickle.dump(self, f)\n\n if save_json:\n with open(exp_dir / \"steps_info.json\", \"w\") as f:\n json.dump(self, f, indent=4, cls=DataclassJSONEncoder)\n\n if 
isinstance(self.obs, dict):\n # add the screenshots back to the obs\n # why do we need this?\n if screenshot is not None:\n self.obs[\"screenshot\"] = screenshot\n if screenshot_som is not None:\n self.obs[\"screenshot_som\"] = screenshot_som\n\n\n@dataclass\nclass ExpArgs:\n \"\"\"Arguments to run an experiment, i.e. run an agent in an environment until done.\n\n This dataclass is used to store experiment arguments. It contains\n agent_args and env_args which follow the same principle. It contains helper\n functions to prepare and run experiments.\n\n Attributes:\n -----------\n agent_args: AbstractAgentArgs\n The arguments to instantiate the agent.\n env_args: EnvArgs\n The arguments to instantiate the environment.\n exp_dir: str\n The directory where the experiment will be saved.\n exp_name: str\n The name of the experiment. If None, it will be generated from the","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.make_id","uri":"program://AgentLab/function/src.agentlab.experiments.loop.make_id#L352-L355","kind":"function","name":"make_id","path":"src/agentlab/experiments/loop.py","language":"python","start_line":352,"end_line":355,"context_start_line":332,"context_end_line":375,"code":" order: int (internal)\n The order of the experiment in the batch. It is used to keep track of\n the original order of the experiments in case they are shuffled.\n \"\"\"\n\n agent_args: AbstractAgentArgs\n env_args: EnvArgs\n exp_dir: str = None\n exp_name: str = None\n enable_debug: bool = True\n err_msg: str = None\n stack_trace: str = None\n order: int = None # use to keep the original order the experiments were meant to be launched.\n logging_level: int = logging.INFO\n logging_level_stdout: int = logging.INFO\n exp_id: str = None\n depends_on: tuple[str] = ()\n save_screenshot: bool = True\n save_som: bool = False\n\n def make_id(self):\n \"\"\"Create a unique id for the experiment.\"\"\"\n if self.exp_id is None:\n self.exp_id = str(uuid.uuid4())\n\n def prepare(self, exp_root):\n \"\"\"Prepare the experiment directory and save the experiment arguments.\n\n This enables inspecting experiments that are not run yet.\n\n Args:\n exp_root: str\n The root directory where the experiment will be saved.\n \"\"\"\n if self.env_args.task_seed is None:\n self.env_args.task_seed = np.random.randint(0, SEED_MAX)\n\n if self.exp_name is None:\n task_name = self.env_args.task_name\n self.exp_name = f\"{self.agent_args.agent_name}_on_{task_name}_{self.env_args.task_seed}\"\n\n # if exp_dir exists, it means it's a re-run, move the old one\n if self.exp_dir is not None:\n 
_move_old_exp(self.exp_dir)\n\n self.make_id()\n\n self.exp_date = datetime.now()\n self._make_dir(exp_root)\n\n self.exp_dir.mkdir(parents=True, exist_ok=True)\n with open(self.exp_dir / \"exp_args.pkl\", \"wb\") as f:\n pickle.dump(self, f)\n\n def _make_dir(self, exp_root):\n \"\"\"Create a unique directory for the experiment.\"\"\"\n date_str = self.exp_date.strftime(\"%Y-%m-%d_%H-%M-%S\")\n exp_str = re.sub(\n r\"[\\/:*?<>|]\", \"_\", self.exp_name\n ) # sanitize exp_name to be used as a file name (substitute forbidden characters)\n\n for i in range(1000):\n if i >= 999: # make sure we don't loop forever\n raise ValueError(\"Could not find a unique name for the experiment directory.\")\n\n tag = f\"_{i}\" if i > 0 else \"\"\n self.exp_dir = Path(exp_root) / f\"{date_str}_{exp_str}{tag}\"\n if not self.exp_dir.exists():\n break\n\n # TODO distinguish between agent error and environment or system error. e.g.\n # the parsing error of an action should not be re-run.\n def run(self):\n \"\"\"Run the experiment and save the results\"\"\"\n # start writing logs to run logfile\n self._set_logger()\n\n # log python environment info\n save_package_versions(Path(self.exp_dir))\n\n episode_info = []\n agent = None\n env, step_info, err_msg, stack_trace = None, None, None, None\n try:\n logger.info(f\"Running experiment {self.exp_name} in:\\n {self.exp_dir}\")\n agent = self.agent_args.make_agent()\n if hasattr(agent, \"set_task_name\"):\n agent.set_task_name(self.env_args.task_name)\n","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.run","uri":"program://AgentLab/function/src.agentlab.experiments.loop.run#L404-L513","kind":"function","name":"run","path":"src/agentlab/experiments/loop.py","language":"python","start_line":404,"end_line":513,"context_start_line":384,"context_end_line":533,"code":" pickle.dump(self, f)\n\n def _make_dir(self, exp_root):\n \"\"\"Create a unique directory for the experiment.\"\"\"\n date_str = self.exp_date.strftime(\"%Y-%m-%d_%H-%M-%S\")\n exp_str = re.sub(\n r\"[\\/:*?<>|]\", \"_\", self.exp_name\n ) # sanitize exp_name to be used as a file name (substitute forbidden characters)\n\n for i in range(1000):\n if i >= 999: # make sure we don't loop forever\n raise ValueError(\"Could not find a unique name for the experiment directory.\")\n\n tag = f\"_{i}\" if i > 0 else \"\"\n self.exp_dir = Path(exp_root) / f\"{date_str}_{exp_str}{tag}\"\n if not self.exp_dir.exists():\n break\n\n # TODO distinguish between agent error and environment or system error. 
e.g.\n # the parsing error of an action should not be re-run.\n def run(self):\n \"\"\"Run the experiment and save the results\"\"\"\n # start writing logs to run logfile\n self._set_logger()\n\n # log python environment info\n save_package_versions(Path(self.exp_dir))\n\n episode_info = []\n agent = None\n env, step_info, err_msg, stack_trace = None, None, None, None\n try:\n logger.info(f\"Running experiment {self.exp_name} in:\\n {self.exp_dir}\")\n agent = self.agent_args.make_agent()\n if hasattr(agent, \"set_task_name\"):\n agent.set_task_name(self.env_args.task_name)\n\n logger.debug(\"Agent created.\")\n\n env = self.env_args.make_env(\n action_mapping=agent.action_set.to_python_code,\n exp_dir=self.exp_dir,\n use_raw_page_output=getattr(self.agent_args, \"use_raw_page_output\", False),\n )\n\n logger.debug(\"Environment created.\")\n step_info = StepInfo(step=0)\n episode_info = [step_info]\n step_info.from_reset(\n env, seed=self.env_args.task_seed or 0, obs_preprocessor=agent.obs_preprocessor\n )\n logger.debug(\"Environment reset.\")\n\n while not step_info.is_done: # set a limit\n logger.debug(f\"Starting step {step_info.step}.\")\n action = step_info.from_action(agent)\n logger.debug(f\"Agent chose action:\\n {action}\")\n\n if action is None:\n # will end the episode after saving the step info.\n step_info.truncated = True\n\n step_info.save_step_info(\n self.exp_dir, save_screenshot=self.save_screenshot, save_som=self.save_som\n )\n logger.debug(\"Step info saved.\")\n\n if hasattr(env.unwrapped, \"chat\") and isinstance(env.unwrapped.chat, Chat):\n _send_chat_info(env.unwrapped.chat, action, step_info.agent_info)\n logger.debug(\"Chat info sent.\")\n\n if action is None:\n logger.debug(\"Agent returned None action. Ending episode.\")\n break\n\n step_info = StepInfo(step=step_info.step + 1)\n episode_info.append(step_info)\n\n logger.debug(\"Sending action to environment.\")\n step_info.from_step(env, action, obs_preprocessor=agent.obs_preprocessor)\n logger.debug(\"Environment stepped.\")\n if step_info.is_done:\n logger.debug(\n f\"Episode done: terminated: {step_info.terminated}, truncated: {step_info.truncated}.\"\n )\n\n except Exception as e:\n err_msg = f\"Exception uncaught by agent or environment in task {self.env_args.task_name}.\\n{type(e).__name__}:\\n{e}\"\n stack_trace = traceback.format_exc()\n\n self.err_msg = err_msg\n self.stack_trace = stack_trace\n\n logger.warning(err_msg + \"\\n\" + stack_trace)\n if _is_debugging() and self.enable_debug:\n logger.warning(\"Debug mode is enabled. 
Raising the error.\")\n raise\n\n finally:\n try:\n if step_info is not None:\n step_info.save_step_info(\n self.exp_dir, save_screenshot=self.save_screenshot, save_som=self.save_som\n )\n except Exception as e:\n logger.error(f\"Error while saving step info in the finally block: {e}\")\n try:\n if (\n not err_msg\n and len(episode_info) > 0\n and not (episode_info[-1].terminated or episode_info[-1].truncated)\n ):\n e = KeyboardInterrupt(\"Early termination??\")\n err_msg = f\"Exception uncaught by agent or environment in task {self.env_args.task_name}.\\n{type(e).__name__}:\\n{e}\"\n logger.info(\"Saving experiment info.\")\n self.save_summary_info(episode_info, Path(self.exp_dir), err_msg, stack_trace)\n if TapeAgent is not None and isinstance(agent, TapeAgent):\n task = getattr(env, \"task\", {})\n save_tape(self.exp_dir, episode_info, task, agent.final_tape)\n except Exception as e:\n logger.exception(f\"Error while saving experiment info: {e}\")\n try:\n if env is not None:\n env.close()\n except Exception as e:\n logger.exception(f\"Error while closing the environment: {e}\")\n try:\n self._unset_logger() # stop writing logs to run logfile\n except Exception as e:\n logger.exception(f\"Error while unsetting the logger: {e}\")\n\n def _set_logger(self):\n # output logging traces to a log file\n file_handler = logging.FileHandler(self.exp_dir / \"experiment.log\")\n file_handler.setLevel(self.logging_level) # same level as console outputs\n formatter = logging.Formatter(\n \"%(asctime)s - %(process)d - %(name)s - %(levelname)s - %(message)s\"\n )\n file_handler.setFormatter(formatter)\n # output handler\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(self.logging_level_stdout)\n stream_handler.setFormatter(formatter)\n # setup root logger\n root_logger = logging.getLogger()\n\n # remove previous stream handlers\n for handler in root_logger.handlers:\n if isinstance(handler, logging.StreamHandler):\n root_logger.removeHandler(handler)","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop._set_logger","uri":"program://AgentLab/function/src.agentlab.experiments.loop._set_logger#L515-L542","kind":"function","name":"_set_logger","path":"src/agentlab/experiments/loop.py","language":"python","start_line":515,"end_line":542,"context_start_line":495,"context_end_line":562,"code":" ):\n e = KeyboardInterrupt(\"Early termination??\")\n err_msg = f\"Exception uncaught by agent or environment in task {self.env_args.task_name}.\\n{type(e).__name__}:\\n{e}\"\n logger.info(\"Saving experiment info.\")\n self.save_summary_info(episode_info, Path(self.exp_dir), err_msg, stack_trace)\n if TapeAgent is not None and isinstance(agent, TapeAgent):\n task = getattr(env, \"task\", {})\n save_tape(self.exp_dir, episode_info, task, agent.final_tape)\n except Exception as e:\n logger.exception(f\"Error while saving experiment info: {e}\")\n try:\n if env is not None:\n env.close()\n except Exception as e:\n logger.exception(f\"Error while closing the environment: {e}\")\n try:\n self._unset_logger() # stop writing logs to run logfile\n except Exception as e:\n logger.exception(f\"Error while unsetting the logger: {e}\")\n\n def _set_logger(self):\n # output logging traces to a log file\n file_handler = logging.FileHandler(self.exp_dir / \"experiment.log\")\n file_handler.setLevel(self.logging_level) # same level as console outputs\n formatter = logging.Formatter(\n \"%(asctime)s 
- %(process)d - %(name)s - %(levelname)s - %(message)s\"\n )\n file_handler.setFormatter(formatter)\n # output handler\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(self.logging_level_stdout)\n stream_handler.setFormatter(formatter)\n # setup root logger\n root_logger = logging.getLogger()\n\n # remove previous stream handlers\n for handler in root_logger.handlers:\n if isinstance(handler, logging.StreamHandler):\n root_logger.removeHandler(handler)\n\n root_logger.setLevel(self.logging_level)\n root_logger.addHandler(file_handler)\n root_logger.addHandler(stream_handler)\n # setup openai logger (don't go below INFO verbosity)\n openai_logger = logging.getLogger(\"openai._base_client\")\n openai_logger.setLevel(max(logging.INFO, self.logging_level))\n\n self.logging_file_handler = file_handler\n\n def _unset_logger(self):\n root_logger = logging.getLogger()\n root_logger.removeHandler(self.logging_file_handler)\n\n def save_summary_info(\n self,\n episode_info: list[StepInfo],\n exp_dir: Path,\n err_msg: str | None,\n stack_trace: str | None,\n ):\n # bring err from agent_info to the top level\n if err_msg is None:\n err_msg, stack_trace = _extract_err_msg(episode_info)\n else:\n # useful until we get a proper place in agent_xray to view error\n # messages.\n if len(episode_info) == 0:\n episode_info.append(StepInfo())","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop._unset_logger","uri":"program://AgentLab/function/src.agentlab.experiments.loop._unset_logger#L544-L546","kind":"function","name":"_unset_logger","path":"src/agentlab/experiments/loop.py","language":"python","start_line":544,"end_line":546,"context_start_line":524,"context_end_line":566,"code":" stream_handler = logging.StreamHandler()\n stream_handler.setLevel(self.logging_level_stdout)\n stream_handler.setFormatter(formatter)\n # setup root logger\n root_logger = logging.getLogger()\n\n # remove previous stream handlers\n for handler in root_logger.handlers:\n if isinstance(handler, logging.StreamHandler):\n root_logger.removeHandler(handler)\n\n root_logger.setLevel(self.logging_level)\n root_logger.addHandler(file_handler)\n root_logger.addHandler(stream_handler)\n # setup openai logger (don't go below INFO verbosity)\n openai_logger = logging.getLogger(\"openai._base_client\")\n openai_logger.setLevel(max(logging.INFO, self.logging_level))\n\n self.logging_file_handler = file_handler\n\n def _unset_logger(self):\n root_logger = logging.getLogger()\n root_logger.removeHandler(self.logging_file_handler)\n\n def save_summary_info(\n self,\n episode_info: list[StepInfo],\n exp_dir: Path,\n err_msg: str | None,\n stack_trace: str | None,\n ):\n # bring err from agent_info to the top level\n if err_msg is None:\n err_msg, stack_trace = _extract_err_msg(episode_info)\n else:\n # useful until we get a proper place in agent_xray to view error\n # messages.\n if len(episode_info) == 0:\n episode_info.append(StepInfo())\n episode_info[-1].agent_info[\"err_msg\"] = err_msg\n episode_info[-1].agent_info[\"stack_trace\"] = stack_trace\n\n summary_info = dict(","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} 
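`_set_logger` and `_unset_logger` above implement a temporary-handler pattern: attach a per-experiment `FileHandler` (plus a fresh stream handler) to the root logger for the duration of a run, keep a reference to the file handler, and detach it afterwards so the next run does not keep writing into the same log file. A stripped-down stdlib sketch of that pattern, with an illustrative path:

```python
# Stripped-down sketch of the _set_logger/_unset_logger pattern (illustrative path).
import logging
from pathlib import Path

exp_dir = Path("/tmp/example_exp")
exp_dir.mkdir(parents=True, exist_ok=True)

formatter = logging.Formatter(
    "%(asctime)s - %(process)d - %(name)s - %(levelname)s - %(message)s"
)
file_handler = logging.FileHandler(exp_dir / "experiment.log")
file_handler.setFormatter(formatter)

root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
root_logger.addHandler(file_handler)  # _set_logger keeps a reference for later removal
try:
    logging.getLogger("my_experiment").info("this line lands in experiment.log")
finally:
    root_logger.removeHandler(file_handler)  # what _unset_logger does
    file_handler.close()
```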
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.save_summary_info","uri":"program://AgentLab/function/src.agentlab.experiments.loop.save_summary_info#L548-L581","kind":"function","name":"save_summary_info","path":"src/agentlab/experiments/loop.py","language":"python","start_line":548,"end_line":581,"context_start_line":528,"context_end_line":601,"code":" root_logger = logging.getLogger()\n\n # remove previous stream handlers\n for handler in root_logger.handlers:\n if isinstance(handler, logging.StreamHandler):\n root_logger.removeHandler(handler)\n\n root_logger.setLevel(self.logging_level)\n root_logger.addHandler(file_handler)\n root_logger.addHandler(stream_handler)\n # setup openai logger (don't go below INFO verbosity)\n openai_logger = logging.getLogger(\"openai._base_client\")\n openai_logger.setLevel(max(logging.INFO, self.logging_level))\n\n self.logging_file_handler = file_handler\n\n def _unset_logger(self):\n root_logger = logging.getLogger()\n root_logger.removeHandler(self.logging_file_handler)\n\n def save_summary_info(\n self,\n episode_info: list[StepInfo],\n exp_dir: Path,\n err_msg: str | None,\n stack_trace: str | None,\n ):\n # bring err from agent_info to the top level\n if err_msg is None:\n err_msg, stack_trace = _extract_err_msg(episode_info)\n else:\n # useful until we get a proper place in agent_xray to view error\n # messages.\n if len(episode_info) == 0:\n episode_info.append(StepInfo())\n episode_info[-1].agent_info[\"err_msg\"] = err_msg\n episode_info[-1].agent_info[\"stack_trace\"] = stack_trace\n\n summary_info = dict(\n n_steps=len(episode_info) - 1,\n cum_reward=sum([step.reward for step in episode_info]),\n cum_raw_reward=sum([step.raw_reward for step in episode_info if step.raw_reward]),\n err_msg=err_msg,\n stack_trace=stack_trace,\n )\n for key, val in _aggregate_episode_stats(episode_info).items():\n summary_info[f\"stats.{key}\"] = val\n\n if len(episode_info) > 0:\n summary_info[\"terminated\"] = episode_info[-1].terminated\n summary_info[\"truncated\"] = episode_info[-1].truncated\n\n with open(exp_dir / \"summary_info.json\", \"w\") as f:\n json.dump(summary_info, f, indent=4)\n\n\ndef _extract_err_msg(episode_info: list[StepInfo]):\n \"\"\"Extract the last error message from the episode info.\"\"\"\n errors = [(None, None)]\n for step_info in episode_info:\n if step_info.agent_info is None:\n continue\n err_msg = step_info.agent_info.get(\"err_msg\", None)\n if err_msg is not None:\n errors.append((err_msg, step_info.agent_info.get(\"stack_trace\", None)))\n\n return errors[-1]\n\n\ndef _aggregate_episode_stats(episode_info: list[StepInfo]):\n \"\"\"Aggregate StepInfo.stats across episodes.\n\n It will compute the sum and max of each value in the stats dict.\n These two summaries should cover many use cases. 
If more are needed, the","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.__init__","uri":"program://AgentLab/function/src.agentlab.experiments.loop.__init__#L658-L665","kind":"function","name":"__init__","path":"src/agentlab/experiments/loop.py","language":"python","start_line":658,"end_line":665,"context_start_line":638,"context_end_line":685,"code":"\nclass ExpResult:\n \"\"\"Helper class to load and visualize the results of an experiment.\n\n Attributes are loaded lazily.\n\n Attributes (lazily loaded):\n exp_args: ExpArgs, the arguments of the experiment.\n steps_info: list[StepInfo], the information of each step so far\n summary_info: dict, the summary of the experiment.\n screenshots: list[Image], the screenshots of each step.\n screenshots_som: list[Image], the screenshots of each step with set of\n marks imprinted.\n flat_exp_args: dict, the flattened version of exp_args.\n chat_video_path: Path, the path to the chat video. (if record_video=True)\n task_video_path: Path, the path to the task video. (if record_video=True)\n combined_video_path: Path, the path to the combined video. (if video was\n combined)\n \"\"\"\n\n def __init__(self, exp_dir) -> None:\n self.exp_dir = Path(exp_dir)\n self._exp_args = None\n self._steps_info = {}\n self._summary_info = None\n self._screenshots = {}\n self._flat_exp_args = None\n self._logs = None\n\n @property\n def exp_args(self) -> ExpArgs:\n if self._exp_args is None:\n with open(self.exp_dir / \"exp_args.pkl\", \"rb\") as f:\n self._exp_args = pickle.load(f)\n # in case experiments were moved\n self._exp_args.exp_dir = self.exp_dir\n return self._exp_args\n\n def get_step_info(self, step: int) -> StepInfo:\n \"\"\"Load the step info from the file and return it.\"\"\"\n if self._steps_info.get(step, None) is None:\n with gzip.open(self.exp_dir / f\"step_{step}.pkl.gz\", \"rb\") as f:\n self._steps_info[step] = pickle.load(f)\n if self._steps_info[step].obs:\n if \"screenshot\" not in self._steps_info[step].obs:\n try:\n self._steps_info[step].obs[\"screenshot\"] = np.array(\n self.get_screenshot(step), dtype=np.uint8","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.exp_args","uri":"program://AgentLab/function/src.agentlab.experiments.loop.exp_args#L668-L674","kind":"function","name":"exp_args","path":"src/agentlab/experiments/loop.py","language":"python","start_line":668,"end_line":674,"context_start_line":648,"context_end_line":694,"code":" screenshots: list[Image], the screenshots of each step.\n screenshots_som: list[Image], the screenshots of each step with set of\n marks imprinted.\n flat_exp_args: dict, the flattened version of exp_args.\n chat_video_path: Path, the path to the chat video. (if record_video=True)\n task_video_path: Path, the path to the task video. (if record_video=True)\n combined_video_path: Path, the path to the combined video. 
(if video was\n combined)\n \"\"\"\n\n def __init__(self, exp_dir) -> None:\n self.exp_dir = Path(exp_dir)\n self._exp_args = None\n self._steps_info = {}\n self._summary_info = None\n self._screenshots = {}\n self._flat_exp_args = None\n self._logs = None\n\n @property\n def exp_args(self) -> ExpArgs:\n if self._exp_args is None:\n with open(self.exp_dir / \"exp_args.pkl\", \"rb\") as f:\n self._exp_args = pickle.load(f)\n # in case experiments were moved\n self._exp_args.exp_dir = self.exp_dir\n return self._exp_args\n\n def get_step_info(self, step: int) -> StepInfo:\n \"\"\"Load the step info from the file and return it.\"\"\"\n if self._steps_info.get(step, None) is None:\n with gzip.open(self.exp_dir / f\"step_{step}.pkl.gz\", \"rb\") as f:\n self._steps_info[step] = pickle.load(f)\n if self._steps_info[step].obs:\n if \"screenshot\" not in self._steps_info[step].obs:\n try:\n self._steps_info[step].obs[\"screenshot\"] = np.array(\n self.get_screenshot(step), dtype=np.uint8\n )\n except FileNotFoundError:\n pass\n if \"screenshot_som\" not in self._steps_info[step].obs:\n try:\n self._steps_info[step].obs[\"screenshot_som\"] = np.array(\n self.get_screenshot(step, som=True), dtype=np.uint8\n )\n except FileNotFoundError:","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.get_step_info","uri":"program://AgentLab/function/src.agentlab.experiments.loop.get_step_info#L676-L705","kind":"function","name":"get_step_info","path":"src/agentlab/experiments/loop.py","language":"python","start_line":676,"end_line":705,"context_start_line":656,"context_end_line":725,"code":" \"\"\"\n\n def __init__(self, exp_dir) -> None:\n self.exp_dir = Path(exp_dir)\n self._exp_args = None\n self._steps_info = {}\n self._summary_info = None\n self._screenshots = {}\n self._flat_exp_args = None\n self._logs = None\n\n @property\n def exp_args(self) -> ExpArgs:\n if self._exp_args is None:\n with open(self.exp_dir / \"exp_args.pkl\", \"rb\") as f:\n self._exp_args = pickle.load(f)\n # in case experiments were moved\n self._exp_args.exp_dir = self.exp_dir\n return self._exp_args\n\n def get_step_info(self, step: int) -> StepInfo:\n \"\"\"Load the step info from the file and return it.\"\"\"\n if self._steps_info.get(step, None) is None:\n with gzip.open(self.exp_dir / f\"step_{step}.pkl.gz\", \"rb\") as f:\n self._steps_info[step] = pickle.load(f)\n if self._steps_info[step].obs:\n if \"screenshot\" not in self._steps_info[step].obs:\n try:\n self._steps_info[step].obs[\"screenshot\"] = np.array(\n self.get_screenshot(step), dtype=np.uint8\n )\n except FileNotFoundError:\n pass\n if \"screenshot_som\" not in self._steps_info[step].obs:\n try:\n self._steps_info[step].obs[\"screenshot_som\"] = np.array(\n self.get_screenshot(step, som=True), dtype=np.uint8\n )\n except FileNotFoundError:\n pass\n # if goal_object is set to None, it indicates it has been saved into a separate file\n if (\n \"goal_object\" in self._steps_info[step].obs\n and self._steps_info[step].obs[\"goal_object\"] is None\n ):\n with gzip.open(self.exp_dir / \"goal_object.pkl.gz\", \"rb\") as f:\n goal_object = pickle.load(f)\n self._steps_info[step].obs[\"goal_object\"] = goal_object\n\n return self._steps_info[step]\n\n @property\n def steps_info(self) -> list[StepInfo]:\n step_files = list(self.exp_dir.glob(\"step_*.pkl.gz\"))\n for file in step_files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n 
self.get_step_info(step)\n\n return [self._steps_info[i] for i in range(len(self._steps_info))]\n\n @property\n def summary_info(self) -> dict:\n if self._summary_info is None:\n with open(self.exp_dir / \"summary_info.json\", \"r\") as f:\n # if length is zero raise file not found error\n if os.fstat(f.fileno()).st_size == 0:\n raise FileNotFoundError(\"summary_info.json is empty.\")\n self._summary_info = json.load(f)\n return self._summary_info\n","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.steps_info","uri":"program://AgentLab/function/src.agentlab.experiments.loop.steps_info#L708-L714","kind":"function","name":"steps_info","path":"src/agentlab/experiments/loop.py","language":"python","start_line":708,"end_line":714,"context_start_line":688,"context_end_line":734,"code":" pass\n if \"screenshot_som\" not in self._steps_info[step].obs:\n try:\n self._steps_info[step].obs[\"screenshot_som\"] = np.array(\n self.get_screenshot(step, som=True), dtype=np.uint8\n )\n except FileNotFoundError:\n pass\n # if goal_object is set to None, it indicates it has been saved into a separate file\n if (\n \"goal_object\" in self._steps_info[step].obs\n and self._steps_info[step].obs[\"goal_object\"] is None\n ):\n with gzip.open(self.exp_dir / \"goal_object.pkl.gz\", \"rb\") as f:\n goal_object = pickle.load(f)\n self._steps_info[step].obs[\"goal_object\"] = goal_object\n\n return self._steps_info[step]\n\n @property\n def steps_info(self) -> list[StepInfo]:\n step_files = list(self.exp_dir.glob(\"step_*.pkl.gz\"))\n for file in step_files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_step_info(step)\n\n return [self._steps_info[i] for i in range(len(self._steps_info))]\n\n @property\n def summary_info(self) -> dict:\n if self._summary_info is None:\n with open(self.exp_dir / \"summary_info.json\", \"r\") as f:\n # if length is zero raise file not found error\n if os.fstat(f.fileno()).st_size == 0:\n raise FileNotFoundError(\"summary_info.json is empty.\")\n self._summary_info = json.load(f)\n return self._summary_info\n\n def get_screenshot(self, step: int, som=False) -> Image:\n key = (step, som)\n if self._screenshots.get(key, None) is None:\n file_path = self.get_screenshot_path(step, som=som)\n self._screenshots[key] = Image.open(file_path).convert(\"RGB\")\n return self._screenshots[key]\n\n def get_screenshot_path(self, step: int, som=False) -> Path:\n \"\"\"Return the path to the screenshot file.\"\"\"","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.summary_info","uri":"program://AgentLab/function/src.agentlab.experiments.loop.summary_info#L717-L724","kind":"function","name":"summary_info","path":"src/agentlab/experiments/loop.py","language":"python","start_line":717,"end_line":724,"context_start_line":697,"context_end_line":744,"code":" if (\n \"goal_object\" in self._steps_info[step].obs\n and self._steps_info[step].obs[\"goal_object\"] is None\n ):\n with gzip.open(self.exp_dir / \"goal_object.pkl.gz\", \"rb\") as f:\n goal_object = pickle.load(f)\n self._steps_info[step].obs[\"goal_object\"] = goal_object\n\n return self._steps_info[step]\n\n @property\n def steps_info(self) -> list[StepInfo]:\n step_files = list(self.exp_dir.glob(\"step_*.pkl.gz\"))\n for file in step_files:\n step = 
int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_step_info(step)\n\n return [self._steps_info[i] for i in range(len(self._steps_info))]\n\n @property\n def summary_info(self) -> dict:\n if self._summary_info is None:\n with open(self.exp_dir / \"summary_info.json\", \"r\") as f:\n # if length is zero raise file not found error\n if os.fstat(f.fileno()).st_size == 0:\n raise FileNotFoundError(\"summary_info.json is empty.\")\n self._summary_info = json.load(f)\n return self._summary_info\n\n def get_screenshot(self, step: int, som=False) -> Image:\n key = (step, som)\n if self._screenshots.get(key, None) is None:\n file_path = self.get_screenshot_path(step, som=som)\n self._screenshots[key] = Image.open(file_path).convert(\"RGB\")\n return self._screenshots[key]\n\n def get_screenshot_path(self, step: int, som=False) -> Path:\n \"\"\"Return the path to the screenshot file.\"\"\"\n file_name = f\"screenshot_{'som_' if som else ''}step_{step}\"\n for ext in [\".png\", \".jpg\"]:\n file_path = self.exp_dir / (file_name + ext)\n if file_path.exists():\n return file_path\n raise FileNotFoundError(\n f\"No screenshot found for step {step} (som={som}) in {self.exp_dir}\"\n )\n\n def get_screenshots(self, som=False):","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.get_screenshot","uri":"program://AgentLab/function/src.agentlab.experiments.loop.get_screenshot#L726-L731","kind":"function","name":"get_screenshot","path":"src/agentlab/experiments/loop.py","language":"python","start_line":726,"end_line":731,"context_start_line":706,"context_end_line":751,"code":"\n @property\n def steps_info(self) -> list[StepInfo]:\n step_files = list(self.exp_dir.glob(\"step_*.pkl.gz\"))\n for file in step_files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_step_info(step)\n\n return [self._steps_info[i] for i in range(len(self._steps_info))]\n\n @property\n def summary_info(self) -> dict:\n if self._summary_info is None:\n with open(self.exp_dir / \"summary_info.json\", \"r\") as f:\n # if length is zero raise file not found error\n if os.fstat(f.fileno()).st_size == 0:\n raise FileNotFoundError(\"summary_info.json is empty.\")\n self._summary_info = json.load(f)\n return self._summary_info\n\n def get_screenshot(self, step: int, som=False) -> Image:\n key = (step, som)\n if self._screenshots.get(key, None) is None:\n file_path = self.get_screenshot_path(step, som=som)\n self._screenshots[key] = Image.open(file_path).convert(\"RGB\")\n return self._screenshots[key]\n\n def get_screenshot_path(self, step: int, som=False) -> Path:\n \"\"\"Return the path to the screenshot file.\"\"\"\n file_name = f\"screenshot_{'som_' if som else ''}step_{step}\"\n for ext in [\".png\", \".jpg\"]:\n file_path = self.exp_dir / (file_name + ext)\n if file_path.exists():\n return file_path\n raise FileNotFoundError(\n f\"No screenshot found for step {step} (som={som}) in {self.exp_dir}\"\n )\n\n def get_screenshots(self, som=False):\n files = list(self.exp_dir.glob(\"screenshot_step_*\"))\n max_step = 0\n for file in files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_screenshot(step, som=som)\n max_step = max(max_step, step)\n return [self._screenshots.get((i, som), None) for i in range(max_step + 1)]","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} 
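Since every `ExpResult` attribute above is loaded lazily from files inside `exp_dir`, inspecting a finished run amounts to constructing the helper and touching the properties you need; each access is cached after the first load. A usage sketch, where the directory path is a placeholder and the import path is inferred from these records:

```python
# Usage sketch for ExpResult ("path/to/exp_dir" is a placeholder).
from agentlab.experiments.loop import ExpResult  # import path inferred from these records

result = ExpResult("path/to/exp_dir")

print(result.summary_info["cum_reward"])  # parsed once from summary_info.json, then cached
print(len(result.steps_info))             # one StepInfo per step_*.pkl.gz in the directory
result.get_screenshot(0).save("step_0_copy.png")  # loads screenshot_step_0.png lazily
```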
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.get_screenshot_path","uri":"program://AgentLab/function/src.agentlab.experiments.loop.get_screenshot_path#L733-L742","kind":"function","name":"get_screenshot_path","path":"src/agentlab/experiments/loop.py","language":"python","start_line":733,"end_line":742,"context_start_line":713,"context_end_line":762,"code":"\n return [self._steps_info[i] for i in range(len(self._steps_info))]\n\n @property\n def summary_info(self) -> dict:\n if self._summary_info is None:\n with open(self.exp_dir / \"summary_info.json\", \"r\") as f:\n # if length is zero raise file not found error\n if os.fstat(f.fileno()).st_size == 0:\n raise FileNotFoundError(\"summary_info.json is empty.\")\n self._summary_info = json.load(f)\n return self._summary_info\n\n def get_screenshot(self, step: int, som=False) -> Image:\n key = (step, som)\n if self._screenshots.get(key, None) is None:\n file_path = self.get_screenshot_path(step, som=som)\n self._screenshots[key] = Image.open(file_path).convert(\"RGB\")\n return self._screenshots[key]\n\n def get_screenshot_path(self, step: int, som=False) -> Path:\n \"\"\"Return the path to the screenshot file.\"\"\"\n file_name = f\"screenshot_{'som_' if som else ''}step_{step}\"\n for ext in [\".png\", \".jpg\"]:\n file_path = self.exp_dir / (file_name + ext)\n if file_path.exists():\n return file_path\n raise FileNotFoundError(\n f\"No screenshot found for step {step} (som={som}) in {self.exp_dir}\"\n )\n\n def get_screenshots(self, som=False):\n files = list(self.exp_dir.glob(\"screenshot_step_*\"))\n max_step = 0\n for file in files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_screenshot(step, som=som)\n max_step = max(max_step, step)\n return [self._screenshots.get((i, som), None) for i in range(max_step + 1)]\n\n @property\n def screenshots(self):\n return self.get_screenshots(som=False)\n\n @property\n def screenshots_som(self):\n return self.get_screenshots(som=True)\n\n @property\n def flat_exp_args(self) -> dict:","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.get_screenshots","uri":"program://AgentLab/function/src.agentlab.experiments.loop.get_screenshots#L744-L751","kind":"function","name":"get_screenshots","path":"src/agentlab/experiments/loop.py","language":"python","start_line":744,"end_line":751,"context_start_line":724,"context_end_line":771,"code":" return self._summary_info\n\n def get_screenshot(self, step: int, som=False) -> Image:\n key = (step, som)\n if self._screenshots.get(key, None) is None:\n file_path = self.get_screenshot_path(step, som=som)\n self._screenshots[key] = Image.open(file_path).convert(\"RGB\")\n return self._screenshots[key]\n\n def get_screenshot_path(self, step: int, som=False) -> Path:\n \"\"\"Return the path to the screenshot file.\"\"\"\n file_name = f\"screenshot_{'som_' if som else ''}step_{step}\"\n for ext in [\".png\", \".jpg\"]:\n file_path = self.exp_dir / (file_name + ext)\n if file_path.exists():\n return file_path\n raise FileNotFoundError(\n f\"No screenshot found for step {step} (som={som}) in {self.exp_dir}\"\n )\n\n def get_screenshots(self, som=False):\n files = list(self.exp_dir.glob(\"screenshot_step_*\"))\n max_step = 0\n for file in files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_screenshot(step, som=som)\n max_step = max(max_step, step)\n return [self._screenshots.get((i, som), 
None) for i in range(max_step + 1)]\n\n @property\n def screenshots(self):\n return self.get_screenshots(som=False)\n\n @property\n def screenshots_som(self):\n return self.get_screenshots(som=True)\n\n @property\n def flat_exp_args(self) -> dict:\n \"\"\"Return a dict with exp_args flattened.\"\"\"\n if self._flat_exp_args is None:\n exp_args = asdict(self.exp_args)\n # this will flatten nested dicts\n self._flat_exp_args = _flatten_dict(exp_args)\n return self._flat_exp_args\n\n def get_exp_record(self) -> dict:\n \"\"\"Return a dict with exp_args flattened and summary_info.\"\"\"","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.screenshots","uri":"program://AgentLab/function/src.agentlab.experiments.loop.screenshots#L754-L755","kind":"function","name":"screenshots","path":"src/agentlab/experiments/loop.py","language":"python","start_line":754,"end_line":755,"context_start_line":734,"context_end_line":775,"code":" \"\"\"Return the path to the screenshot file.\"\"\"\n file_name = f\"screenshot_{'som_' if som else ''}step_{step}\"\n for ext in [\".png\", \".jpg\"]:\n file_path = self.exp_dir / (file_name + ext)\n if file_path.exists():\n return file_path\n raise FileNotFoundError(\n f\"No screenshot found for step {step} (som={som}) in {self.exp_dir}\"\n )\n\n def get_screenshots(self, som=False):\n files = list(self.exp_dir.glob(\"screenshot_step_*\"))\n max_step = 0\n for file in files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_screenshot(step, som=som)\n max_step = max(max_step, step)\n return [self._screenshots.get((i, som), None) for i in range(max_step + 1)]\n\n @property\n def screenshots(self):\n return self.get_screenshots(som=False)\n\n @property\n def screenshots_som(self):\n return self.get_screenshots(som=True)\n\n @property\n def flat_exp_args(self) -> dict:\n \"\"\"Return a dict with exp_args flattened.\"\"\"\n if self._flat_exp_args is None:\n exp_args = asdict(self.exp_args)\n # this will flatten nested dicts\n self._flat_exp_args = _flatten_dict(exp_args)\n return self._flat_exp_args\n\n def get_exp_record(self) -> dict:\n \"\"\"Return a dict with exp_args flattened and summary_info.\"\"\"\n record = {\"exp_dir\": self.exp_dir}\n try:\n record.update(self.flat_exp_args)\n except FileNotFoundError:","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.screenshots_som","uri":"program://AgentLab/function/src.agentlab.experiments.loop.screenshots_som#L758-L759","kind":"function","name":"screenshots_som","path":"src/agentlab/experiments/loop.py","language":"python","start_line":758,"end_line":759,"context_start_line":738,"context_end_line":779,"code":" if file_path.exists():\n return file_path\n raise FileNotFoundError(\n f\"No screenshot found for step {step} (som={som}) in {self.exp_dir}\"\n )\n\n def get_screenshots(self, som=False):\n files = list(self.exp_dir.glob(\"screenshot_step_*\"))\n max_step = 0\n for file in files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_screenshot(step, som=som)\n max_step = max(max_step, step)\n return [self._screenshots.get((i, som), None) for i in range(max_step + 1)]\n\n @property\n def screenshots(self):\n return self.get_screenshots(som=False)\n\n @property\n def screenshots_som(self):\n return self.get_screenshots(som=True)\n\n @property\n def 
flat_exp_args(self) -> dict:\n \"\"\"Return a dict with exp_args flattened.\"\"\"\n if self._flat_exp_args is None:\n exp_args = asdict(self.exp_args)\n # this will flatten nested dicts\n self._flat_exp_args = _flatten_dict(exp_args)\n return self._flat_exp_args\n\n def get_exp_record(self) -> dict:\n \"\"\"Return a dict with exp_args flattened and summary_info.\"\"\"\n record = {\"exp_dir\": self.exp_dir}\n try:\n record.update(self.flat_exp_args)\n except FileNotFoundError:\n pass\n try:\n record.update(self.summary_info)\n except FileNotFoundError:","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.flat_exp_args","uri":"program://AgentLab/function/src.agentlab.experiments.loop.flat_exp_args#L762-L768","kind":"function","name":"flat_exp_args","path":"src/agentlab/experiments/loop.py","language":"python","start_line":762,"end_line":768,"context_start_line":742,"context_end_line":788,"code":" )\n\n def get_screenshots(self, som=False):\n files = list(self.exp_dir.glob(\"screenshot_step_*\"))\n max_step = 0\n for file in files:\n step = int(file.name.split(\"_\")[-1].split(\".\")[0])\n self.get_screenshot(step, som=som)\n max_step = max(max_step, step)\n return [self._screenshots.get((i, som), None) for i in range(max_step + 1)]\n\n @property\n def screenshots(self):\n return self.get_screenshots(som=False)\n\n @property\n def screenshots_som(self):\n return self.get_screenshots(som=True)\n\n @property\n def flat_exp_args(self) -> dict:\n \"\"\"Return a dict with exp_args flattened.\"\"\"\n if self._flat_exp_args is None:\n exp_args = asdict(self.exp_args)\n # this will flatten nested dicts\n self._flat_exp_args = _flatten_dict(exp_args)\n return self._flat_exp_args\n\n def get_exp_record(self) -> dict:\n \"\"\"Return a dict with exp_args flattened and summary_info.\"\"\"\n record = {\"exp_dir\": self.exp_dir}\n try:\n record.update(self.flat_exp_args)\n except FileNotFoundError:\n pass\n try:\n record.update(self.summary_info)\n except FileNotFoundError:\n pass\n return record\n\n @property\n def chat_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"chat_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No chat_video found in {self.exp_dir}\")","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.get_exp_record","uri":"program://AgentLab/function/src.agentlab.experiments.loop.get_exp_record#L770-L781","kind":"function","name":"get_exp_record","path":"src/agentlab/experiments/loop.py","language":"python","start_line":770,"end_line":781,"context_start_line":750,"context_end_line":801,"code":" max_step = max(max_step, step)\n return [self._screenshots.get((i, som), None) for i in range(max_step + 1)]\n\n @property\n def screenshots(self):\n return self.get_screenshots(som=False)\n\n @property\n def screenshots_som(self):\n return self.get_screenshots(som=True)\n\n @property\n def flat_exp_args(self) -> dict:\n \"\"\"Return a dict with exp_args flattened.\"\"\"\n if self._flat_exp_args is None:\n exp_args = asdict(self.exp_args)\n # this will flatten nested dicts\n self._flat_exp_args = _flatten_dict(exp_args)\n return self._flat_exp_args\n\n def get_exp_record(self) -> dict:\n \"\"\"Return a dict with exp_args flattened and summary_info.\"\"\"\n record = {\"exp_dir\": self.exp_dir}\n try:\n record.update(self.flat_exp_args)\n 
except FileNotFoundError:\n pass\n try:\n record.update(self.summary_info)\n except FileNotFoundError:\n pass\n return record\n\n @property\n def chat_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"chat_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No chat_video found in {self.exp_dir}\")\n\n @property\n def task_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"task_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No task_video found in {self.exp_dir}\")\n\n @property\n def combined_video_path(self) -> Path:\n return self.exp_dir / \"combined_video.mp4\"\n\n @property","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.chat_video_path","uri":"program://AgentLab/function/src.agentlab.experiments.loop.chat_video_path#L784-L788","kind":"function","name":"chat_video_path","path":"src/agentlab/experiments/loop.py","language":"python","start_line":784,"end_line":788,"context_start_line":764,"context_end_line":808,"code":" if self._flat_exp_args is None:\n exp_args = asdict(self.exp_args)\n # this will flatten nested dicts\n self._flat_exp_args = _flatten_dict(exp_args)\n return self._flat_exp_args\n\n def get_exp_record(self) -> dict:\n \"\"\"Return a dict with exp_args flattened and summary_info.\"\"\"\n record = {\"exp_dir\": self.exp_dir}\n try:\n record.update(self.flat_exp_args)\n except FileNotFoundError:\n pass\n try:\n record.update(self.summary_info)\n except FileNotFoundError:\n pass\n return record\n\n @property\n def chat_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"chat_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No chat_video found in {self.exp_dir}\")\n\n @property\n def task_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"task_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No task_video found in {self.exp_dir}\")\n\n @property\n def combined_video_path(self) -> Path:\n return self.exp_dir / \"combined_video.mp4\"\n\n @property\n def logs(self):\n if self._logs is None:\n self._logs = (self.exp_dir / \"experiment.log\").read_text()\n return self._logs\n\n @property\n def status(self):","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.task_video_path","uri":"program://AgentLab/function/src.agentlab.experiments.loop.task_video_path#L791-L795","kind":"function","name":"task_video_path","path":"src/agentlab/experiments/loop.py","language":"python","start_line":791,"end_line":795,"context_start_line":771,"context_end_line":815,"code":" \"\"\"Return a dict with exp_args flattened and summary_info.\"\"\"\n record = {\"exp_dir\": self.exp_dir}\n try:\n record.update(self.flat_exp_args)\n except FileNotFoundError:\n pass\n try:\n record.update(self.summary_info)\n except FileNotFoundError:\n pass\n return record\n\n @property\n def chat_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"chat_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No chat_video found in {self.exp_dir}\")\n\n @property\n def task_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"task_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No task_video found in {self.exp_dir}\")\n\n @property\n def combined_video_path(self) -> Path:\n return 
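`get_exp_record()` is written so that partially-written experiment directories still yield a usable (if sparse) record: each missing artifact just drops its columns. A short usage sketch, with `"<study_dir>"` as a placeholder path, tabulating records with pandas via `yield_all_exp_results` (defined later in this module):

```python
import pandas as pd

from agentlab.experiments.loop import yield_all_exp_results

# Collect one flat record per experiment under a study directory.
# Experiments whose exp_args.pkl or summary_info is missing contribute
# fewer columns, since get_exp_record() swallows FileNotFoundError per part.
records = [
    exp.get_exp_record()
    for exp in yield_all_exp_results("<study_dir>", progress_fn=None)
]
df = pd.DataFrame(records)
print(df.head())
```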
self.exp_dir / \"combined_video.mp4\"\n\n @property\n def logs(self):\n if self._logs is None:\n self._logs = (self.exp_dir / \"experiment.log\").read_text()\n return self._logs\n\n @property\n def status(self):\n \"\"\"Possible values:\n * \"done\": completed with no error\n * \"error\": completed with error\n * \"incomplete\": not completed yet (may be pending or just stalled)\n\n Returns:\n str: the status of the experiment. One of \"done\", \"error\", \"incomplete\".","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.combined_video_path","uri":"program://AgentLab/function/src.agentlab.experiments.loop.combined_video_path#L798-L799","kind":"function","name":"combined_video_path","path":"src/agentlab/experiments/loop.py","language":"python","start_line":798,"end_line":799,"context_start_line":778,"context_end_line":819,"code":" record.update(self.summary_info)\n except FileNotFoundError:\n pass\n return record\n\n @property\n def chat_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"chat_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No chat_video found in {self.exp_dir}\")\n\n @property\n def task_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"task_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No task_video found in {self.exp_dir}\")\n\n @property\n def combined_video_path(self) -> Path:\n return self.exp_dir / \"combined_video.mp4\"\n\n @property\n def logs(self):\n if self._logs is None:\n self._logs = (self.exp_dir / \"experiment.log\").read_text()\n return self._logs\n\n @property\n def status(self):\n \"\"\"Possible values:\n * \"done\": completed with no error\n * \"error\": completed with error\n * \"incomplete\": not completed yet (may be pending or just stalled)\n\n Returns:\n str: the status of the experiment. One of \"done\", \"error\", \"incomplete\".\n \"\"\"\n try:\n summary_info = self.summary_info\n except FileNotFoundError:","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.logs","uri":"program://AgentLab/function/src.agentlab.experiments.loop.logs#L802-L805","kind":"function","name":"logs","path":"src/agentlab/experiments/loop.py","language":"python","start_line":802,"end_line":805,"context_start_line":782,"context_end_line":825,"code":"\n @property\n def chat_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"chat_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No chat_video found in {self.exp_dir}\")\n\n @property\n def task_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"task_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No task_video found in {self.exp_dir}\")\n\n @property\n def combined_video_path(self) -> Path:\n return self.exp_dir / \"combined_video.mp4\"\n\n @property\n def logs(self):\n if self._logs is None:\n self._logs = (self.exp_dir / \"experiment.log\").read_text()\n return self._logs\n\n @property\n def status(self):\n \"\"\"Possible values:\n * \"done\": completed with no error\n * \"error\": completed with error\n * \"incomplete\": not completed yet (may be pending or just stalled)\n\n Returns:\n str: the status of the experiment. 
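The video-path properties convert a missed glob (`StopIteration`) into a `FileNotFoundError` with a readable message, so callers can probe optional artifacts uniformly. A small sketch, with `"<exp_dir>"` as a placeholder for a real experiment directory:

```python
from pathlib import Path

from agentlab.experiments.loop import get_exp_result

# Video artifacts are optional, so probe them defensively.
exp = get_exp_result(Path("<exp_dir>"))  # placeholder path
try:
    print("task video:", exp.task_video_path)
except FileNotFoundError as err:
    print(err)  # e.g. "No task_video found in <exp_dir>"
```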
One of \"done\", \"error\", \"incomplete\".\n \"\"\"\n try:\n summary_info = self.summary_info\n except FileNotFoundError:\n return \"incomplete\"\n\n if summary_info.get(\"err_msg\", None) is not None:\n return \"error\"\n\n if summary_info.get(\"terminated\", False) or summary_info.get(\"truncated\", False):","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.status","uri":"program://AgentLab/function/src.agentlab.experiments.loop.status#L808-L828","kind":"function","name":"status","path":"src/agentlab/experiments/loop.py","language":"python","start_line":808,"end_line":828,"context_start_line":788,"context_end_line":848,"code":" raise FileNotFoundError(f\"No chat_video found in {self.exp_dir}\")\n\n @property\n def task_video_path(self) -> Path:\n try:\n return next(self.exp_dir.glob(\"task_video/*.webm\"))\n except StopIteration:\n raise FileNotFoundError(f\"No task_video found in {self.exp_dir}\")\n\n @property\n def combined_video_path(self) -> Path:\n return self.exp_dir / \"combined_video.mp4\"\n\n @property\n def logs(self):\n if self._logs is None:\n self._logs = (self.exp_dir / \"experiment.log\").read_text()\n return self._logs\n\n @property\n def status(self):\n \"\"\"Possible values:\n * \"done\": completed with no error\n * \"error\": completed with error\n * \"incomplete\": not completed yet (may be pending or just stalled)\n\n Returns:\n str: the status of the experiment. One of \"done\", \"error\", \"incomplete\".\n \"\"\"\n try:\n summary_info = self.summary_info\n except FileNotFoundError:\n return \"incomplete\"\n\n if summary_info.get(\"err_msg\", None) is not None:\n return \"error\"\n\n if summary_info.get(\"terminated\", False) or summary_info.get(\"truncated\", False):\n return \"done\"\n\n return \"incomplete\"\n\n\nEXP_RESULT_CACHE = {}\n\n\ndef get_exp_result(exp_dir) -> ExpResult:\n \"\"\"Keep a cache of pre-loaded exp_results for faster loading\"\"\"\n exp_dir = str(exp_dir) # make sure it's not a Path\n exp_result = EXP_RESULT_CACHE.get(exp_dir, None)\n if exp_result is None:\n exp_result = ExpResult(exp_dir)\n EXP_RESULT_CACHE[exp_dir] = exp_result\n return exp_result\n\n\ndef yield_all_exp_results(\n savedir_base: str | Path, progress_fn=tqdm, load_hidden=False, use_cache=True\n):\n \"\"\"Recursively find all experiments from savedir_base folder.\n","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.loop.default","uri":"program://AgentLab/function/src.agentlab.experiments.loop.default#L889-L898","kind":"function","name":"default","path":"src/agentlab/experiments/loop.py","language":"python","start_line":889,"end_line":898,"context_start_line":869,"context_end_line":918,"code":"\n exp_args_paths = []\n for exp_dir in savedir_base:\n exp_args_paths.extend(list(Path(exp_dir).glob(\"**/exp_args.pkl\")))\n\n if progress_fn is not None:\n exp_args_paths = progress_fn(exp_args_paths, desc=\"Searching experiments directories.\")\n\n for exp_args_path in exp_args_paths:\n exp_dir = exp_args_path.parent\n if not load_hidden:\n if exp_dir.name.startswith(\"_\") or exp_dir.name.startswith(\".\"):\n continue\n if use_cache:\n yield get_exp_result(exp_dir)\n else:\n yield ExpResult(exp_dir)\n\n\nclass DataclassJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if is_dataclass(obj):\n return asdict(obj)\n if isinstance(obj, np.integer):\n 
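A sketch of how the three-valued `status` property is typically consumed: tallying all experiments under a study directory (`"<study_dir>"` is a placeholder). Note that "incomplete" covers both pending runs and runs whose summary_info was never written, so it also catches stalled jobs:

```python
from collections import Counter

from agentlab.experiments.loop import yield_all_exp_results

# Tally experiment statuses; each exp.status is one of
# "done", "error", or "incomplete" per the docstring above.
statuses = Counter(
    exp.status for exp in yield_all_exp_results("<study_dir>", progress_fn=None)
)
print(statuses)  # e.g. Counter({'done': 120, 'error': 3, 'incomplete': 2})
```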
return int(obj)\n if isinstance(obj, np.floating):\n return float(obj)\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return super().default(obj)\n\n\ndef _move_old_exp(exp_dir):\n \"\"\"Move the old experiment directory to a new name.\"\"\"\n exp_dir = Path(exp_dir)\n if exp_dir.exists():\n exp_dir.rename(exp_dir.with_name(\"_\" + exp_dir.name))\n\n\ndef _get_env_name(task_name: str):\n \"\"\"Register tasks if needed (lazy import) and return environment name.\"\"\"\n\n # lazy import\n if task_name.startswith(\"miniwob\"):\n import browsergym.miniwob\n elif task_name.startswith(\"workarena\"):\n import browsergym.workarena\n elif task_name.startswith(\"webarena\"):\n import browsergym.webarena\n elif task_name.startswith(\"visualwebarena\"):","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args","uri":"program://AgentLab/module/src.agentlab.experiments.args#L1-L315","kind":"module","name":"src.agentlab.experiments.args","path":"src/agentlab/experiments/args.py","language":"python","start_line":1,"end_line":315,"context_start_line":1,"context_end_line":315,"code":"import copy\nfrom abc import ABC\nfrom dataclasses import fields, is_dataclass\nfrom itertools import product\nfrom typing import Any\n\nimport numpy as np\n\n\nclass CrossProd:\n \"\"\"Use to specify that this will be part of a cross product\"\"\"\n\n def __init__(self, elements):\n self.elements = elements\n\n\nclass Distribution(ABC):\n \"\"\"Generic Class to identify that this is a distribution\"\"\"\n\n def sample(self):\n pass\n\n\nclass Choice(Distribution):\n \"\"\"Use to specify that this will be sampled from a list of elements\"\"\"\n\n def __init__(self, elements, p=None):\n self.elements = elements\n self.p = p\n\n def sample(self, rng=np.random):\n return rng.choice(self.elements, p=self.p)\n\n\ndef _find_cprod_with_paths(obj, path=None):\n \"\"\"Find all the CrossProd objects and their paths in the given object.\n\n Args:\n obj (Any): The object to search for CrossProd objects.\n path (List[str]): The path to the current object.\n\n Returns:\n List[Tuple[List[str], CrossProd]]: A list of tuples where the first element is the path to the CrossProd\n object and the second element is the CrossProd object.\n \"\"\"\n if path is None:\n path = []\n\n if isinstance(obj, CrossProd):\n return [(path, obj)]\n\n cprod_paths = []\n if is_dataclass(obj):\n for field in fields(obj):\n field_value = getattr(obj, field.name)\n cprod_paths += _find_cprod_with_paths(field_value, path + [field.name])\n elif isinstance(obj, dict):\n for key, value in obj.items():\n cprod_paths += _find_cprod_with_paths(value, path + [key])\n\n return cprod_paths\n\n\ndef _set_value(obj, path, value):\n \"\"\"Set the value of the given path in the given object to the given value.\"\"\"\n for key in path[:-1]:\n if isinstance(obj, dict):\n obj = obj[key]\n else:\n obj = getattr(obj, key)\n if isinstance(obj, dict):\n obj[path[-1]] = value\n else:\n setattr(obj, path[-1], value)\n\n\ndef expand_cross_product(obj: Any | list[Any]):\n \"\"\"Expand the given object into a list of objects with all combinations of\n CrossProd objects.\n\n This function will recursively search for CrossProd objects in the given\n object and create a list of objects with all combinations of CrossProd. 
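`DataclassJSONEncoder` makes dataclasses and numpy scalars/arrays JSON-serializable in one pass. A minimal usage sketch, with `Summary` as an illustrative payload dataclass:

```python
import json
from dataclasses import dataclass

import numpy as np

from agentlab.experiments.loop import DataclassJSONEncoder

@dataclass
class Summary:  # illustrative stand-in for a summary payload
    reward: float
    n_steps: int

# Dataclasses, numpy scalars, and numpy arrays are not JSON-serializable by
# default; the encoder converts each to a plain Python equivalent first.
blob = {"summary": Summary(reward=1.0, n_steps=7), "scores": np.array([0.5, 1.0])}
print(json.dumps(blob, cls=DataclassJSONEncoder))
# {"summary": {"reward": 1.0, "n_steps": 7}, "scores": [0.5, 1.0]}
```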
It\n searches through dataclasses and dictionaries.\n\n Args:\n obj: Any | List[Any],\n The object to expand.\n\n Returns:\n List[Any]:\n A list of objects with all combinations of CrossProd objects.\n\n \"\"\"\n\n if isinstance(obj, CrossProd):\n return obj.elements\n\n if isinstance(obj, list):\n obj_list = obj\n else:\n obj_list = [obj]\n\n result = []\n\n for obj in obj_list:\n cprod_paths = _find_cprod_with_paths(obj)\n if not cprod_paths:\n result.append(copy.deepcopy(obj))\n continue\n\n paths, cprod_objects = zip(*cprod_paths)\n combinations = product(*[cprod_obj.elements for cprod_obj in cprod_objects])\n\n # create a base object with empty fields to make fast deep copies from\n base_obj = copy.deepcopy(obj)\n for path in paths:\n _set_value(base_obj, path, None)\n\n for combo in combinations:\n new_obj = copy.deepcopy(base_obj)\n for path, value in zip(paths, combo):\n _set_value(new_obj, path, value)\n result.append(new_obj)\n\n return result\n\n\ndef sample_and_expand_cross_product(obj: Any | list[Any], n_samples: int):\n \"\"\"This will sample first and then expand the cross product.\"\"\"\n return expand_cross_product(sample_args(obj, n_samples))\n\n\ndef sample_args(obj: Any | list[Any], n_samples: int):\n \"\"\"Sample the given object n_samples times. Each sample is a deep copy of\n the original object with any object of type Distribution replaced by a\n sample from that distribution.\n\n Args:\n obj: Any | List[Any],\n the object to sample\n n_samples: int,\n the number of samples to generate\n\n Returns:\n List[Any]:\n A list of n_samples objects with the given object.\n \"\"\"\n\n if isinstance(obj, list):\n obj_list = obj\n else:\n obj_list = [obj]\n\n result = []\n\n for obj in obj_list:\n for _ in range(n_samples):\n result.append(_sample_single(copy.deepcopy(obj)))\n\n return result\n\n\ndef _sample_single(obj):\n \"\"\"Sample the given object once.\"\"\"\n if isinstance(obj, Distribution):\n return obj.sample()\n\n if is_dataclass(obj):\n for field in fields(obj):\n value = getattr(obj, field.name)\n sampled_value = _sample_single(value)\n setattr(obj, field.name, sampled_value)\n elif isinstance(obj, dict):\n for key, value in obj.items():\n obj[key] = _sample_single(value)\n\n return obj\n\n\nclass Toggle:\n pass\n\n\nTOGGLE = Toggle()\n\n\ndef _change_value(obj, path, value):\n \"\"\"Set the value to the given path in the nested objects.\n\n Note: This doesn't work with list or dict or tuples. 
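A worked example of `expand_cross_product`: two `CrossProd` axes on illustrative `Flags`/`AgentConfig` dataclasses (stand-ins, not the repo's classes) expand to 2 x 2 = 4 independent deep copies:

```python
from dataclasses import dataclass, field

from agentlab.experiments.args import CrossProd, expand_cross_product

@dataclass
class Flags:  # illustrative
    use_html: bool = False
    temperature: float = 0.0

@dataclass
class AgentConfig:  # illustrative
    flags: Flags = field(default_factory=Flags)
    model: str = "gpt-4o-mini"

# Two CrossProd axes -> 2 x 2 = 4 configurations, each a deep copy.
config = AgentConfig(
    flags=Flags(use_html=CrossProd([False, True])),
    model=CrossProd(["gpt-4o-mini", "gpt-4o"]),
)
configs = expand_cross_product(config)
print(len(configs))  # 4
```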
Only works with objects.\n\n Args:\n obj: the object to change the value\n path: the path to the value to change\n value: the value to change to\n\n Raises:\n ValueError: if the field is not found in the object\n \"\"\"\n key_list = path.split(\".\")\n for key in key_list[:-1]:\n if key == \"\":\n continue\n obj = getattr(obj, key)\n # if dataclass, then set the value only if the field exists\n\n def _set(obj, key, value):\n if isinstance(value, Toggle):\n previous_value = getattr(obj, key)\n if not isinstance(previous_value, bool):\n raise ValueError(f\"Toggle object {obj} attribute {key} is not a boolean\")\n setattr(obj, key, not previous_value)\n else:\n setattr(obj, key_list[-1], value)\n\n if is_dataclass(obj):\n field_names = [field.name for field in fields(obj)]\n if key_list[-1] in field_names:\n _set(obj, key_list[-1], value)\n else:\n raise ValueError(f\"field {key_list[-1]} not found in {obj}\")\n else:\n _set(obj, key_list[-1], value)\n\n\ndef _apply_change(params, change):\n \"\"\"Apply the change to the params object in place.\"\"\"\n if callable(change):\n change(params)\n elif isinstance(change, (tuple, list)):\n if isinstance(change[0], str) and len(change) == 2:\n path, value = change\n _change_value(params, path, value)\n else:\n for c in change:\n _apply_change(params, c)\n else:\n raise ValueError(f\"change {change} not recognized\")\n return params\n\n\ndef make_progression_study(start_point, changes, return_cross_prod=True):\n \"\"\"A kind of ablation study by changing the start_point with changes one by\n one.\n\n Args:\n start_point: the starting point of the study\n changes: a list of changes to make to the start_point. Each change is\n either a callable or tuple containing a string identifying the path in the object to\n change and the value to change to. ex: (\".obs.use_html\", True)\n return_cross_prod: return a CrossProd object or just the list\n\n Returns:\n A CrossProd object containing a list of objects with progressive changes\n from `start_point`. If return_cross_prod is False, then it will return a\n list.\n\n \"\"\"\n params_list = [start_point]\n for change in changes:\n params = copy.deepcopy(params_list[-1])\n _apply_change(params, change)\n params_list.append(params)\n\n if return_cross_prod:\n return CrossProd(params_list)\n else:\n return params_list\n\n\ndef make_ablation_study(start_point, changes, return_cross_prod=True):\n \"\"\"Ablation study by modifying the start_point with only one change at a\n time, and restarting from the original start_point for each configuration.\n\n Args:\n start_point: the starting point of the ablation study\n changes: a list of changes to make to the start_point. Each change is\n either a callable or tuple containing a string identifying the path in the object to\n change and the value to change to. ex: (\".obs.use_html\", True)\n return_cross_prod: return a CrossProd object or just the list\n\n Returns:\n A CrossProd object containing a list of objects with one change\n from `start_point`. 
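A sketch of `make_progression_study` on an illustrative `Flags` dataclass: each change is stacked on top of the previous configuration, giving `len(changes) + 1` points (baseline, +thinking, +thinking+plan):

```python
from dataclasses import dataclass

from agentlab.experiments.args import expand_cross_product, make_progression_study

@dataclass
class Flags:  # illustrative
    use_thinking: bool = False
    use_plan: bool = False

# Changes accumulate: point k contains all of changes[0..k-1].
study = make_progression_study(
    start_point=Flags(),
    changes=[("use_thinking", True), ("use_plan", True)],
)
for flags in expand_cross_product(study):
    print(flags)
# Flags(use_thinking=False, use_plan=False)
# Flags(use_thinking=True, use_plan=False)
# Flags(use_thinking=True, use_plan=True)
```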
If return_cross_prod is False, then it will return a\n list.\n\n \"\"\"\n params_list = [start_point]\n for change in changes:\n params = copy.deepcopy(start_point)\n _apply_change(params, change)\n params_list.append(params)\n\n if return_cross_prod:\n return CrossProd(params_list)\n else:\n return params_list\n\n\nif __name__ == \"__main__\":\n from agentlab.agents.dynamic_prompting import Flags\n\n study = make_progression_study(\n start_point=Flags(),\n changes=[\n (\"use_thinking\", True),\n (\".use_plan\", True),\n (\".use_criticise\", True),\n ],\n return_cross_prod=True,\n )\n\n study = expand_cross_product(study)\n for p in study:\n print(p)","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.CrossProd","uri":"program://AgentLab/class/src.agentlab.experiments.args.CrossProd#L10-L14","kind":"class","name":"CrossProd","path":"src/agentlab/experiments/args.py","language":"python","start_line":10,"end_line":14,"context_start_line":1,"context_end_line":34,"code":"import copy\nfrom abc import ABC\nfrom dataclasses import fields, is_dataclass\nfrom itertools import product\nfrom typing import Any\n\nimport numpy as np\n\n\nclass CrossProd:\n \"\"\"Use to specify that this will be part of a cross product\"\"\"\n\n def __init__(self, elements):\n self.elements = elements\n\n\nclass Distribution(ABC):\n \"\"\"Generic Class to identify that this is a distribution\"\"\"\n\n def sample(self):\n pass\n\n\nclass Choice(Distribution):\n \"\"\"Use to specify that this will be sampled from a list of elements\"\"\"\n\n def __init__(self, elements, p=None):\n self.elements = elements\n self.p = p\n\n def sample(self, rng=np.random):\n return rng.choice(self.elements, p=self.p)\n\n","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.Distribution","uri":"program://AgentLab/class/src.agentlab.experiments.args.Distribution#L17-L21","kind":"class","name":"Distribution","path":"src/agentlab/experiments/args.py","language":"python","start_line":17,"end_line":21,"context_start_line":1,"context_end_line":41,"code":"import copy\nfrom abc import ABC\nfrom dataclasses import fields, is_dataclass\nfrom itertools import product\nfrom typing import Any\n\nimport numpy as np\n\n\nclass CrossProd:\n \"\"\"Use to specify that this will be part of a cross product\"\"\"\n\n def __init__(self, elements):\n self.elements = elements\n\n\nclass Distribution(ABC):\n \"\"\"Generic Class to identify that this is a distribution\"\"\"\n\n def sample(self):\n pass\n\n\nclass Choice(Distribution):\n \"\"\"Use to specify that this will be sampled from a list of elements\"\"\"\n\n def __init__(self, elements, p=None):\n self.elements = elements\n self.p = p\n\n def sample(self, rng=np.random):\n return rng.choice(self.elements, p=self.p)\n\n\ndef _find_cprod_with_paths(obj, path=None):\n \"\"\"Find all the CrossProd objects and their paths in the given object.\n\n Args:\n obj (Any): The object to search for CrossProd objects.\n path (List[str]): The path to the current object.\n","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} 
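For contrast, a sketch of `make_ablation_study` using the module-level `TOGGLE` sentinel: every variant restarts from `start_point`, so each configuration differs from the baseline by exactly one flag:

```python
from dataclasses import dataclass

from agentlab.experiments.args import TOGGLE, expand_cross_product, make_ablation_study

@dataclass
class Flags:  # illustrative
    use_html: bool = True
    use_screenshot: bool = True

# TOGGLE flips a boolean field instead of setting an explicit value.
study = make_ablation_study(
    start_point=Flags(),
    changes=[("use_html", TOGGLE), ("use_screenshot", TOGGLE)],
)
for flags in expand_cross_product(study):
    print(flags)
# Flags(use_html=True, use_screenshot=True)    <- baseline
# Flags(use_html=False, use_screenshot=True)
# Flags(use_html=True, use_screenshot=False)
```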
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.Choice","uri":"program://AgentLab/class/src.agentlab.experiments.args.Choice#L24-L32","kind":"class","name":"Choice","path":"src/agentlab/experiments/args.py","language":"python","start_line":24,"end_line":32,"context_start_line":4,"context_end_line":52,"code":"from itertools import product\nfrom typing import Any\n\nimport numpy as np\n\n\nclass CrossProd:\n \"\"\"Use to specify that this will be part of a cross product\"\"\"\n\n def __init__(self, elements):\n self.elements = elements\n\n\nclass Distribution(ABC):\n \"\"\"Generic Class to identify that this is a distribution\"\"\"\n\n def sample(self):\n pass\n\n\nclass Choice(Distribution):\n \"\"\"Use to specify that this will be sampled from a list of elements\"\"\"\n\n def __init__(self, elements, p=None):\n self.elements = elements\n self.p = p\n\n def sample(self, rng=np.random):\n return rng.choice(self.elements, p=self.p)\n\n\ndef _find_cprod_with_paths(obj, path=None):\n \"\"\"Find all the CrossProd objects and their paths in the given object.\n\n Args:\n obj (Any): The object to search for CrossProd objects.\n path (List[str]): The path to the current object.\n\n Returns:\n List[Tuple[List[str], CrossProd]]: A list of tuples where the first element is the path to the CrossProd\n object and the second element is the CrossProd object.\n \"\"\"\n if path is None:\n path = []\n\n if isinstance(obj, CrossProd):\n return [(path, obj)]\n\n cprod_paths = []","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args._find_cprod_with_paths","uri":"program://AgentLab/function/src.agentlab.experiments.args._find_cprod_with_paths#L35-L61","kind":"function","name":"_find_cprod_with_paths","path":"src/agentlab/experiments/args.py","language":"python","start_line":35,"end_line":61,"context_start_line":15,"context_end_line":81,"code":"\n\nclass Distribution(ABC):\n \"\"\"Generic Class to identify that this is a distribution\"\"\"\n\n def sample(self):\n pass\n\n\nclass Choice(Distribution):\n \"\"\"Use to specify that this will be sampled from a list of elements\"\"\"\n\n def __init__(self, elements, p=None):\n self.elements = elements\n self.p = p\n\n def sample(self, rng=np.random):\n return rng.choice(self.elements, p=self.p)\n\n\ndef _find_cprod_with_paths(obj, path=None):\n \"\"\"Find all the CrossProd objects and their paths in the given object.\n\n Args:\n obj (Any): The object to search for CrossProd objects.\n path (List[str]): The path to the current object.\n\n Returns:\n List[Tuple[List[str], CrossProd]]: A list of tuples where the first element is the path to the CrossProd\n object and the second element is the CrossProd object.\n \"\"\"\n if path is None:\n path = []\n\n if isinstance(obj, CrossProd):\n return [(path, obj)]\n\n cprod_paths = []\n if is_dataclass(obj):\n for field in fields(obj):\n field_value = getattr(obj, field.name)\n cprod_paths += _find_cprod_with_paths(field_value, path + [field.name])\n elif isinstance(obj, dict):\n for key, value in obj.items():\n cprod_paths += _find_cprod_with_paths(value, path + [key])\n\n return cprod_paths\n\n\ndef _set_value(obj, path, value):\n \"\"\"Set the value of the given path in the given object to the given value.\"\"\"\n for key in path[:-1]:\n if isinstance(obj, dict):\n obj = obj[key]\n else:\n obj = getattr(obj, key)\n if isinstance(obj, dict):\n obj[path[-1]] = value\n else:\n setattr(obj, 
path[-1], value)\n\n\ndef expand_cross_product(obj: Any | list[Any]):\n \"\"\"Expand the given object into a list of objects with all combinations of\n CrossProd objects.\n\n This function will recursively search for CrossProd objects in the given","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args._set_value","uri":"program://AgentLab/function/src.agentlab.experiments.args._set_value#L64-L74","kind":"function","name":"_set_value","path":"src/agentlab/experiments/args.py","language":"python","start_line":64,"end_line":74,"context_start_line":44,"context_end_line":94,"code":" object and the second element is the CrossProd object.\n \"\"\"\n if path is None:\n path = []\n\n if isinstance(obj, CrossProd):\n return [(path, obj)]\n\n cprod_paths = []\n if is_dataclass(obj):\n for field in fields(obj):\n field_value = getattr(obj, field.name)\n cprod_paths += _find_cprod_with_paths(field_value, path + [field.name])\n elif isinstance(obj, dict):\n for key, value in obj.items():\n cprod_paths += _find_cprod_with_paths(value, path + [key])\n\n return cprod_paths\n\n\ndef _set_value(obj, path, value):\n \"\"\"Set the value of the given path in the given object to the given value.\"\"\"\n for key in path[:-1]:\n if isinstance(obj, dict):\n obj = obj[key]\n else:\n obj = getattr(obj, key)\n if isinstance(obj, dict):\n obj[path[-1]] = value\n else:\n setattr(obj, path[-1], value)\n\n\ndef expand_cross_product(obj: Any | list[Any]):\n \"\"\"Expand the given object into a list of objects with all combinations of\n CrossProd objects.\n\n This function will recursively search for CrossProd objects in the given\n object and create a list of objects with all combinations of CrossProd. It\n searches through dataclasses and dictionaries.\n\n Args:\n obj: Any | List[Any],\n The object to expand.\n\n Returns:\n List[Any]:\n A list of objects with all combinations of CrossProd objects.\n\n \"\"\"\n","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.expand_cross_product","uri":"program://AgentLab/function/src.agentlab.experiments.args.expand_cross_product#L77-L125","kind":"function","name":"expand_cross_product","path":"src/agentlab/experiments/args.py","language":"python","start_line":77,"end_line":125,"context_start_line":57,"context_end_line":145,"code":" elif isinstance(obj, dict):\n for key, value in obj.items():\n cprod_paths += _find_cprod_with_paths(value, path + [key])\n\n return cprod_paths\n\n\ndef _set_value(obj, path, value):\n \"\"\"Set the value of the given path in the given object to the given value.\"\"\"\n for key in path[:-1]:\n if isinstance(obj, dict):\n obj = obj[key]\n else:\n obj = getattr(obj, key)\n if isinstance(obj, dict):\n obj[path[-1]] = value\n else:\n setattr(obj, path[-1], value)\n\n\ndef expand_cross_product(obj: Any | list[Any]):\n \"\"\"Expand the given object into a list of objects with all combinations of\n CrossProd objects.\n\n This function will recursively search for CrossProd objects in the given\n object and create a list of objects with all combinations of CrossProd. 
It\n searches through dataclasses and dictionaries.\n\n Args:\n obj: Any | List[Any],\n The object to expand.\n\n Returns:\n List[Any]:\n A list of objects with all combinations of CrossProd objects.\n\n \"\"\"\n\n if isinstance(obj, CrossProd):\n return obj.elements\n\n if isinstance(obj, list):\n obj_list = obj\n else:\n obj_list = [obj]\n\n result = []\n\n for obj in obj_list:\n cprod_paths = _find_cprod_with_paths(obj)\n if not cprod_paths:\n result.append(copy.deepcopy(obj))\n continue\n\n paths, cprod_objects = zip(*cprod_paths)\n combinations = product(*[cprod_obj.elements for cprod_obj in cprod_objects])\n\n # create a base object with empty fields to make fast deep copies from\n base_obj = copy.deepcopy(obj)\n for path in paths:\n _set_value(base_obj, path, None)\n\n for combo in combinations:\n new_obj = copy.deepcopy(base_obj)\n for path, value in zip(paths, combo):\n _set_value(new_obj, path, value)\n result.append(new_obj)\n\n return result\n\n\ndef sample_and_expand_cross_product(obj: Any | list[Any], n_samples: int):\n \"\"\"This will sample first and then expand the cross product.\"\"\"\n return expand_cross_product(sample_args(obj, n_samples))\n\n\ndef sample_args(obj: Any | list[Any], n_samples: int):\n \"\"\"Sample the given object n_samples times. Each sample is a deep copy of\n the original object with any object of type Distribution replaced by a\n sample from that distribution.\n\n Args:\n obj: Any | List[Any],\n the object to sample\n n_samples: int,\n the number of samples to generate\n\n Returns:\n List[Any]:","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.sample_and_expand_cross_product","uri":"program://AgentLab/function/src.agentlab.experiments.args.sample_and_expand_cross_product#L128-L130","kind":"function","name":"sample_and_expand_cross_product","path":"src/agentlab/experiments/args.py","language":"python","start_line":128,"end_line":130,"context_start_line":108,"context_end_line":150,"code":" result.append(copy.deepcopy(obj))\n continue\n\n paths, cprod_objects = zip(*cprod_paths)\n combinations = product(*[cprod_obj.elements for cprod_obj in cprod_objects])\n\n # create a base object with empty fields to make fast deep copies from\n base_obj = copy.deepcopy(obj)\n for path in paths:\n _set_value(base_obj, path, None)\n\n for combo in combinations:\n new_obj = copy.deepcopy(base_obj)\n for path, value in zip(paths, combo):\n _set_value(new_obj, path, value)\n result.append(new_obj)\n\n return result\n\n\ndef sample_and_expand_cross_product(obj: Any | list[Any], n_samples: int):\n \"\"\"This will sample first and then expand the cross product.\"\"\"\n return expand_cross_product(sample_args(obj, n_samples))\n\n\ndef sample_args(obj: Any | list[Any], n_samples: int):\n \"\"\"Sample the given object n_samples times. 
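`sample_and_expand_cross_product` composes the two mechanisms: `Distribution` fields are sampled first, then any remaining `CrossProd` fields are expanded. A random-search-style sketch with an illustrative `AgentConfig` dataclass:

```python
from dataclasses import dataclass

from agentlab.experiments.args import Choice, CrossProd, sample_and_expand_cross_product

@dataclass
class AgentConfig:  # illustrative
    temperature: float = 0.0
    use_html: bool = False

# Sample temperature 3 times, then expand use_html:
# 3 samples x 2 values = 6 configurations.
config = AgentConfig(
    temperature=Choice([0.0, 0.5, 1.0]),
    use_html=CrossProd([False, True]),
)
configs = sample_and_expand_cross_product(config, n_samples=3)
print(len(configs))  # 6
```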
Each sample is a deep copy of\n the original object with any object of type Distribution replaced by a\n sample from that distribution.\n\n Args:\n obj: Any | List[Any],\n the object to sample\n n_samples: int,\n the number of samples to generate\n\n Returns:\n List[Any]:\n A list of n_samples objects with the given object.\n \"\"\"\n\n if isinstance(obj, list):\n obj_list = obj","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.sample_args","uri":"program://AgentLab/function/src.agentlab.experiments.args.sample_args#L133-L160","kind":"function","name":"sample_args","path":"src/agentlab/experiments/args.py","language":"python","start_line":133,"end_line":160,"context_start_line":113,"context_end_line":180,"code":"\n # create a base object with empty fields to make fast deep copies from\n base_obj = copy.deepcopy(obj)\n for path in paths:\n _set_value(base_obj, path, None)\n\n for combo in combinations:\n new_obj = copy.deepcopy(base_obj)\n for path, value in zip(paths, combo):\n _set_value(new_obj, path, value)\n result.append(new_obj)\n\n return result\n\n\ndef sample_and_expand_cross_product(obj: Any | list[Any], n_samples: int):\n \"\"\"This will sample first and then expand the cross product.\"\"\"\n return expand_cross_product(sample_args(obj, n_samples))\n\n\ndef sample_args(obj: Any | list[Any], n_samples: int):\n \"\"\"Sample the given object n_samples times. Each sample is a deep copy of\n the original object with any object of type Distribution replaced by a\n sample from that distribution.\n\n Args:\n obj: Any | List[Any],\n the object to sample\n n_samples: int,\n the number of samples to generate\n\n Returns:\n List[Any]:\n A list of n_samples objects with the given object.\n \"\"\"\n\n if isinstance(obj, list):\n obj_list = obj\n else:\n obj_list = [obj]\n\n result = []\n\n for obj in obj_list:\n for _ in range(n_samples):\n result.append(_sample_single(copy.deepcopy(obj)))\n\n return result\n\n\ndef _sample_single(obj):\n \"\"\"Sample the given object once.\"\"\"\n if isinstance(obj, Distribution):\n return obj.sample()\n\n if is_dataclass(obj):\n for field in fields(obj):\n value = getattr(obj, field.name)\n sampled_value = _sample_single(value)\n setattr(obj, field.name, sampled_value)\n elif isinstance(obj, dict):\n for key, value in obj.items():\n obj[key] = _sample_single(value)\n\n return obj\n\n\nclass Toggle:","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args._sample_single","uri":"program://AgentLab/function/src.agentlab.experiments.args._sample_single#L163-L177","kind":"function","name":"_sample_single","path":"src/agentlab/experiments/args.py","language":"python","start_line":163,"end_line":177,"context_start_line":143,"context_end_line":197,"code":"\n Returns:\n List[Any]:\n A list of n_samples objects with the given object.\n \"\"\"\n\n if isinstance(obj, list):\n obj_list = obj\n else:\n obj_list = [obj]\n\n result = []\n\n for obj in obj_list:\n for _ in range(n_samples):\n result.append(_sample_single(copy.deepcopy(obj)))\n\n return result\n\n\ndef _sample_single(obj):\n \"\"\"Sample the given object once.\"\"\"\n if isinstance(obj, Distribution):\n return obj.sample()\n\n if is_dataclass(obj):\n for field in fields(obj):\n value = getattr(obj, field.name)\n sampled_value = _sample_single(value)\n setattr(obj, field.name, 
sampled_value)\n elif isinstance(obj, dict):\n for key, value in obj.items():\n obj[key] = _sample_single(value)\n\n return obj\n\n\nclass Toggle:\n pass\n\n\nTOGGLE = Toggle()\n\n\ndef _change_value(obj, path, value):\n \"\"\"Set the value to the given path in the nested objects.\n\n Note: This doesn't work with list or dict or tuples. Only works with objects.\n\n Args:\n obj: the object to change the value\n path: the path to the value to change\n value: the value to change to\n\n Raises:","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.Toggle","uri":"program://AgentLab/class/src.agentlab.experiments.args.Toggle#L180-L181","kind":"class","name":"Toggle","path":"src/agentlab/experiments/args.py","language":"python","start_line":180,"end_line":181,"context_start_line":160,"context_end_line":201,"code":" return result\n\n\ndef _sample_single(obj):\n \"\"\"Sample the given object once.\"\"\"\n if isinstance(obj, Distribution):\n return obj.sample()\n\n if is_dataclass(obj):\n for field in fields(obj):\n value = getattr(obj, field.name)\n sampled_value = _sample_single(value)\n setattr(obj, field.name, sampled_value)\n elif isinstance(obj, dict):\n for key, value in obj.items():\n obj[key] = _sample_single(value)\n\n return obj\n\n\nclass Toggle:\n pass\n\n\nTOGGLE = Toggle()\n\n\ndef _change_value(obj, path, value):\n \"\"\"Set the value to the given path in the nested objects.\n\n Note: This doesn't work with list or dict or tuples. Only works with objects.\n\n Args:\n obj: the object to change the value\n path: the path to the value to change\n value: the value to change to\n\n Raises:\n ValueError: if the field is not found in the object\n \"\"\"\n key_list = path.split(\".\")\n for key in key_list[:-1]:","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args._change_value","uri":"program://AgentLab/function/src.agentlab.experiments.args._change_value#L187-L223","kind":"function","name":"_change_value","path":"src/agentlab/experiments/args.py","language":"python","start_line":187,"end_line":223,"context_start_line":167,"context_end_line":243,"code":"\n if is_dataclass(obj):\n for field in fields(obj):\n value = getattr(obj, field.name)\n sampled_value = _sample_single(value)\n setattr(obj, field.name, sampled_value)\n elif isinstance(obj, dict):\n for key, value in obj.items():\n obj[key] = _sample_single(value)\n\n return obj\n\n\nclass Toggle:\n pass\n\n\nTOGGLE = Toggle()\n\n\ndef _change_value(obj, path, value):\n \"\"\"Set the value to the given path in the nested objects.\n\n Note: This doesn't work with list or dict or tuples. 
Only works with objects.\n\n Args:\n obj: the object to change the value\n path: the path to the value to change\n value: the value to change to\n\n Raises:\n ValueError: if the field is not found in the object\n \"\"\"\n key_list = path.split(\".\")\n for key in key_list[:-1]:\n if key == \"\":\n continue\n obj = getattr(obj, key)\n # if dataclass, then set the value only if the field exists\n\n def _set(obj, key, value):\n if isinstance(value, Toggle):\n previous_value = getattr(obj, key)\n if not isinstance(previous_value, bool):\n raise ValueError(f\"Toggle object {obj} attribute {key} is not a boolean\")\n setattr(obj, key, not previous_value)\n else:\n setattr(obj, key_list[-1], value)\n\n if is_dataclass(obj):\n field_names = [field.name for field in fields(obj)]\n if key_list[-1] in field_names:\n _set(obj, key_list[-1], value)\n else:\n raise ValueError(f\"field {key_list[-1]} not found in {obj}\")\n else:\n _set(obj, key_list[-1], value)\n\n\ndef _apply_change(params, change):\n \"\"\"Apply the change to the params object in place.\"\"\"\n if callable(change):\n change(params)\n elif isinstance(change, (tuple, list)):\n if isinstance(change[0], str) and len(change) == 2:\n path, value = change\n _change_value(params, path, value)\n else:\n for c in change:\n _apply_change(params, c)\n else:\n raise ValueError(f\"change {change} not recognized\")\n return params\n\n\ndef make_progression_study(start_point, changes, return_cross_prod=True):\n \"\"\"A kind of ablation study by changing the start_point with changes one by","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args._apply_change","uri":"program://AgentLab/function/src.agentlab.experiments.args._apply_change#L226-L239","kind":"function","name":"_apply_change","path":"src/agentlab/experiments/args.py","language":"python","start_line":226,"end_line":239,"context_start_line":206,"context_end_line":259,"code":"\n def _set(obj, key, value):\n if isinstance(value, Toggle):\n previous_value = getattr(obj, key)\n if not isinstance(previous_value, bool):\n raise ValueError(f\"Toggle object {obj} attribute {key} is not a boolean\")\n setattr(obj, key, not previous_value)\n else:\n setattr(obj, key_list[-1], value)\n\n if is_dataclass(obj):\n field_names = [field.name for field in fields(obj)]\n if key_list[-1] in field_names:\n _set(obj, key_list[-1], value)\n else:\n raise ValueError(f\"field {key_list[-1]} not found in {obj}\")\n else:\n _set(obj, key_list[-1], value)\n\n\ndef _apply_change(params, change):\n \"\"\"Apply the change to the params object in place.\"\"\"\n if callable(change):\n change(params)\n elif isinstance(change, (tuple, list)):\n if isinstance(change[0], str) and len(change) == 2:\n path, value = change\n _change_value(params, path, value)\n else:\n for c in change:\n _apply_change(params, c)\n else:\n raise ValueError(f\"change {change} not recognized\")\n return params\n\n\ndef make_progression_study(start_point, changes, return_cross_prod=True):\n \"\"\"A kind of ablation study by changing the start_point with changes one by\n one.\n\n Args:\n start_point: the starting point of the study\n changes: a list of changes to make to the start_point. Each change is\n either a callable or tuple containing a string identifying the path in the object to\n change and the value to change to. 
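`_apply_change` accepts three change forms: a `(path, value)` pair (a leading "." in the path is tolerated), a callable that mutates the config in place, or a list of such changes applied together as a single variant. A sketch with illustrative `Obs`/`Config` dataclasses:

```python
from dataclasses import dataclass, field

from agentlab.experiments.args import expand_cross_product, make_ablation_study

@dataclass
class Obs:  # illustrative
    use_html: bool = False

@dataclass
class Config:  # illustrative
    obs: Obs = field(default_factory=Obs)
    model: str = "gpt-4o-mini"

def upgrade_model(cfg: Config):
    cfg.model = "gpt-4o"  # arbitrary in-place edit

study = make_ablation_study(
    start_point=Config(),
    changes=[
        (".obs.use_html", True),                   # dotted path into nested dataclass
        upgrade_model,                             # callable change
        [(".obs.use_html", True), upgrade_model],  # combined change, one variant
    ],
)
print(len(expand_cross_product(study)))  # 4: baseline + 3 variants
```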
ex: (\".obs.use_html\", True)\n return_cross_prod: return a CrossProd object or just the list\n\n Returns:\n A CrossProd object containing a list of objects with progressive changes\n from `start_point`. If return_cross_prod is False, then it will return a\n list.\n\n \"\"\"\n params_list = [start_point]","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.make_progression_study","uri":"program://AgentLab/function/src.agentlab.experiments.args.make_progression_study#L242-L268","kind":"function","name":"make_progression_study","path":"src/agentlab/experiments/args.py","language":"python","start_line":242,"end_line":268,"context_start_line":222,"context_end_line":288,"code":" else:\n _set(obj, key_list[-1], value)\n\n\ndef _apply_change(params, change):\n \"\"\"Apply the change to the params object in place.\"\"\"\n if callable(change):\n change(params)\n elif isinstance(change, (tuple, list)):\n if isinstance(change[0], str) and len(change) == 2:\n path, value = change\n _change_value(params, path, value)\n else:\n for c in change:\n _apply_change(params, c)\n else:\n raise ValueError(f\"change {change} not recognized\")\n return params\n\n\ndef make_progression_study(start_point, changes, return_cross_prod=True):\n \"\"\"A kind of ablation study by changing the start_point with changes one by\n one.\n\n Args:\n start_point: the starting point of the study\n changes: a list of changes to make to the start_point. Each change is\n either a callable or tuple containing a string identifying the path in the object to\n change and the value to change to. ex: (\".obs.use_html\", True)\n return_cross_prod: return a CrossProd object or just the list\n\n Returns:\n A CrossProd object containing a list of objects with progressive changes\n from `start_point`. If return_cross_prod is False, then it will return a\n list.\n\n \"\"\"\n params_list = [start_point]\n for change in changes:\n params = copy.deepcopy(params_list[-1])\n _apply_change(params, change)\n params_list.append(params)\n\n if return_cross_prod:\n return CrossProd(params_list)\n else:\n return params_list\n\n\ndef make_ablation_study(start_point, changes, return_cross_prod=True):\n \"\"\"Ablation study by modifying the start_point with only one change at a\n time, and restarting from the original start_point for each configuration.\n\n Args:\n start_point: the starting point of the ablation study\n changes: a list of changes to make to the start_point. Each change is\n either a callable or tuple containing a string identifying the path in the object to\n change and the value to change to. ex: (\".obs.use_html\", True)\n return_cross_prod: return a CrossProd object or just the list\n\n Returns:\n A CrossProd object containing a list of objects with one change\n from `start_point`. 
If return_cross_prod is False, then it will return a\n list.\n\n \"\"\"\n params_list = [start_point]","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.make_ablation_study","uri":"program://AgentLab/function/src.agentlab.experiments.args.make_ablation_study#L271-L297","kind":"function","name":"make_ablation_study","path":"src/agentlab/experiments/args.py","language":"python","start_line":271,"end_line":297,"context_start_line":251,"context_end_line":315,"code":" return_cross_prod: return a CrossProd object or just the list\n\n Returns:\n A CrossProd object containing a list of objects with progressive changes\n from `start_point`. If return_cross_prod is False, then it will return a\n list.\n\n \"\"\"\n params_list = [start_point]\n for change in changes:\n params = copy.deepcopy(params_list[-1])\n _apply_change(params, change)\n params_list.append(params)\n\n if return_cross_prod:\n return CrossProd(params_list)\n else:\n return params_list\n\n\ndef make_ablation_study(start_point, changes, return_cross_prod=True):\n \"\"\"Ablation study by modifying the start_point with only one change at a\n time, and restarting from the original start_point for each configuration.\n\n Args:\n start_point: the starting point of the ablation study\n changes: a list of changes to make to the start_point. Each change is\n either a callable or tuple containing a string identifying the path in the object to\n change and the value to change to. ex: (\".obs.use_html\", True)\n return_cross_prod: return a CrossProd object or just the list\n\n Returns:\n A CrossProd object containing a list of objects with one change\n from `start_point`. If return_cross_prod is False, then it will return a\n list.\n\n \"\"\"\n params_list = [start_point]\n for change in changes:\n params = copy.deepcopy(start_point)\n _apply_change(params, change)\n params_list.append(params)\n\n if return_cross_prod:\n return CrossProd(params_list)\n else:\n return params_list\n\n\nif __name__ == \"__main__\":\n from agentlab.agents.dynamic_prompting import Flags\n\n study = make_progression_study(\n start_point=Flags(),\n changes=[\n (\"use_thinking\", True),\n (\".use_plan\", True),\n (\".use_criticise\", True),\n ],\n return_cross_prod=True,\n )\n\n study = expand_cross_product(study)\n for p in study:\n print(p)","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.__init__","uri":"program://AgentLab/function/src.agentlab.experiments.args.__init__#L27-L29","kind":"function","name":"__init__","path":"src/agentlab/experiments/args.py","language":"python","start_line":27,"end_line":29,"context_start_line":7,"context_end_line":49,"code":"import numpy as np\n\n\nclass CrossProd:\n \"\"\"Use to specify that this will be part of a cross product\"\"\"\n\n def __init__(self, elements):\n self.elements = elements\n\n\nclass Distribution(ABC):\n \"\"\"Generic Class to identify that this is a distribution\"\"\"\n\n def sample(self):\n pass\n\n\nclass Choice(Distribution):\n \"\"\"Use to specify that this will be sampled from a list of elements\"\"\"\n\n def __init__(self, elements, p=None):\n self.elements = elements\n self.p = p\n\n def sample(self, rng=np.random):\n return rng.choice(self.elements, p=self.p)\n\n\ndef _find_cprod_with_paths(obj, path=None):\n \"\"\"Find all the CrossProd objects and their paths in the 
given object.\n\n Args:\n obj (Any): The object to search for CrossProd objects.\n path (List[str]): The path to the current object.\n\n Returns:\n List[Tuple[List[str], CrossProd]]: A list of tuples where the first element is the path to the CrossProd\n object and the second element is the CrossProd object.\n \"\"\"\n if path is None:\n path = []\n\n if isinstance(obj, CrossProd):","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args.sample","uri":"program://AgentLab/function/src.agentlab.experiments.args.sample#L31-L32","kind":"function","name":"sample","path":"src/agentlab/experiments/args.py","language":"python","start_line":31,"end_line":32,"context_start_line":11,"context_end_line":52,"code":" \"\"\"Use to specify that this will be part of a cross product\"\"\"\n\n def __init__(self, elements):\n self.elements = elements\n\n\nclass Distribution(ABC):\n \"\"\"Generic Class to identify that this is a distribution\"\"\"\n\n def sample(self):\n pass\n\n\nclass Choice(Distribution):\n \"\"\"Use to specify that this will be sampled from a list of elements\"\"\"\n\n def __init__(self, elements, p=None):\n self.elements = elements\n self.p = p\n\n def sample(self, rng=np.random):\n return rng.choice(self.elements, p=self.p)\n\n\ndef _find_cprod_with_paths(obj, path=None):\n \"\"\"Find all the CrossProd objects and their paths in the given object.\n\n Args:\n obj (Any): The object to search for CrossProd objects.\n path (List[str]): The path to the current object.\n\n Returns:\n List[Tuple[List[str], CrossProd]]: A list of tuples where the first element is the path to the CrossProd\n object and the second element is the CrossProd object.\n \"\"\"\n if path is None:\n path = []\n\n if isinstance(obj, CrossProd):\n return [(path, obj)]\n\n cprod_paths = []","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.args._set","uri":"program://AgentLab/function/src.agentlab.experiments.args._set#L207-L214","kind":"function","name":"_set","path":"src/agentlab/experiments/args.py","language":"python","start_line":207,"end_line":214,"context_start_line":187,"context_end_line":234,"code":"def _change_value(obj, path, value):\n \"\"\"Set the value to the given path in the nested objects.\n\n Note: This doesn't work with list or dict or tuples. 
Only works with objects.\n\n Args:\n obj: the object to change the value\n path: the path to the value to change\n value: the value to change to\n\n Raises:\n ValueError: if the field is not found in the object\n \"\"\"\n key_list = path.split(\".\")\n for key in key_list[:-1]:\n if key == \"\":\n continue\n obj = getattr(obj, key)\n # if dataclass, then set the value only if the field exists\n\n def _set(obj, key, value):\n if isinstance(value, Toggle):\n previous_value = getattr(obj, key)\n if not isinstance(previous_value, bool):\n raise ValueError(f\"Toggle object {obj} attribute {key} is not a boolean\")\n setattr(obj, key, not previous_value)\n else:\n setattr(obj, key_list[-1], value)\n\n if is_dataclass(obj):\n field_names = [field.name for field in fields(obj)]\n if key_list[-1] in field_names:\n _set(obj, key_list[-1], value)\n else:\n raise ValueError(f\"field {key_list[-1]} not found in {obj}\")\n else:\n _set(obj, key_list[-1], value)\n\n\ndef _apply_change(params, change):\n \"\"\"Apply the change to the params object in place.\"\"\"\n if callable(change):\n change(params)\n elif isinstance(change, (tuple, list)):\n if isinstance(change[0], str) and len(change) == 2:\n path, value = change\n _change_value(params, path, value)\n else:","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.list_openai_models","uri":"program://AgentLab/module/src.agentlab.experiments.list_openai_models#L1-L17","kind":"module","name":"src.agentlab.experiments.list_openai_models","path":"src/agentlab/experiments/list_openai_models.py","language":"python","start_line":1,"end_line":17,"context_start_line":1,"context_end_line":17,"code":"import pandas as pd\nfrom openai import OpenAI\n\nif __name__ == \"__main__\":\n models = OpenAI().models.list()\n df = pd.DataFrame([dict(model) for model in models.data])\n\n # Filter GPT models or o1 models\n # df = df[df[\"id\"].str.contains(\"gpt\") | df[\"id\"].str.contains(\"o1\")]\n\n # Convert Unix timestamps to dates (YYYY-MM-DD) and remove time\n df[\"created\"] = pd.to_datetime(df[\"created\"], unit=\"s\").dt.date\n df.sort_values(by=\"created\", inplace=True)\n # Print all entries\n\n # print all entries\n print(df.to_string(index=False))","source_hash":"9f935337b3d793cfb6f6d54b6bbd75b69d2fa8ad5a86fbd6892679459d8766c4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils","uri":"program://AgentLab/module/src.agentlab.experiments.exp_utils#L1-L191","kind":"module","name":"src.agentlab.experiments.exp_utils","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":1,"end_line":191,"context_start_line":1,"context_end_line":191,"code":"import logging\nimport os\nimport signal\nimport sys\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom time import sleep, time\n\nfrom tqdm import tqdm\n\nfrom agentlab.experiments.loop import ExpArgs, yield_all_exp_results\n\nlogger = logging.getLogger(__name__) # Get logger based on module name\n\n\n# TODO move this to a more appropriate place\nRESULTS_DIR = os.environ.get(\"AGENTLAB_EXP_ROOT\", None)\nif RESULTS_DIR is None:\n RESULTS_DIR = os.environ.get(\"UI_COPILOT_RESULTS_DIR\", None)\nif RESULTS_DIR is None:\n logging.info(\"$AGENTLAB_EXP_ROOT is not defined, Using $HOME/agentlab_results.\")\n RESULTS_DIR = Path.home() / \"agentlab_results\"\nelse:\n RESULTS_DIR = Path(RESULTS_DIR)\n\nRESULTS_DIR.mkdir(parents=True, 
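The `RESULTS_DIR` resolution above runs at import time of `exp_utils`. A sketch of overriding it, assuming the environment variable is set before the module is first imported (otherwise the already-resolved value sticks):

```python
import os
from pathlib import Path

# Lookup order implemented above: $AGENTLAB_EXP_ROOT wins, then the legacy
# $UI_COPILOT_RESULTS_DIR, then ~/agentlab_results as a fallback. The
# directory is also created at import time.
os.environ["AGENTLAB_EXP_ROOT"] = str(Path.home() / "my_agentlab_results")

from agentlab.experiments.exp_utils import RESULTS_DIR  # noqa: E402

print(RESULTS_DIR)  # /home/<user>/my_agentlab_results
```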
exist_ok=True)\n\n\ndef run_exp(exp_arg: ExpArgs, *dependencies, avg_step_timeout=60):\n \"\"\"Run exp_args.run() with a timeout and handle dependencies.\"\"\"\n # episode_timeout = _episode_timeout(exp_arg, avg_step_timeout=avg_step_timeout)\n # logger.warning(f\"Running {exp_arg.exp_id} with timeout of {episode_timeout} seconds.\")\n # with timeout_manager(seconds=episode_timeout):\n # this timeout method is not robust enough. using ray.cancel instead\n return exp_arg.run()\n\n\ndef _episode_timeout(exp_arg: ExpArgs, avg_step_timeout=60):\n \"\"\"Some logic to determine the episode timeout.\"\"\"\n max_steps = getattr(exp_arg.env_args, \"max_steps\", None)\n if max_steps is None:\n episode_timeout_global = 10 * 60 * 60 # 10 hours\n else:\n episode_timeout_global = exp_arg.env_args.max_steps * avg_step_timeout\n\n episode_timeout_exp = getattr(exp_arg, \"episode_timeout\", episode_timeout_global)\n\n return min(episode_timeout_global, episode_timeout_exp)\n\n\n@contextmanager\ndef timeout_manager(seconds: int = None):\n \"\"\"Context manager to handle timeouts.\"\"\"\n\n if isinstance(seconds, float):\n seconds = max(1, int(seconds)) # make sure seconds is at least 1\n\n if seconds is None or sys.platform == \"win32\":\n try:\n logger.warning(\"Timeouts are not supported on Windows.\")\n yield\n finally:\n pass\n return\n\n def alarm_handler(signum, frame):\n logger.warning(f\"Operation timed out after {seconds}s, raising TimeoutError.\")\n # send sigint\n # os.kill(os.getpid(), signal.SIGINT) # this doesn't seem to do much I don't know why\n\n # Still raise TimeoutError for immediate handling\n # This works, but it doesn't seem enough to kill the job\n raise TimeoutError(f\"Operation timed out after {seconds} seconds\")\n\n previous_handler = signal.signal(signal.SIGALRM, alarm_handler)\n signal.alarm(seconds)\n\n try:\n yield\n finally:\n signal.alarm(0)\n signal.signal(signal.SIGALRM, previous_handler)\n\n\ndef add_dependencies(exp_args_list: list[ExpArgs], task_dependencies: dict[str, list[str]] = None):\n \"\"\"Add dependencies to a list of ExpArgs.\n\n Args:\n exp_args_list: list[ExpArgs]\n A list of experiments to run.\n task_dependencies: dict\n A dictionary mapping task names to a list of task names that they\n depend on. 
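A worked example of the arithmetic in the private `_episode_timeout` helper above, using `SimpleNamespace` stand-ins for `ExpArgs`/`env_args` (the real objects carry many more fields):

```python
from types import SimpleNamespace

from agentlab.experiments.exp_utils import _episode_timeout

# With max_steps = 15 and the default avg_step_timeout of 60 s, the global
# cap is 15 * 60 = 900 s; an explicit per-experiment episode_timeout can
# tighten it but never extend it.
exp_arg = SimpleNamespace(
    env_args=SimpleNamespace(max_steps=15),
    episode_timeout=600,  # hypothetical per-experiment override
)
print(_episode_timeout(exp_arg))  # min(900, 600) -> 600
```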
If None or empty, no dependencies are added.\n\n    Returns:\n        list[ExpArgs]\n            The modified exp_args_list with dependencies added.\n\n    Raises:\n        ValueError: If the task_dependencies are not valid.\n    \"\"\"\n\n    if task_dependencies is None or all([len(dep) == 0 for dep in task_dependencies.values()]):\n        # nothing to be done\n        return exp_args_list\n\n    for exp_args in exp_args_list:\n        exp_args.make_id()  # makes sure there is an exp_id\n\n    exp_args_map = {exp_args.env_args.task_name: exp_args for exp_args in exp_args_list}\n    if len(exp_args_map) != len(exp_args_list):\n        raise ValueError(\n            (\n                \"Task names are not unique in exp_args_map, \"\n                \"you can't run multiple seeds with task dependencies.\"\n            )\n        )\n\n    for task_name in exp_args_map.keys():\n        if task_name not in task_dependencies:\n            raise ValueError(f\"Task {task_name} is missing from task_dependencies\")\n\n    # turn dependencies from task names to exp_ids\n    for task_name, exp_args in exp_args_map.items():\n        exp_args.depends_on = tuple(\n            exp_args_map[dep_name].exp_id for dep_name in task_dependencies[task_name]\n        )\n\n    return exp_args_list\n\n\n# Mock implementation of the ExpArgs class with timestamp checks for unit testing\nclass MockedExpArgs:\n    def __init__(self, exp_id, depends_on=None):\n        self.exp_id = exp_id\n        self.exp_name = f\"exp_{exp_id}\"\n        self.depends_on = depends_on if depends_on else []\n        self.start_time = None\n        self.end_time = None\n        self.env_args = None\n\n    def run(self):\n        self.start_time = time()\n\n        # # simulate playwright code, (this was causing issues due to python async loop)\n        # import playwright.sync_api\n\n        # pw = playwright.sync_api.sync_playwright().start()\n        # pw.selectors.set_test_id_attribute(\"mytestid\")\n        sleep(3)  # Simulate task execution time\n        self.end_time = time()\n        return self\n\n\ndef make_seeds(n, offset=42):\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    return [seed + offset for seed in range(n)]\n\n\ndef order(exp_args_list: list[ExpArgs]):\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    \"\"\"Store the order of the list of experiments to be able to sort them back.\n\n    This is important for progression or ablation studies.\n    \"\"\"\n    for i, exp_args in enumerate(exp_args_list):\n        exp_args.order = i\n    return exp_args_list\n\n\n# This was an old function for filtering some issue with the experiments.\ndef hide_some_exp(base_dir, filter: callable, just_test):\n    \"\"\"Move all experiments that match the filter to a new name.\"\"\"\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    exp_list = list(yield_all_exp_results(base_dir, progress_fn=None))\n\n    msg = f\"Searching {len(exp_list)} experiments to move to _* experiments where `filter(exp_args)` is True.\"\n    if just_test:\n        msg += \"\\nNote: This is just a test, no experiments will be moved. 
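Example (editor's sketch): wiring `depends_on` with `add_dependencies`. The import path matches the records above; the experiment objects are hypothetical stand-ins, since the real `ExpArgs` provides `make_id` and `env_args` itself:

```python
import uuid
from types import SimpleNamespace

from agentlab.experiments.exp_utils import add_dependencies

def make_exp(task_name):
    # hypothetical minimal stand-in for ExpArgs
    exp = SimpleNamespace(env_args=SimpleNamespace(task_name=task_name))
    exp.make_id = lambda e=exp: setattr(e, "exp_id", uuid.uuid4().hex)
    return exp

exps = [make_exp("login"), make_exp("create_ticket")]
exps = add_dependencies(exps, {"login": [], "create_ticket": ["login"]})
assert exps[1].depends_on == (exps[0].exp_id,)  # task names became exp_id tuples
```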
Set `just_test=False` to move them.\"\n\n logging.info(msg)\n\n exp_list = tqdm(exp_list, desc=\"Filtering experiments.\")\n\n filtered_out = []\n for exp in exp_list:\n if filter(exp):\n if not just_test:\n _move_old_exp(exp.exp_dir)\n filtered_out.append(exp)\n return filtered_out","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils.run_exp","uri":"program://AgentLab/function/src.agentlab.experiments.exp_utils.run_exp#L29-L35","kind":"function","name":"run_exp","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":29,"end_line":35,"context_start_line":9,"context_end_line":55,"code":"from tqdm import tqdm\n\nfrom agentlab.experiments.loop import ExpArgs, yield_all_exp_results\n\nlogger = logging.getLogger(__name__) # Get logger based on module name\n\n\n# TODO move this to a more appropriate place\nRESULTS_DIR = os.environ.get(\"AGENTLAB_EXP_ROOT\", None)\nif RESULTS_DIR is None:\n RESULTS_DIR = os.environ.get(\"UI_COPILOT_RESULTS_DIR\", None)\nif RESULTS_DIR is None:\n logging.info(\"$AGENTLAB_EXP_ROOT is not defined, Using $HOME/agentlab_results.\")\n RESULTS_DIR = Path.home() / \"agentlab_results\"\nelse:\n RESULTS_DIR = Path(RESULTS_DIR)\n\nRESULTS_DIR.mkdir(parents=True, exist_ok=True)\n\n\ndef run_exp(exp_arg: ExpArgs, *dependencies, avg_step_timeout=60):\n \"\"\"Run exp_args.run() with a timeout and handle dependencies.\"\"\"\n # episode_timeout = _episode_timeout(exp_arg, avg_step_timeout=avg_step_timeout)\n # logger.warning(f\"Running {exp_arg.exp_id} with timeout of {episode_timeout} seconds.\")\n # with timeout_manager(seconds=episode_timeout):\n # this timeout method is not robust enough. using ray.cancel instead\n return exp_arg.run()\n\n\ndef _episode_timeout(exp_arg: ExpArgs, avg_step_timeout=60):\n \"\"\"Some logic to determine the episode timeout.\"\"\"\n max_steps = getattr(exp_arg.env_args, \"max_steps\", None)\n if max_steps is None:\n episode_timeout_global = 10 * 60 * 60 # 10 hours\n else:\n episode_timeout_global = exp_arg.env_args.max_steps * avg_step_timeout\n\n episode_timeout_exp = getattr(exp_arg, \"episode_timeout\", episode_timeout_global)\n\n return min(episode_timeout_global, episode_timeout_exp)\n\n\n@contextmanager\ndef timeout_manager(seconds: int = None):\n \"\"\"Context manager to handle timeouts.\"\"\"\n\n if isinstance(seconds, float):","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils._episode_timeout","uri":"program://AgentLab/function/src.agentlab.experiments.exp_utils._episode_timeout#L38-L48","kind":"function","name":"_episode_timeout","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":38,"end_line":48,"context_start_line":18,"context_end_line":68,"code":"if RESULTS_DIR is None:\n RESULTS_DIR = os.environ.get(\"UI_COPILOT_RESULTS_DIR\", None)\nif RESULTS_DIR is None:\n logging.info(\"$AGENTLAB_EXP_ROOT is not defined, Using $HOME/agentlab_results.\")\n RESULTS_DIR = Path.home() / \"agentlab_results\"\nelse:\n RESULTS_DIR = Path(RESULTS_DIR)\n\nRESULTS_DIR.mkdir(parents=True, exist_ok=True)\n\n\ndef run_exp(exp_arg: ExpArgs, *dependencies, avg_step_timeout=60):\n \"\"\"Run exp_args.run() with a timeout and handle dependencies.\"\"\"\n # episode_timeout = _episode_timeout(exp_arg, avg_step_timeout=avg_step_timeout)\n # logger.warning(f\"Running 
{exp_arg.exp_id} with timeout of {episode_timeout} seconds.\")\n # with timeout_manager(seconds=episode_timeout):\n # this timeout method is not robust enough. using ray.cancel instead\n return exp_arg.run()\n\n\ndef _episode_timeout(exp_arg: ExpArgs, avg_step_timeout=60):\n \"\"\"Some logic to determine the episode timeout.\"\"\"\n max_steps = getattr(exp_arg.env_args, \"max_steps\", None)\n if max_steps is None:\n episode_timeout_global = 10 * 60 * 60 # 10 hours\n else:\n episode_timeout_global = exp_arg.env_args.max_steps * avg_step_timeout\n\n episode_timeout_exp = getattr(exp_arg, \"episode_timeout\", episode_timeout_global)\n\n return min(episode_timeout_global, episode_timeout_exp)\n\n\n@contextmanager\ndef timeout_manager(seconds: int = None):\n \"\"\"Context manager to handle timeouts.\"\"\"\n\n if isinstance(seconds, float):\n seconds = max(1, int(seconds)) # make sure seconds is at least 1\n\n if seconds is None or sys.platform == \"win32\":\n try:\n logger.warning(\"Timeouts are not supported on Windows.\")\n yield\n finally:\n pass\n return\n\n def alarm_handler(signum, frame):\n logger.warning(f\"Operation timed out after {seconds}s, raising TimeoutError.\")\n # send sigint","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils.timeout_manager","uri":"program://AgentLab/function/src.agentlab.experiments.exp_utils.timeout_manager#L52-L82","kind":"function","name":"timeout_manager","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":52,"end_line":82,"context_start_line":32,"context_end_line":102,"code":" # logger.warning(f\"Running {exp_arg.exp_id} with timeout of {episode_timeout} seconds.\")\n # with timeout_manager(seconds=episode_timeout):\n # this timeout method is not robust enough. 
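Example (editor's sketch): the comments above say the signal-based timeout was abandoned in favor of cancelling Ray tasks. This is a generic illustration of that pattern, not AgentLab's actual Ray backend code:

```python
import time
import ray

ray.init(ignore_reinit_error=True)

@ray.remote
def run_one():
    time.sleep(3600)  # stand-in for exp_arg.run() hanging
    return "done"

ref = run_one.remote()
ready, not_ready = ray.wait([ref], timeout=5)  # 5 s budget for the demo
if not_ready:
    ray.cancel(ref, force=True)  # kill the worker process instead of trusting signals
```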
using ray.cancel instead\n return exp_arg.run()\n\n\ndef _episode_timeout(exp_arg: ExpArgs, avg_step_timeout=60):\n \"\"\"Some logic to determine the episode timeout.\"\"\"\n max_steps = getattr(exp_arg.env_args, \"max_steps\", None)\n if max_steps is None:\n episode_timeout_global = 10 * 60 * 60 # 10 hours\n else:\n episode_timeout_global = exp_arg.env_args.max_steps * avg_step_timeout\n\n episode_timeout_exp = getattr(exp_arg, \"episode_timeout\", episode_timeout_global)\n\n return min(episode_timeout_global, episode_timeout_exp)\n\n\n@contextmanager\ndef timeout_manager(seconds: int = None):\n \"\"\"Context manager to handle timeouts.\"\"\"\n\n if isinstance(seconds, float):\n seconds = max(1, int(seconds)) # make sure seconds is at least 1\n\n if seconds is None or sys.platform == \"win32\":\n try:\n logger.warning(\"Timeouts are not supported on Windows.\")\n yield\n finally:\n pass\n return\n\n def alarm_handler(signum, frame):\n logger.warning(f\"Operation timed out after {seconds}s, raising TimeoutError.\")\n # send sigint\n # os.kill(os.getpid(), signal.SIGINT) # this doesn't seem to do much I don't know why\n\n # Still raise TimeoutError for immediate handling\n # This works, but it doesn't seem enough to kill the job\n raise TimeoutError(f\"Operation timed out after {seconds} seconds\")\n\n previous_handler = signal.signal(signal.SIGALRM, alarm_handler)\n signal.alarm(seconds)\n\n try:\n yield\n finally:\n signal.alarm(0)\n signal.signal(signal.SIGALRM, previous_handler)\n\n\ndef add_dependencies(exp_args_list: list[ExpArgs], task_dependencies: dict[str, list[str]] = None):\n \"\"\"Add dependencies to a list of ExpArgs.\n\n Args:\n exp_args_list: list[ExpArgs]\n A list of experiments to run.\n task_dependencies: dict\n A dictionary mapping task names to a list of task names that they\n depend on. If None or empty, no dependencies are added.\n\n Returns:\n list[ExpArgs]\n The modified exp_args_list with dependencies added.\n\n Raises:\n ValueError: If the task_dependencies are not valid.\n \"\"\"\n","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils.add_dependencies","uri":"program://AgentLab/function/src.agentlab.experiments.exp_utils.add_dependencies#L85-L129","kind":"function","name":"add_dependencies","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":85,"end_line":129,"context_start_line":65,"context_end_line":149,"code":"\n def alarm_handler(signum, frame):\n logger.warning(f\"Operation timed out after {seconds}s, raising TimeoutError.\")\n # send sigint\n # os.kill(os.getpid(), signal.SIGINT) # this doesn't seem to do much I don't know why\n\n # Still raise TimeoutError for immediate handling\n # This works, but it doesn't seem enough to kill the job\n raise TimeoutError(f\"Operation timed out after {seconds} seconds\")\n\n previous_handler = signal.signal(signal.SIGALRM, alarm_handler)\n signal.alarm(seconds)\n\n try:\n yield\n finally:\n signal.alarm(0)\n signal.signal(signal.SIGALRM, previous_handler)\n\n\ndef add_dependencies(exp_args_list: list[ExpArgs], task_dependencies: dict[str, list[str]] = None):\n \"\"\"Add dependencies to a list of ExpArgs.\n\n Args:\n exp_args_list: list[ExpArgs]\n A list of experiments to run.\n task_dependencies: dict\n A dictionary mapping task names to a list of task names that they\n depend on. 
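Example (editor's sketch): using `timeout_manager` directly. It relies on SIGALRM, so it only applies on POSIX in the main thread; on Windows, or with `seconds=None`, the context manager is a no-op, as the code above shows:

```python
import time

from agentlab.experiments.exp_utils import timeout_manager

try:
    with timeout_manager(seconds=2):
        time.sleep(10)  # stand-in for a hung browser step
except TimeoutError as err:
    print(err)  # "Operation timed out after 2 seconds"
```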
If None or empty, no dependencies are added.\n\n    Returns:\n        list[ExpArgs]\n            The modified exp_args_list with dependencies added.\n\n    Raises:\n        ValueError: If the task_dependencies are not valid.\n    \"\"\"\n\n    if task_dependencies is None or all([len(dep) == 0 for dep in task_dependencies.values()]):\n        # nothing to be done\n        return exp_args_list\n\n    for exp_args in exp_args_list:\n        exp_args.make_id()  # makes sure there is an exp_id\n\n    exp_args_map = {exp_args.env_args.task_name: exp_args for exp_args in exp_args_list}\n    if len(exp_args_map) != len(exp_args_list):\n        raise ValueError(\n            (\n                \"Task names are not unique in exp_args_map, \"\n                \"you can't run multiple seeds with task dependencies.\"\n            )\n        )\n\n    for task_name in exp_args_map.keys():\n        if task_name not in task_dependencies:\n            raise ValueError(f\"Task {task_name} is missing from task_dependencies\")\n\n    # turn dependencies from task names to exp_ids\n    for task_name, exp_args in exp_args_map.items():\n        exp_args.depends_on = tuple(\n            exp_args_map[dep_name].exp_id for dep_name in task_dependencies[task_name]\n        )\n\n    return exp_args_list\n\n\n# Mock implementation of the ExpArgs class with timestamp checks for unit testing\nclass MockedExpArgs:\n    def __init__(self, exp_id, depends_on=None):\n        self.exp_id = exp_id\n        self.exp_name = f\"exp_{exp_id}\"\n        self.depends_on = depends_on if depends_on else []\n        self.start_time = None\n        self.end_time = None\n        self.env_args = None\n\n    def run(self):\n        self.start_time = time()\n\n        # # simulate playwright code, (this was causing issues due to python async loop)\n        # import playwright.sync_api\n\n        # pw = playwright.sync_api.sync_playwright().start()\n        # pw.selectors.set_test_id_attribute(\"mytestid\")","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils.MockedExpArgs","uri":"program://AgentLab/class/src.agentlab.experiments.exp_utils.MockedExpArgs#L133-L152","kind":"class","name":"MockedExpArgs","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":133,"end_line":152,"context_start_line":113,"context_end_line":172,"code":"            (\n                \"Task names are not unique in exp_args_map, \"\n                \"you can't run multiple seeds with task dependencies.\"\n            )\n        )\n\n    for task_name in exp_args_map.keys():\n        if task_name not in task_dependencies:\n            raise ValueError(f\"Task {task_name} is missing from task_dependencies\")\n\n    # turn dependencies from task names to exp_ids\n    for task_name, exp_args in exp_args_map.items():\n        exp_args.depends_on = tuple(\n            exp_args_map[dep_name].exp_id for dep_name in task_dependencies[task_name]\n        )\n\n    return exp_args_list\n\n\n# Mock implementation of the ExpArgs class with timestamp checks for unit testing\nclass MockedExpArgs:\n    def __init__(self, exp_id, depends_on=None):\n        self.exp_id = exp_id\n        self.exp_name = f\"exp_{exp_id}\"\n        self.depends_on = depends_on if depends_on else []\n        self.start_time = None\n        self.end_time = None\n        self.env_args = None\n\n    def run(self):\n        self.start_time = time()\n\n        # # simulate playwright code, (this was causing issues due to python async loop)\n        # import playwright.sync_api\n\n        # pw = playwright.sync_api.sync_playwright().start()\n        # pw.selectors.set_test_id_attribute(\"mytestid\")\n        sleep(3)  # Simulate task execution time\n        self.end_time = time()\n        return self\n\n\ndef make_seeds(n, offset=42):\n    raise DeprecationWarning(\"This function will be removed. 
Comment out this error if needed.\")\n    return [seed + offset for seed in range(n)]\n\n\ndef order(exp_args_list: list[ExpArgs]):\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    \"\"\"Store the order of the list of experiments to be able to sort them back.\n\n    This is important for progression or ablation studies.\n    \"\"\"\n    for i, exp_args in enumerate(exp_args_list):\n        exp_args.order = i\n    return exp_args_list\n\n\n# This was an old function for filtering some issue with the experiments.\ndef hide_some_exp(base_dir, filter: callable, just_test):","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils.make_seeds","uri":"program://AgentLab/function/src.agentlab.experiments.exp_utils.make_seeds#L155-L157","kind":"function","name":"make_seeds","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":155,"end_line":157,"context_start_line":135,"context_end_line":177,"code":"        self.exp_id = exp_id\n        self.exp_name = f\"exp_{exp_id}\"\n        self.depends_on = depends_on if depends_on else []\n        self.start_time = None\n        self.end_time = None\n        self.env_args = None\n\n    def run(self):\n        self.start_time = time()\n\n        # # simulate playwright code, (this was causing issues due to python async loop)\n        # import playwright.sync_api\n\n        # pw = playwright.sync_api.sync_playwright().start()\n        # pw.selectors.set_test_id_attribute(\"mytestid\")\n        sleep(3)  # Simulate task execution time\n        self.end_time = time()\n        return self\n\n\ndef make_seeds(n, offset=42):\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    return [seed + offset for seed in range(n)]\n\n\ndef order(exp_args_list: list[ExpArgs]):\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    \"\"\"Store the order of the list of experiments to be able to sort them back.\n\n    This is important for progression or ablation studies.\n    \"\"\"\n    for i, exp_args in enumerate(exp_args_list):\n        exp_args.order = i\n    return exp_args_list\n\n\n# This was an old function for filtering some issue with the experiments.\ndef hide_some_exp(base_dir, filter: callable, just_test):\n    \"\"\"Move all experiments that match the filter to a new name.\"\"\"\n    raise DeprecationWarning(\"This function will be removed. 
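Example (editor's sketch): `MockedExpArgs` above exists so scheduling logic can be unit-tested without browsers or LLMs; running two mocks in dependency order lets a test check the recorded timestamps:

```python
from agentlab.experiments.exp_utils import MockedExpArgs

parent = MockedExpArgs("a")
child = MockedExpArgs("b", depends_on=["a"])

parent.run()  # each run() sleeps ~3 s and records start/end times
child.run()
assert parent.end_time <= child.start_time  # child started only after parent finished
```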
Comment out this error if needed.\")\n    exp_list = list(yield_all_exp_results(base_dir, progress_fn=None))\n\n    msg = f\"Searching {len(exp_list)} experiments to move to _* experiments where `filter(exp_args)` is True.\"","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils.order","uri":"program://AgentLab/function/src.agentlab.experiments.exp_utils.order#L160-L168","kind":"function","name":"order","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":160,"end_line":168,"context_start_line":140,"context_end_line":188,"code":"        self.env_args = None\n\n    def run(self):\n        self.start_time = time()\n\n        # # simulate playwright code, (this was causing issues due to python async loop)\n        # import playwright.sync_api\n\n        # pw = playwright.sync_api.sync_playwright().start()\n        # pw.selectors.set_test_id_attribute(\"mytestid\")\n        sleep(3)  # Simulate task execution time\n        self.end_time = time()\n        return self\n\n\ndef make_seeds(n, offset=42):\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    return [seed + offset for seed in range(n)]\n\n\ndef order(exp_args_list: list[ExpArgs]):\n    raise DeprecationWarning(\"This function will be removed. 
Comment out this error if needed.\")\n    \"\"\"Store the order of the list of experiments to be able to sort them back.\n\n    This is important for progression or ablation studies.\n    \"\"\"\n    for i, exp_args in enumerate(exp_args_list):\n        exp_args.order = i\n    return exp_args_list\n\n\n# This was an old function for filtering some issue with the experiments.\ndef hide_some_exp(base_dir, filter: callable, just_test):\n    \"\"\"Move all experiments that match the filter to a new name.\"\"\"\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    exp_list = list(yield_all_exp_results(base_dir, progress_fn=None))\n\n    msg = f\"Searching {len(exp_list)} experiments to move to _* experiments where `filter(exp_args)` is True.\"\n    if just_test:\n        msg += \"\\nNote: This is just a test, no experiments will be moved. Set `just_test=False` to move them.\"\n\n    logging.info(msg)\n\n    exp_list = tqdm(exp_list, desc=\"Filtering experiments.\")\n\n    filtered_out = []\n    for exp in exp_list:\n        if filter(exp):\n            if not just_test:","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils.hide_some_exp","uri":"program://AgentLab/function/src.agentlab.experiments.exp_utils.hide_some_exp#L172-L191","kind":"function","name":"hide_some_exp","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":172,"end_line":191,"context_start_line":152,"context_end_line":191,"code":"        return self\n\n\ndef make_seeds(n, offset=42):\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    return [seed + offset for seed in range(n)]\n\n\ndef order(exp_args_list: list[ExpArgs]):\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    \"\"\"Store the order of the list of experiments to be able to sort them back.\n\n    This is important for progression or ablation studies.\n    \"\"\"\n    for i, exp_args in enumerate(exp_args_list):\n        exp_args.order = i\n    return exp_args_list\n\n\n# This was an old function for filtering some issue with the experiments.\ndef hide_some_exp(base_dir, filter: callable, just_test):\n    \"\"\"Move all experiments that match the filter to a new name.\"\"\"\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    exp_list = list(yield_all_exp_results(base_dir, progress_fn=None))\n\n    msg = f\"Searching {len(exp_list)} experiments to move to _* experiments where `filter(exp_args)` is True.\"\n    if just_test:\n        msg += \"\\nNote: This is just a test, no experiments will be moved. Set `just_test=False` to move them.\"\n\n    logging.info(msg)\n\n    exp_list = tqdm(exp_list, desc=\"Filtering experiments.\")\n\n    filtered_out = []\n    for exp in exp_list:\n        if filter(exp):\n            if not just_test:\n                _move_old_exp(exp.exp_dir)\n            filtered_out.append(exp)\n    return filtered_out","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils.alarm_handler","uri":"program://AgentLab/function/src.agentlab.experiments.exp_utils.alarm_handler#L66-L73","kind":"function","name":"alarm_handler","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":66,"end_line":73,"context_start_line":46,"context_end_line":93,"code":"    episode_timeout_exp = getattr(exp_arg, \"episode_timeout\", episode_timeout_global)\n\n    return min(episode_timeout_global, episode_timeout_exp)\n\n\n@contextmanager\ndef timeout_manager(seconds: int = None):\n    \"\"\"Context manager to handle timeouts.\"\"\"\n\n    if isinstance(seconds, float):\n        seconds = max(1, int(seconds))  # make sure seconds is at least 1\n\n    if seconds is None or sys.platform == \"win32\":\n        try:\n            logger.warning(\"Timeouts are not supported on Windows.\")\n            yield\n        finally:\n            pass\n        return\n\n    def alarm_handler(signum, frame):\n        logger.warning(f\"Operation timed out after {seconds}s, raising TimeoutError.\")\n        # send sigint\n        # os.kill(os.getpid(), signal.SIGINT)  # this doesn't seem to do much I don't know why\n\n        # Still raise TimeoutError for immediate handling\n        # This works, but it doesn't seem enough to kill the job\n        raise TimeoutError(f\"Operation timed out after {seconds} seconds\")\n\n    previous_handler = signal.signal(signal.SIGALRM, alarm_handler)\n    signal.alarm(seconds)\n\n    try:\n        yield\n    finally:\n        signal.alarm(0)\n        signal.signal(signal.SIGALRM, previous_handler)\n\n\ndef add_dependencies(exp_args_list: list[ExpArgs], task_dependencies: dict[str, list[str]] = None):\n    \"\"\"Add dependencies to a list of ExpArgs.\n\n    Args:\n        exp_args_list: list[ExpArgs]\n            A list of experiments to run.\n        task_dependencies: dict\n            A dictionary mapping task names to a list of task names that they\n            depend on. 
If None or empty, no dependencies are added.","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils.__init__","uri":"program://AgentLab/function/src.agentlab.experiments.exp_utils.__init__#L134-L140","kind":"function","name":"__init__","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":134,"end_line":140,"context_start_line":114,"context_end_line":160,"code":"                \"Task names are not unique in exp_args_map, \"\n                \"you can't run multiple seeds with task dependencies.\"\n            )\n        )\n\n    for task_name in exp_args_map.keys():\n        if task_name not in task_dependencies:\n            raise ValueError(f\"Task {task_name} is missing from task_dependencies\")\n\n    # turn dependencies from task names to exp_ids\n    for task_name, exp_args in exp_args_map.items():\n        exp_args.depends_on = tuple(\n            exp_args_map[dep_name].exp_id for dep_name in task_dependencies[task_name]\n        )\n\n    return exp_args_list\n\n\n# Mock implementation of the ExpArgs class with timestamp checks for unit testing\nclass MockedExpArgs:\n    def __init__(self, exp_id, depends_on=None):\n        self.exp_id = exp_id\n        self.exp_name = f\"exp_{exp_id}\"\n        self.depends_on = depends_on if depends_on else []\n        self.start_time = None\n        self.end_time = None\n        self.env_args = None\n\n    def run(self):\n        self.start_time = time()\n\n        # # simulate playwright code, (this was causing issues due to python async loop)\n        # import playwright.sync_api\n\n        # pw = playwright.sync_api.sync_playwright().start()\n        # pw.selectors.set_test_id_attribute(\"mytestid\")\n        sleep(3)  # Simulate task execution time\n        self.end_time = time()\n        return self\n\n\ndef make_seeds(n, offset=42):\n    raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n    return [seed + offset for seed in range(n)]\n\n\ndef order(exp_args_list: list[ExpArgs]):","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.exp_utils.run","uri":"program://AgentLab/function/src.agentlab.experiments.exp_utils.run#L142-L152","kind":"function","name":"run","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":142,"end_line":152,"context_start_line":122,"context_end_line":172,"code":"\n    # turn dependencies from task names to exp_ids\n    for task_name, exp_args in exp_args_map.items():\n        exp_args.depends_on = tuple(\n            exp_args_map[dep_name].exp_id for dep_name in task_dependencies[task_name]\n        )\n\n    return exp_args_list\n\n\n# Mock implementation of the ExpArgs class with timestamp checks for unit testing\nclass MockedExpArgs:\n    def __init__(self, exp_id, depends_on=None):\n        self.exp_id = exp_id\n        self.exp_name = f\"exp_{exp_id}\"\n        self.depends_on = depends_on if depends_on else []\n        self.start_time = None\n        self.end_time = None\n        self.env_args = None\n\n    def run(self):\n        self.start_time = time()\n\n        # # simulate playwright code, (this was causing issues due to python async loop)\n        # import playwright.sync_api\n\n        # pw = playwright.sync_api.sync_playwright().start()\n        # pw.selectors.set_test_id_attribute(\"mytestid\")\n        sleep(3)  # Simulate task execution time\n        self.end_time = time()\n        return self\n\n\ndef make_seeds(n, offset=42):\n    raise DeprecationWarning(\"This function will be removed. 
Comment out this error if needed.\")\n return [seed + offset for seed in range(n)]\n\n\ndef order(exp_args_list: list[ExpArgs]):\n raise DeprecationWarning(\"This function will be removed. Comment out this error if needed.\")\n \"\"\"Store the order of the list of experiments to be able to sort them back.\n\n This is important for progression or ablation studies.\n \"\"\"\n for i, exp_args in enumerate(exp_args_list):\n exp_args.order = i\n return exp_args_list\n\n\n# This was an old function for filtering some issue with the experiments.\ndef hide_some_exp(base_dir, filter: callable, just_test):","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.multi_server","uri":"program://AgentLab/module/src.agentlab.experiments.multi_server#L1-L90","kind":"module","name":"src.agentlab.experiments.multi_server","path":"src/agentlab/experiments/multi_server.py","language":"python","start_line":1,"end_line":90,"context_start_line":1,"context_end_line":90,"code":"from copy import deepcopy\nfrom dataclasses import dataclass\nimport os\nimport sys\nfrom browsergym.webarena.instance import WebArenaInstance\n\n\nclass BaseServer:\n \"\"\"Base class for server instances.\n\n Behaves like an identity function for running in parallel on servers that don't need multiple\n instances.\n \"\"\"\n\n def init(self):\n pass\n\n\n@dataclass\nclass WebArenaInstanceVars(BaseServer):\n base_url: str\n shopping: str\n shopping_admin: str\n reddit: str\n gitlab: str\n wikipedia: str\n map: str\n homepage: str\n full_reset: str\n module_name: str = \"webarena\"\n prefix: str = \"WA_\"\n\n def make_env_vars(self):\n \"\"\"Return a dictionary of environment variables\"\"\"\n return {\n f\"{self.prefix}SHOPPING\": f\"{self.base_url}:{self.shopping}\",\n f\"{self.prefix}SHOPPING_ADMIN\": f\"{self.base_url}:{self.shopping_admin}\",\n f\"{self.prefix}REDDIT\": f\"{self.base_url}:{self.reddit}\",\n f\"{self.prefix}GITLAB\": f\"{self.base_url}:{self.gitlab}\",\n f\"{self.prefix}WIKIPEDIA\": f\"{self.base_url}:{self.wikipedia}\",\n f\"{self.prefix}MAP\": f\"{self.base_url}:{self.map}\",\n f\"{self.prefix}HOMEPAGE\": f\"{self.base_url}:{self.homepage}\",\n f\"{self.prefix}FULL_RESET\": f\"{self.base_url}:{self.full_reset}\",\n }\n\n def init(self):\n # necessary for webarena to re-import the env vars\n unimport_modules(self.module_name)\n for key, value in self.make_env_vars().items():\n os.environ[key] = value\n\n # this is just a dynamic check to see that the env vars are set correctly\n bgym_instance = WebArenaInstance()\n base_url, _ = _split_url(bgym_instance.urls[\"reddit\"])\n assert base_url == self.base_url, f\"Expected {self.base_url}, got {base_url}\"\n\n @staticmethod\n def from_env_vars(prefix=\"WA_\", module_name=\"webarena\"):\n kwargs = {\"module_name\": module_name}\n base_urls = set()\n for key, url in os.environ.items():\n if key.startswith(prefix):\n base_url, url_tail = _split_url(url)\n base_urls.add(base_url)\n kwargs[key[len(prefix) :].lower()] = url_tail\n\n if len(base_urls) > 1:\n raise ValueError(\"Multiple base urls found in environment variables\")\n\n kwargs[\"base_url\"] = base_urls.pop()\n return WebArenaInstanceVars(**kwargs)\n\n def clone(self):\n \"\"\"Return a deep copy of the instance\"\"\"\n return deepcopy(self)\n\n\ndef unimport_modules(base_name):\n \"\"\"un-import any module starting with base_name\"\"\"\n for module in sys.modules.copy():\n if module.startswith(base_name):\n del 
sys.modules[module]\n\n\ndef _split_url(url: str):\n \"\"\"Extract the base url and the port/page from a url\"\"\"\n parts = url.split(\":\")\n base_url = \":\".join(parts[0:2])\n url_tail = \":\".join(parts[2:])\n return base_url, url_tail","source_hash":"b2e9ed6a57c9dce3c95b9b51a0341cbd691b6d291027868cfd623679c881f805","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.multi_server.BaseServer","uri":"program://AgentLab/class/src.agentlab.experiments.multi_server.BaseServer#L8-L16","kind":"class","name":"BaseServer","path":"src/agentlab/experiments/multi_server.py","language":"python","start_line":8,"end_line":16,"context_start_line":1,"context_end_line":36,"code":"from copy import deepcopy\nfrom dataclasses import dataclass\nimport os\nimport sys\nfrom browsergym.webarena.instance import WebArenaInstance\n\n\nclass BaseServer:\n \"\"\"Base class for server instances.\n\n Behaves like an identity function for running in parallel on servers that don't need multiple\n instances.\n \"\"\"\n\n def init(self):\n pass\n\n\n@dataclass\nclass WebArenaInstanceVars(BaseServer):\n base_url: str\n shopping: str\n shopping_admin: str\n reddit: str\n gitlab: str\n wikipedia: str\n map: str\n homepage: str\n full_reset: str\n module_name: str = \"webarena\"\n prefix: str = \"WA_\"\n\n def make_env_vars(self):\n \"\"\"Return a dictionary of environment variables\"\"\"\n return {\n f\"{self.prefix}SHOPPING\": f\"{self.base_url}:{self.shopping}\",","source_hash":"b2e9ed6a57c9dce3c95b9b51a0341cbd691b6d291027868cfd623679c881f805","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.multi_server.WebArenaInstanceVars","uri":"program://AgentLab/class/src.agentlab.experiments.multi_server.WebArenaInstanceVars#L20-L75","kind":"class","name":"WebArenaInstanceVars","path":"src/agentlab/experiments/multi_server.py","language":"python","start_line":20,"end_line":75,"context_start_line":1,"context_end_line":90,"code":"from copy import deepcopy\nfrom dataclasses import dataclass\nimport os\nimport sys\nfrom browsergym.webarena.instance import WebArenaInstance\n\n\nclass BaseServer:\n \"\"\"Base class for server instances.\n\n Behaves like an identity function for running in parallel on servers that don't need multiple\n instances.\n \"\"\"\n\n def init(self):\n pass\n\n\n@dataclass\nclass WebArenaInstanceVars(BaseServer):\n base_url: str\n shopping: str\n shopping_admin: str\n reddit: str\n gitlab: str\n wikipedia: str\n map: str\n homepage: str\n full_reset: str\n module_name: str = \"webarena\"\n prefix: str = \"WA_\"\n\n def make_env_vars(self):\n \"\"\"Return a dictionary of environment variables\"\"\"\n return {\n f\"{self.prefix}SHOPPING\": f\"{self.base_url}:{self.shopping}\",\n f\"{self.prefix}SHOPPING_ADMIN\": f\"{self.base_url}:{self.shopping_admin}\",\n f\"{self.prefix}REDDIT\": f\"{self.base_url}:{self.reddit}\",\n f\"{self.prefix}GITLAB\": f\"{self.base_url}:{self.gitlab}\",\n f\"{self.prefix}WIKIPEDIA\": f\"{self.base_url}:{self.wikipedia}\",\n f\"{self.prefix}MAP\": f\"{self.base_url}:{self.map}\",\n f\"{self.prefix}HOMEPAGE\": f\"{self.base_url}:{self.homepage}\",\n f\"{self.prefix}FULL_RESET\": f\"{self.base_url}:{self.full_reset}\",\n }\n\n def init(self):\n # necessary for webarena to re-import the env vars\n unimport_modules(self.module_name)\n for key, value in self.make_env_vars().items():\n os.environ[key] = value\n\n # this is just a dynamic check to see that the env vars are set correctly\n bgym_instance = WebArenaInstance()\n 
base_url, _ = _split_url(bgym_instance.urls[\"reddit\"])\n assert base_url == self.base_url, f\"Expected {self.base_url}, got {base_url}\"\n\n @staticmethod\n def from_env_vars(prefix=\"WA_\", module_name=\"webarena\"):\n kwargs = {\"module_name\": module_name}\n base_urls = set()\n for key, url in os.environ.items():\n if key.startswith(prefix):\n base_url, url_tail = _split_url(url)\n base_urls.add(base_url)\n kwargs[key[len(prefix) :].lower()] = url_tail\n\n if len(base_urls) > 1:\n raise ValueError(\"Multiple base urls found in environment variables\")\n\n kwargs[\"base_url\"] = base_urls.pop()\n return WebArenaInstanceVars(**kwargs)\n\n def clone(self):\n \"\"\"Return a deep copy of the instance\"\"\"\n return deepcopy(self)\n\n\ndef unimport_modules(base_name):\n \"\"\"un-import any module starting with base_name\"\"\"\n for module in sys.modules.copy():\n if module.startswith(base_name):\n del sys.modules[module]\n\n\ndef _split_url(url: str):\n \"\"\"Extract the base url and the port/page from a url\"\"\"\n parts = url.split(\":\")\n base_url = \":\".join(parts[0:2])\n url_tail = \":\".join(parts[2:])\n return base_url, url_tail","source_hash":"b2e9ed6a57c9dce3c95b9b51a0341cbd691b6d291027868cfd623679c881f805","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.multi_server.unimport_modules","uri":"program://AgentLab/function/src.agentlab.experiments.multi_server.unimport_modules#L78-L82","kind":"function","name":"unimport_modules","path":"src/agentlab/experiments/multi_server.py","language":"python","start_line":78,"end_line":82,"context_start_line":58,"context_end_line":90,"code":" def from_env_vars(prefix=\"WA_\", module_name=\"webarena\"):\n kwargs = {\"module_name\": module_name}\n base_urls = set()\n for key, url in os.environ.items():\n if key.startswith(prefix):\n base_url, url_tail = _split_url(url)\n base_urls.add(base_url)\n kwargs[key[len(prefix) :].lower()] = url_tail\n\n if len(base_urls) > 1:\n raise ValueError(\"Multiple base urls found in environment variables\")\n\n kwargs[\"base_url\"] = base_urls.pop()\n return WebArenaInstanceVars(**kwargs)\n\n def clone(self):\n \"\"\"Return a deep copy of the instance\"\"\"\n return deepcopy(self)\n\n\ndef unimport_modules(base_name):\n \"\"\"un-import any module starting with base_name\"\"\"\n for module in sys.modules.copy():\n if module.startswith(base_name):\n del sys.modules[module]\n\n\ndef _split_url(url: str):\n \"\"\"Extract the base url and the port/page from a url\"\"\"\n parts = url.split(\":\")\n base_url = \":\".join(parts[0:2])\n url_tail = \":\".join(parts[2:])\n return base_url, url_tail","source_hash":"b2e9ed6a57c9dce3c95b9b51a0341cbd691b6d291027868cfd623679c881f805","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.multi_server._split_url","uri":"program://AgentLab/function/src.agentlab.experiments.multi_server._split_url#L85-L90","kind":"function","name":"_split_url","path":"src/agentlab/experiments/multi_server.py","language":"python","start_line":85,"end_line":90,"context_start_line":65,"context_end_line":90,"code":" kwargs[key[len(prefix) :].lower()] = url_tail\n\n if len(base_urls) > 1:\n raise ValueError(\"Multiple base urls found in environment variables\")\n\n kwargs[\"base_url\"] = base_urls.pop()\n return WebArenaInstanceVars(**kwargs)\n\n def clone(self):\n \"\"\"Return a deep copy of the instance\"\"\"\n return deepcopy(self)\n\n\ndef unimport_modules(base_name):\n \"\"\"un-import any module starting with base_name\"\"\"\n for module 
in sys.modules.copy():\n if module.startswith(base_name):\n del sys.modules[module]\n\n\ndef _split_url(url: str):\n \"\"\"Extract the base url and the port/page from a url\"\"\"\n parts = url.split(\":\")\n base_url = \":\".join(parts[0:2])\n url_tail = \":\".join(parts[2:])\n return base_url, url_tail","source_hash":"b2e9ed6a57c9dce3c95b9b51a0341cbd691b6d291027868cfd623679c881f805","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.multi_server.init","uri":"program://AgentLab/function/src.agentlab.experiments.multi_server.init#L46-L55","kind":"function","name":"init","path":"src/agentlab/experiments/multi_server.py","language":"python","start_line":46,"end_line":55,"context_start_line":26,"context_end_line":75,"code":" wikipedia: str\n map: str\n homepage: str\n full_reset: str\n module_name: str = \"webarena\"\n prefix: str = \"WA_\"\n\n def make_env_vars(self):\n \"\"\"Return a dictionary of environment variables\"\"\"\n return {\n f\"{self.prefix}SHOPPING\": f\"{self.base_url}:{self.shopping}\",\n f\"{self.prefix}SHOPPING_ADMIN\": f\"{self.base_url}:{self.shopping_admin}\",\n f\"{self.prefix}REDDIT\": f\"{self.base_url}:{self.reddit}\",\n f\"{self.prefix}GITLAB\": f\"{self.base_url}:{self.gitlab}\",\n f\"{self.prefix}WIKIPEDIA\": f\"{self.base_url}:{self.wikipedia}\",\n f\"{self.prefix}MAP\": f\"{self.base_url}:{self.map}\",\n f\"{self.prefix}HOMEPAGE\": f\"{self.base_url}:{self.homepage}\",\n f\"{self.prefix}FULL_RESET\": f\"{self.base_url}:{self.full_reset}\",\n }\n\n def init(self):\n # necessary for webarena to re-import the env vars\n unimport_modules(self.module_name)\n for key, value in self.make_env_vars().items():\n os.environ[key] = value\n\n # this is just a dynamic check to see that the env vars are set correctly\n bgym_instance = WebArenaInstance()\n base_url, _ = _split_url(bgym_instance.urls[\"reddit\"])\n assert base_url == self.base_url, f\"Expected {self.base_url}, got {base_url}\"\n\n @staticmethod\n def from_env_vars(prefix=\"WA_\", module_name=\"webarena\"):\n kwargs = {\"module_name\": module_name}\n base_urls = set()\n for key, url in os.environ.items():\n if key.startswith(prefix):\n base_url, url_tail = _split_url(url)\n base_urls.add(base_url)\n kwargs[key[len(prefix) :].lower()] = url_tail\n\n if len(base_urls) > 1:\n raise ValueError(\"Multiple base urls found in environment variables\")\n\n kwargs[\"base_url\"] = base_urls.pop()\n return WebArenaInstanceVars(**kwargs)\n\n def clone(self):\n \"\"\"Return a deep copy of the instance\"\"\"\n return deepcopy(self)","source_hash":"b2e9ed6a57c9dce3c95b9b51a0341cbd691b6d291027868cfd623679c881f805","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.multi_server.make_env_vars","uri":"program://AgentLab/function/src.agentlab.experiments.multi_server.make_env_vars#L33-L44","kind":"function","name":"make_env_vars","path":"src/agentlab/experiments/multi_server.py","language":"python","start_line":33,"end_line":44,"context_start_line":13,"context_end_line":64,"code":" \"\"\"\n\n def init(self):\n pass\n\n\n@dataclass\nclass WebArenaInstanceVars(BaseServer):\n base_url: str\n shopping: str\n shopping_admin: str\n reddit: str\n gitlab: str\n wikipedia: str\n map: str\n homepage: str\n full_reset: str\n module_name: str = \"webarena\"\n prefix: str = \"WA_\"\n\n def make_env_vars(self):\n \"\"\"Return a dictionary of environment variables\"\"\"\n return {\n f\"{self.prefix}SHOPPING\": f\"{self.base_url}:{self.shopping}\",\n f\"{self.prefix}SHOPPING_ADMIN\": 
f\"{self.base_url}:{self.shopping_admin}\",\n f\"{self.prefix}REDDIT\": f\"{self.base_url}:{self.reddit}\",\n f\"{self.prefix}GITLAB\": f\"{self.base_url}:{self.gitlab}\",\n f\"{self.prefix}WIKIPEDIA\": f\"{self.base_url}:{self.wikipedia}\",\n f\"{self.prefix}MAP\": f\"{self.base_url}:{self.map}\",\n f\"{self.prefix}HOMEPAGE\": f\"{self.base_url}:{self.homepage}\",\n f\"{self.prefix}FULL_RESET\": f\"{self.base_url}:{self.full_reset}\",\n }\n\n def init(self):\n # necessary for webarena to re-import the env vars\n unimport_modules(self.module_name)\n for key, value in self.make_env_vars().items():\n os.environ[key] = value\n\n # this is just a dynamic check to see that the env vars are set correctly\n bgym_instance = WebArenaInstance()\n base_url, _ = _split_url(bgym_instance.urls[\"reddit\"])\n assert base_url == self.base_url, f\"Expected {self.base_url}, got {base_url}\"\n\n @staticmethod\n def from_env_vars(prefix=\"WA_\", module_name=\"webarena\"):\n kwargs = {\"module_name\": module_name}\n base_urls = set()\n for key, url in os.environ.items():\n if key.startswith(prefix):\n base_url, url_tail = _split_url(url)\n base_urls.add(base_url)","source_hash":"b2e9ed6a57c9dce3c95b9b51a0341cbd691b6d291027868cfd623679c881f805","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.multi_server.from_env_vars","uri":"program://AgentLab/function/src.agentlab.experiments.multi_server.from_env_vars#L58-L71","kind":"function","name":"from_env_vars","path":"src/agentlab/experiments/multi_server.py","language":"python","start_line":58,"end_line":71,"context_start_line":38,"context_end_line":90,"code":" f\"{self.prefix}REDDIT\": f\"{self.base_url}:{self.reddit}\",\n f\"{self.prefix}GITLAB\": f\"{self.base_url}:{self.gitlab}\",\n f\"{self.prefix}WIKIPEDIA\": f\"{self.base_url}:{self.wikipedia}\",\n f\"{self.prefix}MAP\": f\"{self.base_url}:{self.map}\",\n f\"{self.prefix}HOMEPAGE\": f\"{self.base_url}:{self.homepage}\",\n f\"{self.prefix}FULL_RESET\": f\"{self.base_url}:{self.full_reset}\",\n }\n\n def init(self):\n # necessary for webarena to re-import the env vars\n unimport_modules(self.module_name)\n for key, value in self.make_env_vars().items():\n os.environ[key] = value\n\n # this is just a dynamic check to see that the env vars are set correctly\n bgym_instance = WebArenaInstance()\n base_url, _ = _split_url(bgym_instance.urls[\"reddit\"])\n assert base_url == self.base_url, f\"Expected {self.base_url}, got {base_url}\"\n\n @staticmethod\n def from_env_vars(prefix=\"WA_\", module_name=\"webarena\"):\n kwargs = {\"module_name\": module_name}\n base_urls = set()\n for key, url in os.environ.items():\n if key.startswith(prefix):\n base_url, url_tail = _split_url(url)\n base_urls.add(base_url)\n kwargs[key[len(prefix) :].lower()] = url_tail\n\n if len(base_urls) > 1:\n raise ValueError(\"Multiple base urls found in environment variables\")\n\n kwargs[\"base_url\"] = base_urls.pop()\n return WebArenaInstanceVars(**kwargs)\n\n def clone(self):\n \"\"\"Return a deep copy of the instance\"\"\"\n return deepcopy(self)\n\n\ndef unimport_modules(base_name):\n \"\"\"un-import any module starting with base_name\"\"\"\n for module in sys.modules.copy():\n if module.startswith(base_name):\n del sys.modules[module]\n\n\ndef _split_url(url: str):\n \"\"\"Extract the base url and the port/page from a url\"\"\"\n parts = url.split(\":\")\n base_url = \":\".join(parts[0:2])\n url_tail = \":\".join(parts[2:])\n return base_url, 
url_tail","source_hash":"b2e9ed6a57c9dce3c95b9b51a0341cbd691b6d291027868cfd623679c881f805","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.multi_server.clone","uri":"program://AgentLab/function/src.agentlab.experiments.multi_server.clone#L73-L75","kind":"function","name":"clone","path":"src/agentlab/experiments/multi_server.py","language":"python","start_line":73,"end_line":75,"context_start_line":53,"context_end_line":90,"code":" bgym_instance = WebArenaInstance()\n base_url, _ = _split_url(bgym_instance.urls[\"reddit\"])\n assert base_url == self.base_url, f\"Expected {self.base_url}, got {base_url}\"\n\n @staticmethod\n def from_env_vars(prefix=\"WA_\", module_name=\"webarena\"):\n kwargs = {\"module_name\": module_name}\n base_urls = set()\n for key, url in os.environ.items():\n if key.startswith(prefix):\n base_url, url_tail = _split_url(url)\n base_urls.add(base_url)\n kwargs[key[len(prefix) :].lower()] = url_tail\n\n if len(base_urls) > 1:\n raise ValueError(\"Multiple base urls found in environment variables\")\n\n kwargs[\"base_url\"] = base_urls.pop()\n return WebArenaInstanceVars(**kwargs)\n\n def clone(self):\n \"\"\"Return a deep copy of the instance\"\"\"\n return deepcopy(self)\n\n\ndef unimport_modules(base_name):\n \"\"\"un-import any module starting with base_name\"\"\"\n for module in sys.modules.copy():\n if module.startswith(base_name):\n del sys.modules[module]\n\n\ndef _split_url(url: str):\n \"\"\"Extract the base url and the port/page from a url\"\"\"\n parts = url.split(\":\")\n base_url = \":\".join(parts[0:2])\n url_tail = \":\".join(parts[2:])\n return base_url, url_tail","source_hash":"b2e9ed6a57c9dce3c95b9b51a0341cbd691b6d291027868cfd623679c881f805","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.experiments.reproduce_study","uri":"program://AgentLab/module/src.agentlab.experiments.reproduce_study#L1-L20","kind":"module","name":"src.agentlab.experiments.reproduce_study","path":"src/agentlab/experiments/reproduce_study.py","language":"python","start_line":1,"end_line":20,"context_start_line":1,"context_end_line":20,"code":"\"\"\"\nThis script will leverage an old study to reproduce it on the same tasks and\nsame seeds. Instead of calling the LLM it will reuse the responses from the old\nllm. 
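Example (editor's sketch): the URL convention the `multi_server` helpers above rely on is `<scheme>://<host>:<port-or-page>`; `_split_url` keeps the scheme and host together and returns the rest as the tail, and `make_env_vars()` reassembles `f"{base_url}:{port}"` per service before `init()` exports them. The logic is mirrored here with a hypothetical host name:

```python
def split_url(url: str):
    # mirrors _split_url from src/agentlab/experiments/multi_server.py
    parts = url.split(":")
    return ":".join(parts[:2]), ":".join(parts[2:])

base, tail = split_url("http://webarena-host.example:7770")
assert (base, tail) == ("http://webarena-host.example", "7770")
```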
Load the study in agent-xray and look at the Agent Info HTML to compare\nthe diff in HTML format.\n\"\"\"\n\nfrom agentlab.agents.generic_agent.reproducibility_agent import reproduce_study\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\n\n\nif __name__ == \"__main__\":\n\n # replace by your study name\n old_study = \"2024-06-03_12-28-51_final_run_miniwob_llama3-70b\"\n\n study = reproduce_study(RESULTS_DIR / old_study)\n n_jobs = 1\n\n study.run(n_jobs=n_jobs, parallel_backend=\"joblib\", strict_reproducibility=False)","source_hash":"fb54634e33ff821da2fe641b1294a4be24293939635948dd5a8835047acbf312","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld_axtree_preprocessing","uri":"program://AgentLab/module/src.agentlab.benchmarks.osworld_axtree_preprocessing#L1-L340","kind":"module","name":"src.agentlab.benchmarks.osworld_axtree_preprocessing","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":1,"end_line":340,"context_start_line":1,"context_end_line":340,"code":"import io\nimport xml.etree.ElementTree as ET\nfrom typing import Tuple, List\n\nfrom PIL import Image, ImageDraw, ImageFont\n\n\ndef find_leaf_nodes(xlm_file_str):\n if not xlm_file_str:\n return []\n\n root = ET.fromstring(xlm_file_str)\n\n # Recursive function to traverse the XML tree and collect leaf nodes\n def collect_leaf_nodes(node, leaf_nodes):\n # If the node has no children, it is a leaf node, add it to the list\n if not list(node):\n leaf_nodes.append(node)\n # If the node has children, recurse on each child\n for child in node:\n collect_leaf_nodes(child, leaf_nodes)\n\n # List to hold all leaf nodes\n leaf_nodes = []\n collect_leaf_nodes(root, leaf_nodes)\n return leaf_nodes\n\n\nattributes_ns_ubuntu = \"https://accessibility.windows.example.org/ns/attributes\"\nattributes_ns_windows = \"https://accessibility.windows.example.org/ns/attributes\"\nstate_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/state\"\nstate_ns_windows = \"https://accessibility.windows.example.org/ns/state\"\ncomponent_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/component\"\ncomponent_ns_windows = \"https://accessibility.windows.example.org/ns/component\"\nvalue_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/value\"\nvalue_ns_windows = \"https://accessibility.windows.example.org/ns/value\"\nclass_ns_windows = \"https://accessibility.windows.example.org/ns/class\"\n\n\ndef judge_node(node: ET, platform=\"ubuntu\", check_image=False) -> bool:\n if platform == \"ubuntu\":\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n elif platform == \"windows\":\n _state_ns = state_ns_windows\n _component_ns = component_ns_windows\n else:\n raise ValueError(\"Invalid platform, must be 'ubuntu' or 'windows'\")\n\n keeps: bool = (\n node.tag.startswith(\"document\")\n or node.tag.endswith(\"item\")\n or node.tag.endswith(\"button\")\n or node.tag.endswith(\"heading\")\n or node.tag.endswith(\"label\")\n or node.tag.endswith(\"scrollbar\")\n or node.tag.endswith(\"searchbox\")\n or node.tag.endswith(\"textbox\")\n or node.tag.endswith(\"link\")\n or node.tag.endswith(\"tabelement\")\n or node.tag.endswith(\"textfield\")\n or node.tag.endswith(\"textarea\")\n or node.tag.endswith(\"menu\")\n or node.tag\n in {\n \"alert\",\n \"canvas\",\n \"check-box\",\n \"combo-box\",\n \"entry\",\n \"icon\",\n \"image\",\n \"paragraph\",\n \"scroll-bar\",\n \"section\",\n \"slider\",\n \"static\",\n \"table-cell\",\n \"terminal\",\n 
\"text\",\n \"netuiribbontab\",\n \"start\",\n \"trayclockwclass\",\n \"traydummysearchcontrol\",\n \"uiimage\",\n \"uiproperty\",\n \"uiribboncommandbar\",\n }\n )\n keeps = (\n keeps\n and (\n platform == \"ubuntu\"\n and node.get(\"{{{:}}}showing\".format(_state_ns), \"false\") == \"true\"\n and node.get(\"{{{:}}}visible\".format(_state_ns), \"false\") == \"true\"\n or platform == \"windows\"\n and node.get(\"{{{:}}}visible\".format(_state_ns), \"false\") == \"true\"\n )\n and (\n node.get(\"{{{:}}}enabled\".format(_state_ns), \"false\") == \"true\"\n or node.get(\"{{{:}}}editable\".format(_state_ns), \"false\") == \"true\"\n or node.get(\"{{{:}}}expandable\".format(_state_ns), \"false\") == \"true\"\n or node.get(\"{{{:}}}checkable\".format(_state_ns), \"false\") == \"true\"\n )\n and (\n node.get(\"name\", \"\") != \"\"\n or node.text is not None\n and len(node.text) > 0\n or check_image\n and node.get(\"image\", \"false\") == \"true\"\n )\n )\n\n coordinates: Tuple[int, int] = eval(\n node.get(\"{{{:}}}screencoord\".format(_component_ns), \"(-1, -1)\")\n )\n sizes: Tuple[int, int] = eval(node.get(\"{{{:}}}size\".format(_component_ns), \"(-1, -1)\"))\n keeps = keeps and coordinates[0] >= 0 and coordinates[1] >= 0 and sizes[0] > 0 and sizes[1] > 0\n return keeps\n\n\ndef filter_nodes(root: ET, platform=\"ubuntu\", check_image=False):\n filtered_nodes = []\n\n for node in root.iter():\n if judge_node(node, platform, check_image):\n filtered_nodes.append(node)\n # print(ET.tostring(node, encoding=\"unicode\"))\n\n return filtered_nodes\n\n\ndef draw_bounding_boxes(nodes, image_file_content, down_sampling_ratio=1.0, platform=\"ubuntu\"):\n\n if platform == \"ubuntu\":\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n _value_ns = value_ns_ubuntu\n elif platform == \"windows\":\n _state_ns = state_ns_windows\n _component_ns = component_ns_windows\n _value_ns = value_ns_windows\n else:\n raise ValueError(\"Invalid platform, must be 'ubuntu' or 'windows'\")\n\n # Load the screenshot image\n image_stream = io.BytesIO(image_file_content)\n image = Image.open(image_stream)\n if float(down_sampling_ratio) != 1.0:\n image = image.resize(\n (int(image.size[0] * down_sampling_ratio), int(image.size[1] * down_sampling_ratio))\n )\n draw = ImageDraw.Draw(image)\n marks = []\n drew_nodes = []\n text_informations: List[str] = [\"index\\ttag\\tname\\ttext\"]\n\n try:\n # Adjust the path to the font file you have or use a default one\n font = ImageFont.truetype(\"arial.ttf\", 15)\n except IOError:\n # Fallback to a basic font if the specified font can't be loaded\n font = ImageFont.load_default()\n\n index = 1\n\n # Loop over all the visible nodes and draw their bounding boxes\n for _node in nodes:\n coords_str = _node.attrib.get(\"{{{:}}}screencoord\".format(_component_ns))\n size_str = _node.attrib.get(\"{{{:}}}size\".format(_component_ns))\n\n if coords_str and size_str:\n try:\n # Parse the coordinates and size from the strings\n coords = tuple(map(int, coords_str.strip(\"()\").split(\", \")))\n size = tuple(map(int, size_str.strip(\"()\").split(\", \")))\n\n import copy\n\n original_coords = copy.deepcopy(coords)\n original_size = copy.deepcopy(size)\n\n if float(down_sampling_ratio) != 1.0:\n # Downsample the coordinates and size\n coords = tuple(int(coord * down_sampling_ratio) for coord in coords)\n size = tuple(int(s * down_sampling_ratio) for s in size)\n\n # Check for negative sizes\n if size[0] <= 0 or size[1] <= 0:\n raise ValueError(f\"Size must be positive, got: 
{size}\")\n\n # Calculate the bottom-right corner of the bounding box\n bottom_right = (coords[0] + size[0], coords[1] + size[1])\n\n # Check that bottom_right > coords (x1 >= x0, y1 >= y0)\n if bottom_right[0] < coords[0] or bottom_right[1] < coords[1]:\n raise ValueError(f\"Invalid coordinates or size, coords: {coords}, size: {size}\")\n\n # Check if the area only contains one color\n cropped_image = image.crop((*coords, *bottom_right))\n if len(set(list(cropped_image.getdata()))) == 1:\n continue\n\n # Draw rectangle on image\n draw.rectangle([coords, bottom_right], outline=\"red\", width=1)\n\n # Draw index number at the bottom left of the bounding box with black background\n text_position = (\n coords[0],\n bottom_right[1],\n ) # Adjust Y to be above the bottom right\n text_bbox: Tuple[int, int, int, int] = draw.textbbox(\n text_position, str(index), font=font, anchor=\"lb\"\n )\n # offset: int = bottom_right[1]-text_bbox[3]\n # text_bbox = (text_bbox[0], text_bbox[1]+offset, text_bbox[2], text_bbox[3]+offset)\n\n # draw.rectangle([text_position, (text_position[0] + 25, text_position[1] + 18)], fill='black')\n draw.rectangle(text_bbox, fill=\"black\")\n draw.text(text_position, str(index), font=font, anchor=\"lb\", fill=\"white\")\n\n # each mark is an x, y, w, h tuple\n marks.append(\n [original_coords[0], original_coords[1], original_size[0], original_size[1]]\n )\n drew_nodes.append(_node)\n\n if _node.text:\n node_text = (\n _node.text\n if '\"' not in _node.text\n else '\"{:}\"'.format(_node.text.replace('\"', '\"\"'))\n )\n elif _node.get(\"{{{:}}}class\".format(class_ns_windows), \"\").endswith(\n \"EditWrapper\"\n ) and _node.get(\"{{{:}}}value\".format(_value_ns)):\n node_text = _node.get(\"{{{:}}}value\".format(_value_ns), \"\")\n node_text = (\n node_text\n if '\"' not in node_text\n else '\"{:}\"'.format(node_text.replace('\"', '\"\"'))\n )\n else:\n node_text = '\"\"'\n text_information: str = \"{:d}\\t{:}\\t{:}\\t{:}\".format(\n index, _node.tag, _node.get(\"name\", \"\"), node_text\n )\n text_informations.append(text_information)\n\n index += 1\n\n except ValueError:\n pass\n\n output_image_stream = io.BytesIO()\n image.save(output_image_stream, format=\"PNG\")\n image_content = output_image_stream.getvalue()\n\n return marks, drew_nodes, \"\\n\".join(text_informations), image_content\n\n\ndef print_nodes_with_indent(nodes, indent=0):\n for node in nodes:\n print(\" \" * indent, node.tag, node.attrib)\n print_nodes_with_indent(node, indent + 2)\n\n\ndef linearize_accessibility_tree(accessibility_tree, platform=\"ubuntu\"):\n\n if platform == \"ubuntu\":\n _attributes_ns = attributes_ns_ubuntu\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n _value_ns = value_ns_ubuntu\n elif platform == \"windows\":\n _attributes_ns = attributes_ns_windows\n _state_ns = state_ns_windows\n _component_ns = component_ns_windows\n _value_ns = value_ns_windows\n else:\n raise ValueError(\"Invalid platform, must be 'ubuntu' or 'windows'\")\n\n filtered_nodes = filter_nodes(ET.fromstring(accessibility_tree), platform)\n linearized_accessibility_tree = [\n \"tag\\tname\\ttext\\tclass\\tdescription\\tposition (top-left x&y)\\tsize (w&h)\"\n ]\n\n # Linearize the accessibility tree nodes into a table format\n for node in filtered_nodes:\n if node.text:\n text = (\n node.text if '\"' not in node.text else '\"{:}\"'.format(node.text.replace('\"', '\"\"'))\n )\n\n elif node.get(\"{{{:}}}class\".format(class_ns_windows), \"\").endswith(\n \"EditWrapper\"\n ) and 
node.get(\"{{{:}}}value\".format(_value_ns)):\n node_text = node.get(\"{{{:}}}value\".format(_value_ns), \"\")\n text = (\n node_text if '\"' not in node_text else '\"{:}\"'.format(node_text.replace('\"', '\"\"'))\n )\n else:\n text = '\"\"'\n\n linearized_accessibility_tree.append(\n \"{:}\\t{:}\\t{:}\\t{:}\\t{:}\\t{:}\\t{:}\".format(\n node.tag,\n node.get(\"name\", \"\"),\n text,\n (\n node.get(\"{{{:}}}class\".format(_attributes_ns), \"\")\n if platform == \"ubuntu\"\n else node.get(\"{{{:}}}class\".format(class_ns_windows), \"\")\n ),\n node.get(\"{{{:}}}description\".format(_attributes_ns), \"\"),\n node.get(\"{{{:}}}screencoord\".format(_component_ns), \"\"),\n node.get(\"{{{:}}}size\".format(_component_ns), \"\"),\n )\n )\n\n return \"\\n\".join(linearized_accessibility_tree)\n\n\ndef tag_screenshot(screenshot, accessibility_tree, platform=\"ubuntu\"):\n nodes = filter_nodes(ET.fromstring(accessibility_tree), platform=platform, check_image=True)\n # Make tag screenshot\n marks, drew_nodes, element_list, tagged_screenshot = draw_bounding_boxes(nodes, screenshot)\n\n return marks, drew_nodes, tagged_screenshot, element_list\n\n\ndef trim_accessibility_tree(linearized_accessibility_tree, max_tokens):\n import tiktoken\n\n enc = tiktoken.encoding_for_model(\"gpt-4\")\n tokens = enc.encode(linearized_accessibility_tree)\n if len(tokens) > max_tokens:\n linearized_accessibility_tree = enc.decode(tokens[:max_tokens])\n linearized_accessibility_tree += \"[...]\\n\"\n return linearized_accessibility_tree","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld_axtree_preprocessing.find_leaf_nodes","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld_axtree_preprocessing.find_leaf_nodes#L8-L26","kind":"function","name":"find_leaf_nodes","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":8,"end_line":26,"context_start_line":1,"context_end_line":46,"code":"import io\nimport xml.etree.ElementTree as ET\nfrom typing import Tuple, List\n\nfrom PIL import Image, ImageDraw, ImageFont\n\n\ndef find_leaf_nodes(xlm_file_str):\n if not xlm_file_str:\n return []\n\n root = ET.fromstring(xlm_file_str)\n\n # Recursive function to traverse the XML tree and collect leaf nodes\n def collect_leaf_nodes(node, leaf_nodes):\n # If the node has no children, it is a leaf node, add it to the list\n if not list(node):\n leaf_nodes.append(node)\n # If the node has children, recurse on each child\n for child in node:\n collect_leaf_nodes(child, leaf_nodes)\n\n # List to hold all leaf nodes\n leaf_nodes = []\n collect_leaf_nodes(root, leaf_nodes)\n return leaf_nodes\n\n\nattributes_ns_ubuntu = \"https://accessibility.windows.example.org/ns/attributes\"\nattributes_ns_windows = \"https://accessibility.windows.example.org/ns/attributes\"\nstate_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/state\"\nstate_ns_windows = \"https://accessibility.windows.example.org/ns/state\"\ncomponent_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/component\"\ncomponent_ns_windows = \"https://accessibility.windows.example.org/ns/component\"\nvalue_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/value\"\nvalue_ns_windows = \"https://accessibility.windows.example.org/ns/value\"\nclass_ns_windows = \"https://accessibility.windows.example.org/ns/class\"\n\n\ndef judge_node(node: ET, platform=\"ubuntu\", check_image=False) -> bool:\n if 
platform == \"ubuntu\":\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n elif platform == \"windows\":\n _state_ns = state_ns_windows\n _component_ns = component_ns_windows","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld_axtree_preprocessing.judge_node","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld_axtree_preprocessing.judge_node#L40-L119","kind":"function","name":"judge_node","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":40,"end_line":119,"context_start_line":20,"context_end_line":139,"code":" for child in node:\n collect_leaf_nodes(child, leaf_nodes)\n\n # List to hold all leaf nodes\n leaf_nodes = []\n collect_leaf_nodes(root, leaf_nodes)\n return leaf_nodes\n\n\nattributes_ns_ubuntu = \"https://accessibility.windows.example.org/ns/attributes\"\nattributes_ns_windows = \"https://accessibility.windows.example.org/ns/attributes\"\nstate_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/state\"\nstate_ns_windows = \"https://accessibility.windows.example.org/ns/state\"\ncomponent_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/component\"\ncomponent_ns_windows = \"https://accessibility.windows.example.org/ns/component\"\nvalue_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/value\"\nvalue_ns_windows = \"https://accessibility.windows.example.org/ns/value\"\nclass_ns_windows = \"https://accessibility.windows.example.org/ns/class\"\n\n\ndef judge_node(node: ET, platform=\"ubuntu\", check_image=False) -> bool:\n if platform == \"ubuntu\":\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n elif platform == \"windows\":\n _state_ns = state_ns_windows\n _component_ns = component_ns_windows\n else:\n raise ValueError(\"Invalid platform, must be 'ubuntu' or 'windows'\")\n\n keeps: bool = (\n node.tag.startswith(\"document\")\n or node.tag.endswith(\"item\")\n or node.tag.endswith(\"button\")\n or node.tag.endswith(\"heading\")\n or node.tag.endswith(\"label\")\n or node.tag.endswith(\"scrollbar\")\n or node.tag.endswith(\"searchbox\")\n or node.tag.endswith(\"textbox\")\n or node.tag.endswith(\"link\")\n or node.tag.endswith(\"tabelement\")\n or node.tag.endswith(\"textfield\")\n or node.tag.endswith(\"textarea\")\n or node.tag.endswith(\"menu\")\n or node.tag\n in {\n \"alert\",\n \"canvas\",\n \"check-box\",\n \"combo-box\",\n \"entry\",\n \"icon\",\n \"image\",\n \"paragraph\",\n \"scroll-bar\",\n \"section\",\n \"slider\",\n \"static\",\n \"table-cell\",\n \"terminal\",\n \"text\",\n \"netuiribbontab\",\n \"start\",\n \"trayclockwclass\",\n \"traydummysearchcontrol\",\n \"uiimage\",\n \"uiproperty\",\n \"uiribboncommandbar\",\n }\n )\n keeps = (\n keeps\n and (\n platform == \"ubuntu\"\n and node.get(\"{{{:}}}showing\".format(_state_ns), \"false\") == \"true\"\n and node.get(\"{{{:}}}visible\".format(_state_ns), \"false\") == \"true\"\n or platform == \"windows\"\n and node.get(\"{{{:}}}visible\".format(_state_ns), \"false\") == \"true\"\n )\n and (\n node.get(\"{{{:}}}enabled\".format(_state_ns), \"false\") == \"true\"\n or node.get(\"{{{:}}}editable\".format(_state_ns), \"false\") == \"true\"\n or node.get(\"{{{:}}}expandable\".format(_state_ns), \"false\") == \"true\"\n or node.get(\"{{{:}}}checkable\".format(_state_ns), \"false\") == \"true\"\n )\n and (\n node.get(\"name\", \"\") != \"\"\n or node.text is not None\n and 
len(node.text) > 0\n or check_image\n and node.get(\"image\", \"false\") == \"true\"\n )\n )\n\n coordinates: Tuple[int, int] = eval(\n node.get(\"{{{:}}}screencoord\".format(_component_ns), \"(-1, -1)\")\n )\n sizes: Tuple[int, int] = eval(node.get(\"{{{:}}}size\".format(_component_ns), \"(-1, -1)\"))\n keeps = keeps and coordinates[0] >= 0 and coordinates[1] >= 0 and sizes[0] > 0 and sizes[1] > 0\n return keeps\n\n\ndef filter_nodes(root: ET, platform=\"ubuntu\", check_image=False):\n filtered_nodes = []\n\n for node in root.iter():\n if judge_node(node, platform, check_image):\n filtered_nodes.append(node)\n # print(ET.tostring(node, encoding=\"unicode\"))\n\n return filtered_nodes\n\n\ndef draw_bounding_boxes(nodes, image_file_content, down_sampling_ratio=1.0, platform=\"ubuntu\"):\n\n if platform == \"ubuntu\":\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n _value_ns = value_ns_ubuntu\n elif platform == \"windows\":","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld_axtree_preprocessing.filter_nodes","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld_axtree_preprocessing.filter_nodes#L122-L130","kind":"function","name":"filter_nodes","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":122,"end_line":130,"context_start_line":102,"context_end_line":150,"code":" or node.get(\"{{{:}}}expandable\".format(_state_ns), \"false\") == \"true\"\n or node.get(\"{{{:}}}checkable\".format(_state_ns), \"false\") == \"true\"\n )\n and (\n node.get(\"name\", \"\") != \"\"\n or node.text is not None\n and len(node.text) > 0\n or check_image\n and node.get(\"image\", \"false\") == \"true\"\n )\n )\n\n coordinates: Tuple[int, int] = eval(\n node.get(\"{{{:}}}screencoord\".format(_component_ns), \"(-1, -1)\")\n )\n sizes: Tuple[int, int] = eval(node.get(\"{{{:}}}size\".format(_component_ns), \"(-1, -1)\"))\n keeps = keeps and coordinates[0] >= 0 and coordinates[1] >= 0 and sizes[0] > 0 and sizes[1] > 0\n return keeps\n\n\ndef filter_nodes(root: ET, platform=\"ubuntu\", check_image=False):\n filtered_nodes = []\n\n for node in root.iter():\n if judge_node(node, platform, check_image):\n filtered_nodes.append(node)\n # print(ET.tostring(node, encoding=\"unicode\"))\n\n return filtered_nodes\n\n\ndef draw_bounding_boxes(nodes, image_file_content, down_sampling_ratio=1.0, platform=\"ubuntu\"):\n\n if platform == \"ubuntu\":\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n _value_ns = value_ns_ubuntu\n elif platform == \"windows\":\n _state_ns = state_ns_windows\n _component_ns = component_ns_windows\n _value_ns = value_ns_windows\n else:\n raise ValueError(\"Invalid platform, must be 'ubuntu' or 'windows'\")\n\n # Load the screenshot image\n image_stream = io.BytesIO(image_file_content)\n image = Image.open(image_stream)\n if float(down_sampling_ratio) != 1.0:\n image = image.resize(","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} 
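The filtering pipeline recorded above (judge_node feeding filter_nodes) is easiest to see on a tiny accessibility tree. The sketch below is a minimal, hypothetical usage example and is not part of the indexed records: it assumes the file is importable as agentlab.benchmarks.osworld_axtree_preprocessing and reuses the ubuntu state/component namespace URIs that module defines.

```python
# Hypothetical usage sketch for judge_node/filter_nodes; assumes the module
# above is importable under this path and uses its ubuntu namespace URIs.
import xml.etree.ElementTree as ET

from agentlab.benchmarks.osworld_axtree_preprocessing import filter_nodes

STATE_NS = "https://accessibility.ubuntu.example.org/ns/state"
COMPONENT_NS = "https://accessibility.ubuntu.example.org/ns/component"

# A visible, enabled push-button with on-screen coordinates and a non-zero
# size: it satisfies every clause of judge_node, so filter_nodes keeps it.
toy_xml = (
    f'<push-button name="OK" xmlns:st="{STATE_NS}" xmlns:cp="{COMPONENT_NS}" '
    'st:showing="true" st:visible="true" st:enabled="true" '
    'cp:screencoord="(10, 20)" cp:size="(80, 30)"/>'
)
kept = filter_nodes(ET.fromstring(toy_xml), platform="ubuntu")
print([node.get("name") for node in kept])  # -> ['OK']
```

Note that judge_node parses the screencoord and size attributes with eval, so these helpers should only ever be fed trusted accessibility-tree dumps, never arbitrary input.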
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld_axtree_preprocessing.draw_bounding_boxes","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld_axtree_preprocessing.draw_bounding_boxes#L133-L259","kind":"function","name":"draw_bounding_boxes","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":133,"end_line":259,"context_start_line":113,"context_end_line":279,"code":"\n coordinates: Tuple[int, int] = eval(\n node.get(\"{{{:}}}screencoord\".format(_component_ns), \"(-1, -1)\")\n )\n sizes: Tuple[int, int] = eval(node.get(\"{{{:}}}size\".format(_component_ns), \"(-1, -1)\"))\n keeps = keeps and coordinates[0] >= 0 and coordinates[1] >= 0 and sizes[0] > 0 and sizes[1] > 0\n return keeps\n\n\ndef filter_nodes(root: ET, platform=\"ubuntu\", check_image=False):\n filtered_nodes = []\n\n for node in root.iter():\n if judge_node(node, platform, check_image):\n filtered_nodes.append(node)\n # print(ET.tostring(node, encoding=\"unicode\"))\n\n return filtered_nodes\n\n\ndef draw_bounding_boxes(nodes, image_file_content, down_sampling_ratio=1.0, platform=\"ubuntu\"):\n\n if platform == \"ubuntu\":\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n _value_ns = value_ns_ubuntu\n elif platform == \"windows\":\n _state_ns = state_ns_windows\n _component_ns = component_ns_windows\n _value_ns = value_ns_windows\n else:\n raise ValueError(\"Invalid platform, must be 'ubuntu' or 'windows'\")\n\n # Load the screenshot image\n image_stream = io.BytesIO(image_file_content)\n image = Image.open(image_stream)\n if float(down_sampling_ratio) != 1.0:\n image = image.resize(\n (int(image.size[0] * down_sampling_ratio), int(image.size[1] * down_sampling_ratio))\n )\n draw = ImageDraw.Draw(image)\n marks = []\n drew_nodes = []\n text_informations: List[str] = [\"index\\ttag\\tname\\ttext\"]\n\n try:\n # Adjust the path to the font file you have or use a default one\n font = ImageFont.truetype(\"arial.ttf\", 15)\n except IOError:\n # Fallback to a basic font if the specified font can't be loaded\n font = ImageFont.load_default()\n\n index = 1\n\n # Loop over all the visible nodes and draw their bounding boxes\n for _node in nodes:\n coords_str = _node.attrib.get(\"{{{:}}}screencoord\".format(_component_ns))\n size_str = _node.attrib.get(\"{{{:}}}size\".format(_component_ns))\n\n if coords_str and size_str:\n try:\n # Parse the coordinates and size from the strings\n coords = tuple(map(int, coords_str.strip(\"()\").split(\", \")))\n size = tuple(map(int, size_str.strip(\"()\").split(\", \")))\n\n import copy\n\n original_coords = copy.deepcopy(coords)\n original_size = copy.deepcopy(size)\n\n if float(down_sampling_ratio) != 1.0:\n # Downsample the coordinates and size\n coords = tuple(int(coord * down_sampling_ratio) for coord in coords)\n size = tuple(int(s * down_sampling_ratio) for s in size)\n\n # Check for negative sizes\n if size[0] <= 0 or size[1] <= 0:\n raise ValueError(f\"Size must be positive, got: {size}\")\n\n # Calculate the bottom-right corner of the bounding box\n bottom_right = (coords[0] + size[0], coords[1] + size[1])\n\n # Check that bottom_right > coords (x1 >= x0, y1 >= y0)\n if bottom_right[0] < coords[0] or bottom_right[1] < coords[1]:\n raise ValueError(f\"Invalid coordinates or size, coords: {coords}, size: {size}\")\n\n # Check if the area only contains one color\n cropped_image = image.crop((*coords, *bottom_right))\n if len(set(list(cropped_image.getdata()))) == 1:\n continue\n\n # Draw 
rectangle on image\n draw.rectangle([coords, bottom_right], outline=\"red\", width=1)\n\n # Draw index number at the bottom left of the bounding box with black background\n text_position = (\n coords[0],\n bottom_right[1],\n ) # Adjust Y to be above the bottom right\n text_bbox: Tuple[int, int, int, int] = draw.textbbox(\n text_position, str(index), font=font, anchor=\"lb\"\n )\n # offset: int = bottom_right[1]-text_bbox[3]\n # text_bbox = (text_bbox[0], text_bbox[1]+offset, text_bbox[2], text_bbox[3]+offset)\n\n # draw.rectangle([text_position, (text_position[0] + 25, text_position[1] + 18)], fill='black')\n draw.rectangle(text_bbox, fill=\"black\")\n draw.text(text_position, str(index), font=font, anchor=\"lb\", fill=\"white\")\n\n # each mark is an x, y, w, h tuple\n marks.append(\n [original_coords[0], original_coords[1], original_size[0], original_size[1]]\n )\n drew_nodes.append(_node)\n\n if _node.text:\n node_text = (\n _node.text\n if '\"' not in _node.text\n else '\"{:}\"'.format(_node.text.replace('\"', '\"\"'))\n )\n elif _node.get(\"{{{:}}}class\".format(class_ns_windows), \"\").endswith(\n \"EditWrapper\"\n ) and _node.get(\"{{{:}}}value\".format(_value_ns)):\n node_text = _node.get(\"{{{:}}}value\".format(_value_ns), \"\")\n node_text = (\n node_text\n if '\"' not in node_text\n else '\"{:}\"'.format(node_text.replace('\"', '\"\"'))\n )\n else:\n node_text = '\"\"'\n text_information: str = \"{:d}\\t{:}\\t{:}\\t{:}\".format(\n index, _node.tag, _node.get(\"name\", \"\"), node_text\n )\n text_informations.append(text_information)\n\n index += 1\n\n except ValueError:\n pass\n\n output_image_stream = io.BytesIO()\n image.save(output_image_stream, format=\"PNG\")\n image_content = output_image_stream.getvalue()\n\n return marks, drew_nodes, \"\\n\".join(text_informations), image_content\n\n\ndef print_nodes_with_indent(nodes, indent=0):\n for node in nodes:\n print(\" \" * indent, node.tag, node.attrib)\n print_nodes_with_indent(node, indent + 2)\n\n\ndef linearize_accessibility_tree(accessibility_tree, platform=\"ubuntu\"):\n\n if platform == \"ubuntu\":\n _attributes_ns = attributes_ns_ubuntu\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n _value_ns = value_ns_ubuntu\n elif platform == \"windows\":\n _attributes_ns = attributes_ns_windows\n _state_ns = state_ns_windows\n _component_ns = component_ns_windows\n _value_ns = value_ns_windows","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld_axtree_preprocessing.print_nodes_with_indent","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld_axtree_preprocessing.print_nodes_with_indent#L262-L265","kind":"function","name":"print_nodes_with_indent","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":262,"end_line":265,"context_start_line":242,"context_end_line":285,"code":" )\n else:\n node_text = '\"\"'\n text_information: str = \"{:d}\\t{:}\\t{:}\\t{:}\".format(\n index, _node.tag, _node.get(\"name\", \"\"), node_text\n )\n text_informations.append(text_information)\n\n index += 1\n\n except ValueError:\n pass\n\n output_image_stream = io.BytesIO()\n image.save(output_image_stream, format=\"PNG\")\n image_content = output_image_stream.getvalue()\n\n return marks, drew_nodes, \"\\n\".join(text_informations), image_content\n\n\ndef print_nodes_with_indent(nodes, indent=0):\n for node in nodes:\n print(\" \" * indent, node.tag, 
node.attrib)\n print_nodes_with_indent(node, indent + 2)\n\n\ndef linearize_accessibility_tree(accessibility_tree, platform=\"ubuntu\"):\n\n if platform == \"ubuntu\":\n _attributes_ns = attributes_ns_ubuntu\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n _value_ns = value_ns_ubuntu\n elif platform == \"windows\":\n _attributes_ns = attributes_ns_windows\n _state_ns = state_ns_windows\n _component_ns = component_ns_windows\n _value_ns = value_ns_windows\n else:\n raise ValueError(\"Invalid platform, must be 'ubuntu' or 'windows'\")\n\n filtered_nodes = filter_nodes(ET.fromstring(accessibility_tree), platform)\n linearized_accessibility_tree = [\n \"tag\\tname\\ttext\\tclass\\tdescription\\tposition (top-left x&y)\\tsize (w&h)\"","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld_axtree_preprocessing.linearize_accessibility_tree","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld_axtree_preprocessing.linearize_accessibility_tree#L268-L321","kind":"function","name":"linearize_accessibility_tree","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":268,"end_line":321,"context_start_line":248,"context_end_line":340,"code":" text_informations.append(text_information)\n\n index += 1\n\n except ValueError:\n pass\n\n output_image_stream = io.BytesIO()\n image.save(output_image_stream, format=\"PNG\")\n image_content = output_image_stream.getvalue()\n\n return marks, drew_nodes, \"\\n\".join(text_informations), image_content\n\n\ndef print_nodes_with_indent(nodes, indent=0):\n for node in nodes:\n print(\" \" * indent, node.tag, node.attrib)\n print_nodes_with_indent(node, indent + 2)\n\n\ndef linearize_accessibility_tree(accessibility_tree, platform=\"ubuntu\"):\n\n if platform == \"ubuntu\":\n _attributes_ns = attributes_ns_ubuntu\n _state_ns = state_ns_ubuntu\n _component_ns = component_ns_ubuntu\n _value_ns = value_ns_ubuntu\n elif platform == \"windows\":\n _attributes_ns = attributes_ns_windows\n _state_ns = state_ns_windows\n _component_ns = component_ns_windows\n _value_ns = value_ns_windows\n else:\n raise ValueError(\"Invalid platform, must be 'ubuntu' or 'windows'\")\n\n filtered_nodes = filter_nodes(ET.fromstring(accessibility_tree), platform)\n linearized_accessibility_tree = [\n \"tag\\tname\\ttext\\tclass\\tdescription\\tposition (top-left x&y)\\tsize (w&h)\"\n ]\n\n # Linearize the accessibility tree nodes into a table format\n for node in filtered_nodes:\n if node.text:\n text = (\n node.text if '\"' not in node.text else '\"{:}\"'.format(node.text.replace('\"', '\"\"'))\n )\n\n elif node.get(\"{{{:}}}class\".format(class_ns_windows), \"\").endswith(\n \"EditWrapper\"\n ) and node.get(\"{{{:}}}value\".format(_value_ns)):\n node_text = node.get(\"{{{:}}}value\".format(_value_ns), \"\")\n text = (\n node_text if '\"' not in node_text else '\"{:}\"'.format(node_text.replace('\"', '\"\"'))\n )\n else:\n text = '\"\"'\n\n linearized_accessibility_tree.append(\n \"{:}\\t{:}\\t{:}\\t{:}\\t{:}\\t{:}\\t{:}\".format(\n node.tag,\n node.get(\"name\", \"\"),\n text,\n (\n node.get(\"{{{:}}}class\".format(_attributes_ns), \"\")\n if platform == \"ubuntu\"\n else node.get(\"{{{:}}}class\".format(class_ns_windows), \"\")\n ),\n node.get(\"{{{:}}}description\".format(_attributes_ns), \"\"),\n node.get(\"{{{:}}}screencoord\".format(_component_ns), \"\"),\n 
node.get(\"{{{:}}}size\".format(_component_ns), \"\"),\n )\n )\n\n return \"\\n\".join(linearized_accessibility_tree)\n\n\ndef tag_screenshot(screenshot, accessibility_tree, platform=\"ubuntu\"):\n nodes = filter_nodes(ET.fromstring(accessibility_tree), platform=platform, check_image=True)\n # Make tag screenshot\n marks, drew_nodes, element_list, tagged_screenshot = draw_bounding_boxes(nodes, screenshot)\n\n return marks, drew_nodes, tagged_screenshot, element_list\n\n\ndef trim_accessibility_tree(linearized_accessibility_tree, max_tokens):\n import tiktoken\n\n enc = tiktoken.encoding_for_model(\"gpt-4\")\n tokens = enc.encode(linearized_accessibility_tree)\n if len(tokens) > max_tokens:\n linearized_accessibility_tree = enc.decode(tokens[:max_tokens])\n linearized_accessibility_tree += \"[...]\\n\"\n return linearized_accessibility_tree","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld_axtree_preprocessing.tag_screenshot","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld_axtree_preprocessing.tag_screenshot#L324-L329","kind":"function","name":"tag_screenshot","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":324,"end_line":329,"context_start_line":304,"context_end_line":340,"code":"\n linearized_accessibility_tree.append(\n \"{:}\\t{:}\\t{:}\\t{:}\\t{:}\\t{:}\\t{:}\".format(\n node.tag,\n node.get(\"name\", \"\"),\n text,\n (\n node.get(\"{{{:}}}class\".format(_attributes_ns), \"\")\n if platform == \"ubuntu\"\n else node.get(\"{{{:}}}class\".format(class_ns_windows), \"\")\n ),\n node.get(\"{{{:}}}description\".format(_attributes_ns), \"\"),\n node.get(\"{{{:}}}screencoord\".format(_component_ns), \"\"),\n node.get(\"{{{:}}}size\".format(_component_ns), \"\"),\n )\n )\n\n return \"\\n\".join(linearized_accessibility_tree)\n\n\ndef tag_screenshot(screenshot, accessibility_tree, platform=\"ubuntu\"):\n nodes = filter_nodes(ET.fromstring(accessibility_tree), platform=platform, check_image=True)\n # Make tag screenshot\n marks, drew_nodes, element_list, tagged_screenshot = draw_bounding_boxes(nodes, screenshot)\n\n return marks, drew_nodes, tagged_screenshot, element_list\n\n\ndef trim_accessibility_tree(linearized_accessibility_tree, max_tokens):\n import tiktoken\n\n enc = tiktoken.encoding_for_model(\"gpt-4\")\n tokens = enc.encode(linearized_accessibility_tree)\n if len(tokens) > max_tokens:\n linearized_accessibility_tree = enc.decode(tokens[:max_tokens])\n linearized_accessibility_tree += \"[...]\\n\"\n return linearized_accessibility_tree","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld_axtree_preprocessing.trim_accessibility_tree","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld_axtree_preprocessing.trim_accessibility_tree#L332-L340","kind":"function","name":"trim_accessibility_tree","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":332,"end_line":340,"context_start_line":312,"context_end_line":340,"code":" if platform == \"ubuntu\"\n else node.get(\"{{{:}}}class\".format(class_ns_windows), \"\")\n ),\n node.get(\"{{{:}}}description\".format(_attributes_ns), \"\"),\n node.get(\"{{{:}}}screencoord\".format(_component_ns), \"\"),\n node.get(\"{{{:}}}size\".format(_component_ns), \"\"),\n )\n )\n\n return 
\"\\n\".join(linearized_accessibility_tree)\n\n\ndef tag_screenshot(screenshot, accessibility_tree, platform=\"ubuntu\"):\n nodes = filter_nodes(ET.fromstring(accessibility_tree), platform=platform, check_image=True)\n # Make tag screenshot\n marks, drew_nodes, element_list, tagged_screenshot = draw_bounding_boxes(nodes, screenshot)\n\n return marks, drew_nodes, tagged_screenshot, element_list\n\n\ndef trim_accessibility_tree(linearized_accessibility_tree, max_tokens):\n import tiktoken\n\n enc = tiktoken.encoding_for_model(\"gpt-4\")\n tokens = enc.encode(linearized_accessibility_tree)\n if len(tokens) > max_tokens:\n linearized_accessibility_tree = enc.decode(tokens[:max_tokens])\n linearized_accessibility_tree += \"[...]\\n\"\n return linearized_accessibility_tree","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld_axtree_preprocessing.collect_leaf_nodes","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld_axtree_preprocessing.collect_leaf_nodes#L15-L21","kind":"function","name":"collect_leaf_nodes","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":15,"end_line":21,"context_start_line":1,"context_end_line":41,"code":"import io\nimport xml.etree.ElementTree as ET\nfrom typing import Tuple, List\n\nfrom PIL import Image, ImageDraw, ImageFont\n\n\ndef find_leaf_nodes(xlm_file_str):\n if not xlm_file_str:\n return []\n\n root = ET.fromstring(xlm_file_str)\n\n # Recursive function to traverse the XML tree and collect leaf nodes\n def collect_leaf_nodes(node, leaf_nodes):\n # If the node has no children, it is a leaf node, add it to the list\n if not list(node):\n leaf_nodes.append(node)\n # If the node has children, recurse on each child\n for child in node:\n collect_leaf_nodes(child, leaf_nodes)\n\n # List to hold all leaf nodes\n leaf_nodes = []\n collect_leaf_nodes(root, leaf_nodes)\n return leaf_nodes\n\n\nattributes_ns_ubuntu = \"https://accessibility.windows.example.org/ns/attributes\"\nattributes_ns_windows = \"https://accessibility.windows.example.org/ns/attributes\"\nstate_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/state\"\nstate_ns_windows = \"https://accessibility.windows.example.org/ns/state\"\ncomponent_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/component\"\ncomponent_ns_windows = \"https://accessibility.windows.example.org/ns/component\"\nvalue_ns_ubuntu = \"https://accessibility.ubuntu.example.org/ns/value\"\nvalue_ns_windows = \"https://accessibility.windows.example.org/ns/value\"\nclass_ns_windows = \"https://accessibility.windows.example.org/ns/class\"\n\n\ndef judge_node(node: ET, platform=\"ubuntu\", check_image=False) -> bool:\n if platform == \"ubuntu\":","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.multitool_gym","uri":"program://AgentLab/module/src.agentlab.benchmarks.multitool_gym#L1-L57","kind":"module","name":"src.agentlab.benchmarks.multitool_gym","path":"src/agentlab/benchmarks/multitool_gym.py","language":"python","start_line":1,"end_line":57,"context_start_line":1,"context_end_line":57,"code":"import logging\nimport time\n\nfrom tapeagents.core import Action, Observation, StopStep\nfrom tapeagents.environment import ToolCollectionEnvironment\nfrom tapeagents.tools.base import StatefulTool, Tool\n\nfrom agentlab.benchmarks.abstract_env 
import AbstractEnv\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiToolGym(AbstractEnv):\n def __init__(self, tools: list[Tool | StatefulTool], max_turns: int = 50):\n self._env = ToolCollectionEnvironment(tools)\n self._actions = self._env.actions()\n self.max_turns = max_turns\n self._turns = 0\n\n def reset(self):\n self._env.reset()\n self._turns = 0\n\n def step(self, action: Action) -> tuple[Observation, float, bool, bool, dict]:\n logger.info(f\"Gym {self.__class__.__name__} step called with action {type(action)}\")\n assert isinstance(action, Action)\n\n action_exec_start = time.time()\n terminated = isinstance(action, StopStep)\n if terminated:\n observation = Observation() # empty observation\n else:\n observation = self._env.step(action)\n terminated = isinstance(observation, StopStep)\n action_exec_stop = time.time()\n self._turns += 1\n\n reward = self.calculate_reward(action)\n\n truncated = self._turns >= self.max_turns\n\n env_info = {\n \"step_metadata\": observation.metadata,\n \"action_exec_start\": action_exec_start,\n \"action_exec_stop\": action_exec_stop,\n \"action_exec_timeout\": 0.0,\n }\n obs_view = observation.short_view() if isinstance(observation, Observation) else observation\n logger.info(f\"Gym {self.__class__.__name__} observation: {obs_view}\")\n return observation, reward, terminated, truncated, env_info\n\n def calculate_reward(self, action: Action) -> float:\n logger.warning(\"Reward calculation is not implemented, returning 0\")\n return 0.0\n\n def close(self):\n self._env.close()","source_hash":"0121f2a4c9e33e166df62de14a979af68c34c4c370c7a854d9045b9cccf7b907","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.multitool_gym.MultiToolGym","uri":"program://AgentLab/class/src.agentlab.benchmarks.multitool_gym.MultiToolGym#L13-L57","kind":"class","name":"MultiToolGym","path":"src/agentlab/benchmarks/multitool_gym.py","language":"python","start_line":13,"end_line":57,"context_start_line":1,"context_end_line":57,"code":"import logging\nimport time\n\nfrom tapeagents.core import Action, Observation, StopStep\nfrom tapeagents.environment import ToolCollectionEnvironment\nfrom tapeagents.tools.base import StatefulTool, Tool\n\nfrom agentlab.benchmarks.abstract_env import AbstractEnv\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiToolGym(AbstractEnv):\n def __init__(self, tools: list[Tool | StatefulTool], max_turns: int = 50):\n self._env = ToolCollectionEnvironment(tools)\n self._actions = self._env.actions()\n self.max_turns = max_turns\n self._turns = 0\n\n def reset(self):\n self._env.reset()\n self._turns = 0\n\n def step(self, action: Action) -> tuple[Observation, float, bool, bool, dict]:\n logger.info(f\"Gym {self.__class__.__name__} step called with action {type(action)}\")\n assert isinstance(action, Action)\n\n action_exec_start = time.time()\n terminated = isinstance(action, StopStep)\n if terminated:\n observation = Observation() # empty observation\n else:\n observation = self._env.step(action)\n terminated = isinstance(observation, StopStep)\n action_exec_stop = time.time()\n self._turns += 1\n\n reward = self.calculate_reward(action)\n\n truncated = self._turns >= self.max_turns\n\n env_info = {\n \"step_metadata\": observation.metadata,\n \"action_exec_start\": action_exec_start,\n \"action_exec_stop\": action_exec_stop,\n \"action_exec_timeout\": 0.0,\n }\n obs_view = observation.short_view() if isinstance(observation, Observation) else observation\n logger.info(f\"Gym {self.__class__.__name__} 
observation: {obs_view}\")\n return observation, reward, terminated, truncated, env_info\n\n def calculate_reward(self, action: Action) -> float:\n logger.warning(\"Reward calculation is not implemented, returning 0\")\n return 0.0\n\n def close(self):\n self._env.close()","source_hash":"0121f2a4c9e33e166df62de14a979af68c34c4c370c7a854d9045b9cccf7b907","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.multitool_gym.__init__","uri":"program://AgentLab/function/src.agentlab.benchmarks.multitool_gym.__init__#L14-L18","kind":"function","name":"__init__","path":"src/agentlab/benchmarks/multitool_gym.py","language":"python","start_line":14,"end_line":18,"context_start_line":1,"context_end_line":38,"code":"import logging\nimport time\n\nfrom tapeagents.core import Action, Observation, StopStep\nfrom tapeagents.environment import ToolCollectionEnvironment\nfrom tapeagents.tools.base import StatefulTool, Tool\n\nfrom agentlab.benchmarks.abstract_env import AbstractEnv\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiToolGym(AbstractEnv):\n def __init__(self, tools: list[Tool | StatefulTool], max_turns: int = 50):\n self._env = ToolCollectionEnvironment(tools)\n self._actions = self._env.actions()\n self.max_turns = max_turns\n self._turns = 0\n\n def reset(self):\n self._env.reset()\n self._turns = 0\n\n def step(self, action: Action) -> tuple[Observation, float, bool, bool, dict]:\n logger.info(f\"Gym {self.__class__.__name__} step called with action {type(action)}\")\n assert isinstance(action, Action)\n\n action_exec_start = time.time()\n terminated = isinstance(action, StopStep)\n if terminated:\n observation = Observation() # empty observation\n else:\n observation = self._env.step(action)\n terminated = isinstance(observation, StopStep)\n action_exec_stop = time.time()\n self._turns += 1\n\n reward = self.calculate_reward(action)","source_hash":"0121f2a4c9e33e166df62de14a979af68c34c4c370c7a854d9045b9cccf7b907","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.multitool_gym.reset","uri":"program://AgentLab/function/src.agentlab.benchmarks.multitool_gym.reset#L20-L22","kind":"function","name":"reset","path":"src/agentlab/benchmarks/multitool_gym.py","language":"python","start_line":20,"end_line":22,"context_start_line":1,"context_end_line":42,"code":"import logging\nimport time\n\nfrom tapeagents.core import Action, Observation, StopStep\nfrom tapeagents.environment import ToolCollectionEnvironment\nfrom tapeagents.tools.base import StatefulTool, Tool\n\nfrom agentlab.benchmarks.abstract_env import AbstractEnv\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiToolGym(AbstractEnv):\n def __init__(self, tools: list[Tool | StatefulTool], max_turns: int = 50):\n self._env = ToolCollectionEnvironment(tools)\n self._actions = self._env.actions()\n self.max_turns = max_turns\n self._turns = 0\n\n def reset(self):\n self._env.reset()\n self._turns = 0\n\n def step(self, action: Action) -> tuple[Observation, float, bool, bool, dict]:\n logger.info(f\"Gym {self.__class__.__name__} step called with action {type(action)}\")\n assert isinstance(action, Action)\n\n action_exec_start = time.time()\n terminated = isinstance(action, StopStep)\n if terminated:\n observation = Observation() # empty observation\n else:\n observation = self._env.step(action)\n terminated = isinstance(observation, StopStep)\n action_exec_stop = time.time()\n self._turns += 1\n\n reward = self.calculate_reward(action)\n\n truncated = self._turns >= 
self.max_turns\n\n env_info = {","source_hash":"0121f2a4c9e33e166df62de14a979af68c34c4c370c7a854d9045b9cccf7b907","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.multitool_gym.step","uri":"program://AgentLab/function/src.agentlab.benchmarks.multitool_gym.step#L24-L50","kind":"function","name":"step","path":"src/agentlab/benchmarks/multitool_gym.py","language":"python","start_line":24,"end_line":50,"context_start_line":4,"context_end_line":57,"code":"from tapeagents.core import Action, Observation, StopStep\nfrom tapeagents.environment import ToolCollectionEnvironment\nfrom tapeagents.tools.base import StatefulTool, Tool\n\nfrom agentlab.benchmarks.abstract_env import AbstractEnv\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiToolGym(AbstractEnv):\n def __init__(self, tools: list[Tool | StatefulTool], max_turns: int = 50):\n self._env = ToolCollectionEnvironment(tools)\n self._actions = self._env.actions()\n self.max_turns = max_turns\n self._turns = 0\n\n def reset(self):\n self._env.reset()\n self._turns = 0\n\n def step(self, action: Action) -> tuple[Observation, float, bool, bool, dict]:\n logger.info(f\"Gym {self.__class__.__name__} step called with action {type(action)}\")\n assert isinstance(action, Action)\n\n action_exec_start = time.time()\n terminated = isinstance(action, StopStep)\n if terminated:\n observation = Observation() # empty observation\n else:\n observation = self._env.step(action)\n terminated = isinstance(observation, StopStep)\n action_exec_stop = time.time()\n self._turns += 1\n\n reward = self.calculate_reward(action)\n\n truncated = self._turns >= self.max_turns\n\n env_info = {\n \"step_metadata\": observation.metadata,\n \"action_exec_start\": action_exec_start,\n \"action_exec_stop\": action_exec_stop,\n \"action_exec_timeout\": 0.0,\n }\n obs_view = observation.short_view() if isinstance(observation, Observation) else observation\n logger.info(f\"Gym {self.__class__.__name__} observation: {obs_view}\")\n return observation, reward, terminated, truncated, env_info\n\n def calculate_reward(self, action: Action) -> float:\n logger.warning(\"Reward calculation is not implemented, returning 0\")\n return 0.0\n\n def close(self):\n self._env.close()","source_hash":"0121f2a4c9e33e166df62de14a979af68c34c4c370c7a854d9045b9cccf7b907","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.multitool_gym.calculate_reward","uri":"program://AgentLab/function/src.agentlab.benchmarks.multitool_gym.calculate_reward#L52-L54","kind":"function","name":"calculate_reward","path":"src/agentlab/benchmarks/multitool_gym.py","language":"python","start_line":52,"end_line":54,"context_start_line":32,"context_end_line":57,"code":" else:\n observation = self._env.step(action)\n terminated = isinstance(observation, StopStep)\n action_exec_stop = time.time()\n self._turns += 1\n\n reward = self.calculate_reward(action)\n\n truncated = self._turns >= self.max_turns\n\n env_info = {\n \"step_metadata\": observation.metadata,\n \"action_exec_start\": action_exec_start,\n \"action_exec_stop\": action_exec_stop,\n \"action_exec_timeout\": 0.0,\n }\n obs_view = observation.short_view() if isinstance(observation, Observation) else observation\n logger.info(f\"Gym {self.__class__.__name__} observation: {obs_view}\")\n return observation, reward, terminated, truncated, env_info\n\n def calculate_reward(self, action: Action) -> float:\n logger.warning(\"Reward calculation is not implemented, returning 0\")\n return 0.0\n\n def 
close(self):\n self._env.close()","source_hash":"0121f2a4c9e33e166df62de14a979af68c34c4c370c7a854d9045b9cccf7b907","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.multitool_gym.close","uri":"program://AgentLab/function/src.agentlab.benchmarks.multitool_gym.close#L56-L57","kind":"function","name":"close","path":"src/agentlab/benchmarks/multitool_gym.py","language":"python","start_line":56,"end_line":57,"context_start_line":36,"context_end_line":57,"code":" self._turns += 1\n\n reward = self.calculate_reward(action)\n\n truncated = self._turns >= self.max_turns\n\n env_info = {\n \"step_metadata\": observation.metadata,\n \"action_exec_start\": action_exec_start,\n \"action_exec_stop\": action_exec_stop,\n \"action_exec_timeout\": 0.0,\n }\n obs_view = observation.short_view() if isinstance(observation, Observation) else observation\n logger.info(f\"Gym {self.__class__.__name__} observation: {obs_view}\")\n return observation, reward, terminated, truncated, env_info\n\n def calculate_reward(self, action: Action) -> float:\n logger.warning(\"Reward calculation is not implemented, returning 0\")\n return 0.0\n\n def close(self):\n self._env.close()","source_hash":"0121f2a4c9e33e166df62de14a979af68c34c4c370c7a854d9045b9cccf7b907","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld","uri":"program://AgentLab/module/src.agentlab.benchmarks.osworld#L1-L948","kind":"module","name":"src.agentlab.benchmarks.osworld","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":1,"end_line":948,"context_start_line":1,"context_end_line":948,"code":"import ast\nimport importlib.util\nimport json\nimport logging\nimport os\nimport time\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import Any, Literal\n\nimport numpy as np\nfrom bgym import AbstractActionSet\nfrom dataclasses_json import DataClassJsonMixin\nfrom PIL import Image\n\nfrom agentlab.benchmarks.abstract_env import (\n AbstractBenchmark,\n AbstractEnv,\n AbstractEnvArgs,\n add_step_timing_to_env_info_decorator,\n)\nfrom agentlab.benchmarks.osworld_axtree_preprocessing import (\n linearize_accessibility_tree,\n tag_screenshot,\n)\n\nspec = importlib.util.find_spec(\"desktop_env\")\nif spec is not None: # desktop_env is available\n from desktop_env.actions import KEYBOARD_KEYS, X_MAX, Y_MAX\n from desktop_env.desktop_env import DesktopEnv\nelse:\n # If desktop_env is not available, set to None or default values\n DesktopEnv = None\n KEYBOARD_KEYS = [\n \"\\t\",\n \"\\n\",\n \"\\r\",\n \" \",\n \"!\",\n '\"',\n \"#\",\n \"$\",\n \"%\",\n \"&\",\n \"'\",\n \"(\",\n \")\",\n \"*\",\n \"+\",\n \",\",\n \"-\",\n \".\",\n \"/\",\n \"0\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"8\",\n \"9\",\n \":\",\n \";\",\n \"<\",\n \"=\",\n \">\",\n \"?\",\n \"@\",\n \"[\",\n \"\\\\\",\n \"]\",\n \"^\",\n \"_\",\n \"`\",\n \"a\",\n \"b\",\n \"c\",\n \"d\",\n \"e\",\n \"f\",\n \"g\",\n \"h\",\n \"i\",\n \"j\",\n \"k\",\n \"l\",\n \"m\",\n \"n\",\n \"o\",\n \"p\",\n \"q\",\n \"r\",\n \"s\",\n \"t\",\n \"u\",\n \"v\",\n \"w\",\n \"x\",\n \"y\",\n \"z\",\n \"{\",\n \"|\",\n \"}\",\n \"~\",\n \"accept\",\n \"add\",\n \"alt\",\n \"altleft\",\n \"altright\",\n \"apps\",\n \"backspace\",\n \"browserback\",\n \"browserfavorites\",\n \"browserforward\",\n \"browserhome\",\n \"browserrefresh\",\n \"browsersearch\",\n \"browserstop\",\n \"capslock\",\n \"clear\",\n \"convert\",\n 
\"ctrl\",\n \"ctrlleft\",\n \"ctrlright\",\n \"decimal\",\n \"del\",\n \"delete\",\n \"divide\",\n \"down\",\n \"end\",\n \"enter\",\n \"esc\",\n \"escape\",\n \"execute\",\n \"f1\",\n \"f10\",\n \"f11\",\n \"f12\",\n \"f13\",\n \"f14\",\n \"f15\",\n \"f16\",\n \"f17\",\n \"f18\",\n \"f19\",\n \"f2\",\n \"f20\",\n \"f21\",\n \"f22\",\n \"f23\",\n \"f24\",\n \"f3\",\n \"f4\",\n \"f5\",\n \"f6\",\n \"f7\",\n \"f8\",\n \"f9\",\n \"final\",\n \"fn\",\n \"hanguel\",\n \"hangul\",\n \"hanja\",\n \"help\",\n \"home\",\n \"insert\",\n \"junja\",\n \"kana\",\n \"kanji\",\n \"launchapp1\",\n \"launchapp2\",\n \"launchmail\",\n \"launchmediaselect\",\n \"left\",\n \"modechange\",\n \"multiply\",\n \"nexttrack\",\n \"nonconvert\",\n \"num0\",\n \"num1\",\n \"num2\",\n \"num3\",\n \"num4\",\n \"num5\",\n \"num6\",\n \"num7\",\n \"num8\",\n \"num9\",\n \"numlock\",\n \"pagedown\",\n \"pageup\",\n \"pause\",\n \"pgdn\",\n \"pgup\",\n \"playpause\",\n \"prevtrack\",\n \"print\",\n \"printscreen\",\n \"prntscrn\",\n \"prtsc\",\n \"prtscr\",\n \"return\",\n \"right\",\n \"scrolllock\",\n \"select\",\n \"separator\",\n \"shift\",\n \"shiftleft\",\n \"shiftright\",\n \"sleep\",\n \"stop\",\n \"subtract\",\n \"tab\",\n \"up\",\n \"volumedown\",\n \"volumemute\",\n \"volumeup\",\n \"win\",\n \"winleft\",\n \"winright\",\n \"yen\",\n \"command\",\n \"option\",\n \"optionleft\",\n \"optionright\",\n ]\n X_MAX = 1920\n Y_MAX = 1080\n\nlogger = logging.getLogger(__name__)\nCOMPUTER_13_ACTIONS_OAI_CHATCOMPLETION_TOOLS = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"move_to\",\n \"description\": \"Move the cursor to the specified position\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"x\": {\n \"type\": \"number\",\n \"description\": \"X coordinate\",\n \"minimum\": 0,\n \"maximum\": X_MAX,\n },\n \"y\": {\n \"type\": \"number\",\n \"description\": \"Y coordinate\",\n \"minimum\": 0,\n \"maximum\": Y_MAX,\n },\n },\n \"required\": [\"x\", \"y\"],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"click\",\n \"description\": \"Click the left button if the button not specified, otherwise click the specified button; click at the current position if x and y are not specified, otherwise click at the specified position\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"button\": {\n \"type\": \"string\",\n \"enum\": [\"left\", \"right\", \"middle\"],\n \"description\": \"Mouse button to click\",\n },\n \"x\": {\n \"type\": \"number\",\n \"description\": \"X coordinate\",\n \"minimum\": 0,\n \"maximum\": X_MAX,\n },\n \"y\": {\n \"type\": \"number\",\n \"description\": \"Y coordinate\",\n \"minimum\": 0,\n \"maximum\": Y_MAX,\n },\n \"num_clicks\": {\n \"type\": \"integer\",\n \"enum\": [1, 2, 3],\n \"description\": \"Number of clicks\",\n },\n },\n \"required\": [],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"mouse_down\",\n \"description\": \"Press the left button if the button not specified, otherwise press the specified button\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"button\": {\n \"type\": \"string\",\n \"enum\": [\"left\", \"right\", \"middle\"],\n \"description\": \"Mouse button to press\",\n }\n },\n \"required\": [],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"mouse_up\",\n \"description\": \"Release the left button if the button not specified, otherwise release the specified button\",\n \"parameters\": {\n \"type\": \"object\",\n 
\"properties\": {\n \"button\": {\n \"type\": \"string\",\n \"enum\": [\"left\", \"right\", \"middle\"],\n \"description\": \"Mouse button to release\",\n }\n },\n \"required\": [],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"right_click\",\n \"description\": \"Right click at the current position if x and y are not specified, otherwise right click at the specified position\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"x\": {\n \"type\": \"number\",\n \"description\": \"X coordinate\",\n \"minimum\": 0,\n \"maximum\": X_MAX,\n },\n \"y\": {\n \"type\": \"number\",\n \"description\": \"Y coordinate\",\n \"minimum\": 0,\n \"maximum\": Y_MAX,\n },\n },\n \"required\": [],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"double_click\",\n \"description\": \"Double click at the current position if x and y are not specified, otherwise double click at the specified position\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"x\": {\n \"type\": \"number\",\n \"description\": \"X coordinate\",\n \"minimum\": 0,\n \"maximum\": X_MAX,\n },\n \"y\": {\n \"type\": \"number\",\n \"description\": \"Y coordinate\",\n \"minimum\": 0,\n \"maximum\": Y_MAX,\n },\n },\n \"required\": [],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"drag_to\",\n \"description\": \"Drag the cursor to the specified position with the left button pressed\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"x\": {\n \"type\": \"number\",\n \"description\": \"X coordinate\",\n \"minimum\": 0,\n \"maximum\": X_MAX,\n },\n \"y\": {\n \"type\": \"number\",\n \"description\": \"Y coordinate\",\n \"minimum\": 0,\n \"maximum\": Y_MAX,\n },\n },\n \"required\": [\"x\", \"y\"],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"scroll\",\n \"description\": \"Scroll the mouse wheel up or down\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"dx\": {\"type\": \"integer\", \"description\": \"Horizontal scroll amount\"},\n \"dy\": {\"type\": \"integer\", \"description\": \"Vertical scroll amount\"},\n },\n \"required\": [\"dx\", \"dy\"],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"typing\",\n \"description\": \"Type the specified text\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\"text\": {\"type\": \"string\", \"description\": \"Text to type\"}},\n \"required\": [\"text\"],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"press\",\n \"description\": \"Press the specified key and release it\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"key\": {\"type\": \"string\", \"enum\": KEYBOARD_KEYS, \"description\": \"Key to press\"}\n },\n \"required\": [\"key\"],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"key_down\",\n \"description\": \"Press the specified key\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"key\": {\n \"type\": \"string\",\n \"enum\": KEYBOARD_KEYS,\n \"description\": \"Key to press down\",\n }\n },\n \"required\": [\"key\"],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"key_up\",\n \"description\": \"Release the specified key\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"key\": {\n \"type\": \"string\",\n \"enum\": KEYBOARD_KEYS,\n \"description\": \"Key to release\",\n }\n },\n \"required\": [\"key\"],\n },\n },\n },\n 
{\n \"type\": \"function\",\n \"function\": {\n \"name\": \"hotkey\",\n \"description\": \"Press the specified key combination\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"keys\": {\n \"type\": \"array\",\n \"items\": {\"type\": \"string\", \"enum\": KEYBOARD_KEYS},\n \"description\": \"Array of keys to press simultaneously\",\n }\n },\n \"required\": [\"keys\"],\n },\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"wait\",\n \"description\": \"Wait until the next action\",\n \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []},\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"fail\",\n \"description\": \"Decide the task cannot be performed\",\n \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []},\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"done\",\n \"description\": \"Decide the task is done\",\n \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []},\n },\n },\n]\n\n\nclass OsworldGym(AbstractEnv):\n\n def __init__(\n self,\n task: dict,\n provider_name: str,\n region: str | None,\n path_to_vm: str | None,\n snapshot_name: str,\n action_space: str,\n cache_dir: str,\n screen_size: tuple[int, int],\n headless: bool,\n require_a11y_tree: bool,\n require_terminal: bool,\n os_type: str,\n enable_proxy: bool,\n max_steps: int,\n exp_dir: Path,\n record_video: bool = True,\n ):\n self.task = task\n self.env_info = {\n \"provider_name\": provider_name,\n \"region\": region,\n \"path_to_vm\": path_to_vm,\n \"snapshot_name\": snapshot_name,\n \"action_space\": action_space,\n \"cache_dir\": cache_dir,\n \"screen_size\": screen_size,\n \"headless\": headless,\n \"require_a11y_tree\": require_a11y_tree,\n \"require_terminal\": require_terminal,\n \"os_type\": os_type,\n \"enable_proxy\": enable_proxy,\n }\n if DesktopEnv is None:\n raise ImportError(\n \"desktop_env is not installed. 
Please install it (use `make osworld`) to use OSWorld Gym.\"\n )\n self.env = DesktopEnv(\n action_space=action_space,\n provider_name=provider_name,\n region=region, # type: ignore\n path_to_vm=path_to_vm, # type: ignore\n snapshot_name=snapshot_name,\n cache_dir=cache_dir,\n screen_size=screen_size, # type: ignore\n headless=headless,\n require_a11y_tree=require_a11y_tree,\n require_terminal=require_terminal,\n os_type=os_type,\n )\n self._step_count = 0\n self.max_steps = max_steps\n self.exp_dir = exp_dir\n self.record_video = record_video\n\n def reset(self, seed: int | None = None) -> tuple[dict[str, Any], dict[str, Any]]:\n self.env.reset(task_config=self.task, seed=seed)\n logging.info(f\"Start solving task: {self.task['instruction']}\")\n time.sleep(\n 60\n ) # Wait for the environment to be ready, as in https://github.com/xlang-ai/OSWorld/blob/main/lib_run_single.py#L15\n raw_obs = self.env._get_obs() # Get the initial observation\n if self.record_video:\n self.env.controller.start_recording()\n logging.info(\"Started recording the environment video\")\n obs = self.to_agentlab_observation(raw_obs)\n self._step_count = 0\n return obs, self.env_info\n\n @add_step_timing_to_env_info_decorator\n def step(self, action: str):\n \"\"\"Execute the action in the OS-world environment.\"\"\"\n env_action = self.agentlab_to_env_action(action)\n logger.info(f\"AgentLab Action returned: {action}, converted to: {env_action}\")\n raw_obs, reward, done, info = self.env.step(env_action)\n logger.info(f\"STEP {self.task['id']} {self._step_count + 1}/{self.max_steps}\")\n self._step_count += 1\n truncated = info.get(\"fail\", False) or self._step_count >= self.max_steps\n if done or truncated:\n if done:\n logger.info(f\"Task {self.task['id']} completed successfully.\")\n else:\n logger.warning(f\"Task {self.task['id']} truncated after {self._step_count} steps.\")\n try:\n reward = self.env.evaluate()\n logger.info(f\"Evaluated reward: {reward}\")\n except Exception as e:\n logger.error(f\"Failed to evaluate {self.task} task: {e}\")\n obs = self.to_agentlab_observation(raw_obs)\n return obs, reward, done, truncated, info\n\n def agentlab_to_env_action(self, action: str) -> Any:\n \"\"\"Convert AgentLab agents action format to OSWorld action format.\"\"\"\n if self.env.action_space == \"computer_13\":\n return self.convert_agentlab_action_to_computer_13(action)\n elif self.env.action_space == \"pyautogui\":\n raise NotImplementedError(\n \"PyAutoGUI action space is not supported yet. 
Please use 'computer_13' action space.\"\n )\n\n def to_agentlab_observation(self, obs: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Convert OSWorld observation to AgentLab format.\"\"\"\n converted_obs = {}\n\n self._add_screenshot(converted_obs, obs)\n # self._add_som_screenshot(converted_obs, obs) #TODO: test this\n converted_obs[\"axtree_txt\"] = linearize_accessibility_tree(\n accessibility_tree=obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"last_action_error\"] = \"\" # OSWorld doesn't provide this directly\n converted_obs[\"focused_element_bid\"] = \"\" # Extract from accessibility tree if available\n converted_obs = self._add_browser_context(converted_obs)\n converted_obs = self._add_task_context(converted_obs, obs)\n\n return converted_obs\n\n def convert_screenshot_to_numpy(self, screenshot) -> np.ndarray:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab.\"\"\"\n image = Image.open(BytesIO(screenshot))\n image = image.convert(\"RGB\") if image.mode != \"RGB\" else image\n return np.array(image)\n\n def _add_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab\"\"\"\n converted_obs[\"screenshot\"] = self.convert_screenshot_to_numpy(obs[\"screenshot\"])\n\n def _add_som_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert SOM screenshot to numpy array format expected by AgentLab\"\"\"\n masks, drew_nodes, tagged_screenshot, linearized_accessibility_tree = tag_screenshot(\n obs[\"screenshot\"], obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"som_screenshot\"] = self.convert_screenshot_to_numpy(tagged_screenshot)\n\n def _add_browser_context(self, converted_obs: dict[str, Any]):\n \"\"\"Add browser-like context fields adapted for desktop environment.\"\"\"\n converted_obs[\"url\"] = \"\"\n converted_obs[\"open_page\n# ... 
truncated ...","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":true} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.OsworldGym","uri":"program://AgentLab/class/src.agentlab.benchmarks.osworld.OsworldGym#L529-L754","kind":"class","name":"OsworldGym","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":529,"end_line":754,"context_start_line":509,"context_end_line":774,"code":" },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"fail\",\n \"description\": \"Decide the task cannot be performed\",\n \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []},\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"done\",\n \"description\": \"Decide the task is done\",\n \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []},\n },\n },\n]\n\n\nclass OsworldGym(AbstractEnv):\n\n def __init__(\n self,\n task: dict,\n provider_name: str,\n region: str | None,\n path_to_vm: str | None,\n snapshot_name: str,\n action_space: str,\n cache_dir: str,\n screen_size: tuple[int, int],\n headless: bool,\n require_a11y_tree: bool,\n require_terminal: bool,\n os_type: str,\n enable_proxy: bool,\n max_steps: int,\n exp_dir: Path,\n record_video: bool = True,\n ):\n self.task = task\n self.env_info = {\n \"provider_name\": provider_name,\n \"region\": region,\n \"path_to_vm\": path_to_vm,\n \"snapshot_name\": snapshot_name,\n \"action_space\": action_space,\n \"cache_dir\": cache_dir,\n \"screen_size\": screen_size,\n \"headless\": headless,\n \"require_a11y_tree\": require_a11y_tree,\n \"require_terminal\": require_terminal,\n \"os_type\": os_type,\n \"enable_proxy\": enable_proxy,\n }\n if DesktopEnv is None:\n raise ImportError(\n \"desktop_env is not installed. 
Please install it (use `make osworld`) to use OSWorld Gym.\"\n )\n self.env = DesktopEnv(\n action_space=action_space,\n provider_name=provider_name,\n region=region, # type: ignore\n path_to_vm=path_to_vm, # type: ignore\n snapshot_name=snapshot_name,\n cache_dir=cache_dir,\n screen_size=screen_size, # type: ignore\n headless=headless,\n require_a11y_tree=require_a11y_tree,\n require_terminal=require_terminal,\n os_type=os_type,\n )\n self._step_count = 0\n self.max_steps = max_steps\n self.exp_dir = exp_dir\n self.record_video = record_video\n\n def reset(self, seed: int | None = None) -> tuple[dict[str, Any], dict[str, Any]]:\n self.env.reset(task_config=self.task, seed=seed)\n logging.info(f\"Start solving task: {self.task['instruction']}\")\n time.sleep(\n 60\n ) # Wait for the environment to be ready, as in https://github.com/xlang-ai/OSWorld/blob/main/lib_run_single.py#L15\n raw_obs = self.env._get_obs() # Get the initial observation\n if self.record_video:\n self.env.controller.start_recording()\n logging.info(\"Started recording the environment video\")\n obs = self.to_agentlab_observation(raw_obs)\n self._step_count = 0\n return obs, self.env_info\n\n @add_step_timing_to_env_info_decorator\n def step(self, action: str):\n \"\"\"Execute the action in the OS-world environment.\"\"\"\n env_action = self.agentlab_to_env_action(action)\n logger.info(f\"AgentLab Action returned: {action}, converted to: {env_action}\")\n raw_obs, reward, done, info = self.env.step(env_action)\n logger.info(f\"STEP {self.task['id']} {self._step_count + 1}/{self.max_steps}\")\n self._step_count += 1\n truncated = info.get(\"fail\", False) or self._step_count >= self.max_steps\n if done or truncated:\n if done:\n logger.info(f\"Task {self.task['id']} completed successfully.\")\n else:\n logger.warning(f\"Task {self.task['id']} truncated after {self._step_count} steps.\")\n try:\n reward = self.env.evaluate()\n logger.info(f\"Evaluated reward: {reward}\")\n except Exception as e:\n logger.error(f\"Failed to evaluate {self.task} task: {e}\")\n obs = self.to_agentlab_observation(raw_obs)\n return obs, reward, done, truncated, info\n\n def agentlab_to_env_action(self, action: str) -> Any:\n \"\"\"Convert AgentLab agents action format to OSWorld action format.\"\"\"\n if self.env.action_space == \"computer_13\":\n return self.convert_agentlab_action_to_computer_13(action)\n elif self.env.action_space == \"pyautogui\":\n raise NotImplementedError(\n \"PyAutoGUI action space is not supported yet. 
Please use 'computer_13' action space.\"\n )\n\n def to_agentlab_observation(self, obs: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Convert OSWorld observation to AgentLab format.\"\"\"\n converted_obs = {}\n\n self._add_screenshot(converted_obs, obs)\n # self._add_som_screenshot(converted_obs, obs) #TODO: test this\n converted_obs[\"axtree_txt\"] = linearize_accessibility_tree(\n accessibility_tree=obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"last_action_error\"] = \"\" # OSWorld doesn't provide this directly\n converted_obs[\"focused_element_bid\"] = \"\" # Extract from accessibility tree if available\n converted_obs = self._add_browser_context(converted_obs)\n converted_obs = self._add_task_context(converted_obs, obs)\n\n return converted_obs\n\n def convert_screenshot_to_numpy(self, screenshot) -> np.ndarray:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab.\"\"\"\n image = Image.open(BytesIO(screenshot))\n image = image.convert(\"RGB\") if image.mode != \"RGB\" else image\n return np.array(image)\n\n def _add_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab\"\"\"\n converted_obs[\"screenshot\"] = self.convert_screenshot_to_numpy(obs[\"screenshot\"])\n\n def _add_som_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert SOM screenshot to numpy array format expected by AgentLab\"\"\"\n masks, drew_nodes, tagged_screenshot, linearized_accessibility_tree = tag_screenshot(\n obs[\"screenshot\"], obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"som_screenshot\"] = self.convert_screenshot_to_numpy(tagged_screenshot)\n\n def _add_browser_context(self, converted_obs: dict[str, Any]):\n \"\"\"Add browser-like context fields adapted for desktop environment.\"\"\"\n converted_obs[\"url\"] = \"\"\n converted_obs[\"open_pages_urls\"] = []\n converted_obs[\"open_pages_titles\"] = []\n converted_obs[\"active_page_index\"] = 0\n return converted_obs\n\n def _add_task_context(self, converted_obs: dict[str, Any], obs: dict[str, Any]):\n \"\"\"Add task and instruction context fields.\"\"\"\n instruction = obs.get(\"instruction\", \"\")\n converted_obs[\"goal_object\"] = [{\"type\": \"text\", \"text\": instruction}]\n if obs.get(\"terminal\"):\n converted_obs[\"terminal_output\"] = obs[\"terminal\"]\n return converted_obs\n\n def convert_agentlab_action_to_computer_13(self, action: str) -> dict[str, Any] | str:\n \"\"\"Convert action string to dictionary format.\n\n Args:\n action (str): Action string in AgentLab format, e.g., \"move_to(x=100, y=200)\".\n\n Returns:\n dict[str, Any] | str: Action in OSWorld Computer 13 format as a dictionary,\n or a string for simple actions like \"wait\", \"done\", or \"fail\".\n\n Examples:\n >>> env = OsworldGym(task={}, provider_name=\"vmware\", region=None, path_to_vm=None,\n ... snapshot_name=\"init_state\", action_space=\"computer_13\",\n ... cache_dir=\"cache\", screen_size=(1920, 1080), headless=True,\n ... require_a11y_tree=True, require_terminal=False, os_type=\"Ubuntu\",\n ... 
enable_proxy=False, max_steps=50, exp_dir=Path(\".\"))\n >>> env.convert_agentlab_action_to_computer_13(\"move_to(x=100, y=200)\")\n {'action_type': 'MOVE_TO', 'parameters': {'x': 100, 'y': 200}}\n >>> env.convert_agentlab_action_to_computer_13(\"wait()\")\n 'WAIT'\n \"\"\"\n\n action_type, action_args, action_kwargs = self.parse_agentlab_action_str_to_func_args(\n action\n )\n\n if action_type in [\"wait\", \"done\", \"fail\"]:\n return str(action_type).upper()\n if action_args:\n logger.warning(\n f\"\"\"Action '{action_type}' has unexpected positional arguments: {action_args}.\n OSWorld Computer 13 actions are processed as dictionaries.\"\"\"\n )\n action_kwargs = action_kwargs if action_kwargs is not None else {}\n\n return {\"action_type\": str(action_type).upper(), \"parameters\": action_kwargs}\n\n @staticmethod\n def parse_agentlab_action_str_to_func_args(action: str):\n \"\"\"Parse the agentlab action string to extract function name, args, and kwargs.\n\n Args:\n action (str): Action string in AgentLab format, e.g., \"move_to(x=100, y=200)\".\n\n Returns:\n tuple: A tuple containing the function name, a list of positional arguments,\n and a dictionary of keyword arguments.\n\n Examples:\n >>> parse_agentlab_action_str_to_func_args(\"move_to(x=100, y=200)\")\n ('move_to', [], {'x': 100, 'y': 200})\n >>> parse_agentlab_action_str_to_func_args(\"hotkey(keys=['ctrl', 'alt', 't'])\")\n ('hotkey', [], {'keys': ['ctrl', 'alt', 't']})\n \"\"\"\n try:\n action = action.strip()\n parsed = ast.parse(action, mode=\"eval\")\n if isinstance(parsed.body, ast.Call):\n func_name = ast.unparse(parsed.body.func)\n args = [ast.literal_eval(arg) for arg in parsed.body.args]\n kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in parsed.body.keywords}\n return func_name, args, kwargs\n except Exception as e:\n logger.warning(\n f\"Failed to parse agentlab agent's str function call: {action}, error: {e}\"\n )\n return None, None, None\n\n def close(self):\n if self.record_video:\n video_name = str(self.exp_dir / \"recording.mp4\")\n self.env.controller.end_recording(video_name)\n logger.info(f\"Recorded video saved to {video_name}\")\n return self.env.close()\n\n\n@dataclass\nclass OSWorldActionSet(AbstractActionSet, DataClassJsonMixin):\n # TODO: Define and use agentlab AbstractActionSet\n # AbstractActionSet should define some standard format to represent actions.(list of dict with keys that are MCP compatible)\n # Should we have 'abstract function' here for action conversion for backend LLM with fixed action set like UI-Tars or Semi-fixed action set LLMs like OpenAI CUA?\n # TODO: We need to support both 'action space as tools' and 'action space as prompt' for agentlab agents\n # and have conversion functions to convert them to format acceptable by environment.\n action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n multiaction: bool = False\n\n def describe(self, with_long_description: bool = True, with_examples: bool = True) -> str:\n \"\"\"Describe the OSWorld action set for desktop interactions.\"\"\"\n pass\n\n def example_action(self, abstract: bool) -> str:\n \"\"\"Provide example actions for the action set.\"\"\"\n pass\n","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} 
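For illustration, a minimal, self-contained sketch of the action conversion quoted in the OsworldGym records above. The helper names parse_action and to_computer_13 are hypothetical stand-ins for OsworldGym.parse_agentlab_action_str_to_func_args and OsworldGym.convert_agentlab_action_to_computer_13; the logic mirrors the quoted source but is not the library API itself.

import ast


def parse_action(action: str):
    # Parse "move_to(x=100, y=200)" into ("move_to", [], {"x": 100, "y": 200}),
    # mirroring parse_agentlab_action_str_to_func_args: the string is parsed as a
    # Python expression and keyword values are recovered with ast.literal_eval.
    parsed = ast.parse(action.strip(), mode="eval")
    if not isinstance(parsed.body, ast.Call):
        return None, None, None
    func_name = ast.unparse(parsed.body.func)
    args = [ast.literal_eval(a) for a in parsed.body.args]
    kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in parsed.body.keywords}
    return func_name, args, kwargs


def to_computer_13(action: str):
    # Mirror convert_agentlab_action_to_computer_13: the control actions
    # wait/done/fail map to plain uppercase strings, everything else to an
    # {"action_type", "parameters"} dict in OSWorld computer_13 format.
    name, _args, kwargs = parse_action(action)
    if name in ("wait", "done", "fail"):
        return name.upper()
    return {"action_type": name.upper(), "parameters": kwargs or {}}


assert to_computer_13("move_to(x=100, y=200)") == {
    "action_type": "MOVE_TO",
    "parameters": {"x": 100, "y": 200},
}
assert to_computer_13("wait()") == "WAIT"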
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.OSWorldActionSet","uri":"program://AgentLab/class/src.agentlab.benchmarks.osworld.OSWorldActionSet#L758-L810","kind":"class","name":"OSWorldActionSet","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":758,"end_line":810,"context_start_line":738,"context_end_line":830,"code":" if isinstance(parsed.body, ast.Call):\n func_name = ast.unparse(parsed.body.func)\n args = [ast.literal_eval(arg) for arg in parsed.body.args]\n kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in parsed.body.keywords}\n return func_name, args, kwargs\n except Exception as e:\n logger.warning(\n f\"Failed to parse agentlab agent's str function call: {action}, error: {e}\"\n )\n return None, None, None\n\n def close(self):\n if self.record_video:\n video_name = str(self.exp_dir / \"recording.mp4\")\n self.env.controller.end_recording(video_name)\n logger.info(f\"Recorded video saved to {video_name}\")\n return self.env.close()\n\n\n@dataclass\nclass OSWorldActionSet(AbstractActionSet, DataClassJsonMixin):\n # TODO: Define and use agentlab AbstractActionSet\n # AbstractActionSet should define some standard format to represent actions.(list of dict with keys that are MCP compatible)\n # Should we have 'abstract function' here for action conversion for backend LLM with fixed action set like UI-Tars or Semi-fixed action set LLMs like OpenAI CUA?\n # TODO: We need to support both 'action space as tools' and 'action space as prompt' for agentlab agents\n # and have conversion functions to convert them to format acceptable by environment.\n action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n multiaction: bool = False\n\n def describe(self, with_long_description: bool = True, with_examples: bool = True) -> str:\n \"\"\"Describe the OSWorld action set for desktop interactions.\"\"\"\n pass\n\n def example_action(self, abstract: bool) -> str:\n \"\"\"Provide example actions for the action set.\"\"\"\n pass\n\n def to_python_code(self, action) -> str:\n \"\"\"We use the OS-world/desktop_env environment controller\"\"\"\n pass\n\n def to_tool_description(self, api=\"openai\"):\n \"\"\"Convert the action set to a tool description for Tool-Use LLMs.\n\n The default for openai is openai Response API tools format.\n\n Args:\n api (str): The API format to use. 
Defaults to \"openai\".\n\n Returns:\n list[dict]: List of tool descriptions in the specified API format.\n\n Raises:\n ValueError: If an unsupported action space is specified.\n \"\"\"\n # TODO: Rename bgym AbstractActionSet 'to_tool_descriptor' method as 'to_tool_description' for consistency.\n if self.action_space == \"computer_13\":\n tools = COMPUTER_13_ACTIONS_OAI_CHATCOMPLETION_TOOLS\n\n else:\n raise ValueError(\n \"Only 'computer_13' action space is currently supported for tool description.\"\n )\n api_formatters = {\n \"openai\": lambda: format_chat_completion_tools_to_response_api(tools),\n \"chatcompletion\": lambda: tools,\n \"anthropic\": lambda: format_chat_completion_tools_to_anthropic(tools),\n }\n\n if api not in api_formatters:\n raise ValueError(f\"Unsupported API type: {api}\")\n\n return api_formatters[api]()\n\n\ndef format_chat_completion_tools_to_anthropic(tools: list[dict]) -> list[dict]:\n \"\"\"Convert OpenAI Response API tool format to Anthropic tool format.\"\"\"\n formatted_tools = []\n for tool in tools:\n function_def = tool[\"function\"]\n formatted_tool = {\n \"name\": function_def[\"name\"],\n \"description\": function_def[\"description\"],\n \"input_schema\": function_def[\"parameters\"],\n }\n formatted_tools.append(formatted_tool)\n\n return formatted_tools\n\n\ndef format_chat_completion_tools_to_response_api(tools: list[dict]) -> list[dict]:\n \"\"\"Convert tools from OpenAI Chat Completion format to Responses API format.\n","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.format_chat_completion_tools_to_anthropic","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.format_chat_completion_tools_to_anthropic#L813-L825","kind":"function","name":"format_chat_completion_tools_to_anthropic","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":813,"end_line":825,"context_start_line":793,"context_end_line":845,"code":" # TODO: Rename bgym AbstractActionSet 'to_tool_descriptor' method as 'to_tool_description' for consistency.\n if self.action_space == \"computer_13\":\n tools = COMPUTER_13_ACTIONS_OAI_CHATCOMPLETION_TOOLS\n\n else:\n raise ValueError(\n \"Only 'computer_13' action space is currently supported for tool description.\"\n )\n api_formatters = {\n \"openai\": lambda: format_chat_completion_tools_to_response_api(tools),\n \"chatcompletion\": lambda: tools,\n \"anthropic\": lambda: format_chat_completion_tools_to_anthropic(tools),\n }\n\n if api not in api_formatters:\n raise ValueError(f\"Unsupported API type: {api}\")\n\n return api_formatters[api]()\n\n\ndef format_chat_completion_tools_to_anthropic(tools: list[dict]) -> list[dict]:\n \"\"\"Convert OpenAI Response API tool format to Anthropic tool format.\"\"\"\n formatted_tools = []\n for tool in tools:\n function_def = tool[\"function\"]\n formatted_tool = {\n \"name\": function_def[\"name\"],\n \"description\": function_def[\"description\"],\n \"input_schema\": function_def[\"parameters\"],\n }\n formatted_tools.append(formatted_tool)\n\n return formatted_tools\n\n\ndef format_chat_completion_tools_to_response_api(tools: list[dict]) -> list[dict]:\n \"\"\"Convert tools from OpenAI Chat Completion format to Responses API format.\n\n Args:\n tools: List of tools in Chat Completion format with nested function object\n\n Returns:\n List of tools in Responses API format with flattened structure\n \"\"\"\n formatted_tools = []\n for tool in 
tools:\n        function_def = tool[\"function\"]\n        formatted_tool = {\n            \"type\": \"function\",\n            \"name\": function_def[\"name\"],\n            \"description\": function_def[\"description\"],\n            \"parameters\": function_def[\"parameters\"],\n        }","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.format_chat_completion_tools_to_response_api","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.format_chat_completion_tools_to_response_api#L828-L853","kind":"function","name":"format_chat_completion_tools_to_response_api","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":828,"end_line":853,"context_start_line":808,"context_end_line":873,"code":"    raise ValueError(f\"Unsupported API type: {api}\")\n\n    return api_formatters[api]()\n\n\ndef format_chat_completion_tools_to_anthropic(tools: list[dict]) -> list[dict]:\n    \"\"\"Convert OpenAI Response API tool format to Anthropic tool format.\"\"\"\n    formatted_tools = []\n    for tool in tools:\n        function_def = tool[\"function\"]\n        formatted_tool = {\n            \"name\": function_def[\"name\"],\n            \"description\": function_def[\"description\"],\n            \"input_schema\": function_def[\"parameters\"],\n        }\n        formatted_tools.append(formatted_tool)\n\n    return formatted_tools\n\n\ndef format_chat_completion_tools_to_response_api(tools: list[dict]) -> list[dict]:\n    \"\"\"Convert tools from OpenAI Chat Completion format to Responses API format.\n\n    Args:\n        tools: List of tools in Chat Completion format with nested function object\n\n    Returns:\n        List of tools in Responses API format with flattened structure\n    \"\"\"\n    formatted_tools = []\n    for tool in tools:\n        function_def = tool[\"function\"]\n        formatted_tool = {\n            \"type\": \"function\",\n            \"name\": function_def[\"name\"],\n            \"description\": function_def[\"description\"],\n            \"parameters\": function_def[\"parameters\"],\n        }\n\n        # Handle the strict field if present\n        if \"strict\" in function_def:\n            formatted_tool[\"strict\"] = function_def[\"strict\"]\n\n        formatted_tools.append(formatted_tool)\n\n    return formatted_tools\n\n\n@dataclass\nclass OsworldEnvArgs(AbstractEnvArgs):\n    task: dict[str, Any]\n    task_seed: int = 0\n    task_name: str | None = None\n    path_to_vm: str | None = None  # path to .vmx file\n    provider_name: str = \"docker\"  # VM provider backend, e.g. \"docker\" or \"vmware\"\n    region: str = \"us-east-1\"  # AWS specific, does not apply to all providers\n    snapshot_name: str = \"init_state\"  # snapshot name to revert to\n    action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n    cache_dir: str = \"cache\"\n    screen_size: tuple[int, int] = (1920, 1080)\n    headless: bool = False\n    require_a11y_tree: bool = True\n    require_terminal: bool = False\n    os_type: str = \"Ubuntu\"\n    enable_proxy: bool = False\n    max_steps: int = 50","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.OsworldEnvArgs","uri":"program://AgentLab/class/src.agentlab.benchmarks.osworld.OsworldEnvArgs#L857-L896","kind":"class","name":"OsworldEnvArgs","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":857,"end_line":896,"context_start_line":837,"context_end_line":916,"code":"    formatted_tools = []\n    for tool in tools:\n        function_def = tool[\"function\"]\n        formatted_tool = {\n            \"type\": \"function\",\n            \"name\": function_def[\"name\"],\n            \"description\": function_def[\"description\"],\n            
\"parameters\": function_def[\"parameters\"],\n }\n\n # Handle the strict field if present\n if \"strict\" in function_def:\n formatted_tool[\"strict\"] = function_def[\"strict\"]\n\n formatted_tools.append(formatted_tool)\n\n return formatted_tools\n\n\n@dataclass\nclass OsworldEnvArgs(AbstractEnvArgs):\n task: dict[str, Any]\n task_seed: int = 0\n task_name: str | None = None\n path_to_vm: str | None = None # path to .vmx file\n provider_name: str = \"docker\" # path to .vmx file\n region: str = \"us-east-1\" # AWS specific, does not apply to all providers\n snapshot_name: str = \"init_state\" # snapshot name to revert to\n action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n cache_dir: str = \"cache\"\n screen_size: tuple[int, int] = (1920, 1080)\n headless: bool = False\n require_a11y_tree: bool = True\n require_terminal: bool = False\n os_type: str = \"Ubuntu\"\n enable_proxy: bool = False\n max_steps: int = 50\n\n def make_env(\n self, exp_dir: Path, action_mapping=None, use_raw_page_output: bool = False\n ) -> OsworldGym:\n logger.info(f\"Creating OSWorld Gym with task: {self.task}\")\n gym = OsworldGym(\n task=self.task,\n provider_name=self.provider_name,\n region=self.region,\n path_to_vm=self.path_to_vm,\n snapshot_name=self.snapshot_name,\n action_space=self.action_space,\n cache_dir=self.cache_dir,\n screen_size=self.screen_size,\n headless=self.headless,\n require_a11y_tree=self.require_a11y_tree,\n require_terminal=self.require_terminal,\n os_type=self.os_type,\n enable_proxy=self.enable_proxy,\n max_steps=self.max_steps,\n exp_dir=exp_dir,\n )\n return gym\n\n\nclass OsworldBenchmark(AbstractBenchmark):\n name: str = \"osworld\"\n is_multi_tab: bool = False\n high_level_action_set_args: OSWorldActionSet = None # type: ignore\n test_set_path: str = \"OSWorld/evaluation_examples\"\n test_set_name: str = \"test_all.json\"\n domain: str = \"all\"\n env_args: OsworldEnvArgs = None # type: ignore # basic env configuration for all tasks\n env_args_list: list[OsworldEnvArgs] = None # type: ignore\n\n def model_post_init(self, __context: Any) -> None:\n self.env_args_list = []\n if not self.env_args:\n self.env_args = OsworldEnvArgs(task={})\n self.high_level_action_set_args = OSWorldActionSet(action_space=self.env_args.action_space)\n with open(os.path.join(self.test_set_path, self.test_set_name)) as f:\n tasks = json.load(f)\n if self.domain != \"all\":","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.OsworldBenchmark","uri":"program://AgentLab/class/src.agentlab.benchmarks.osworld.OsworldBenchmark#L899-L948","kind":"class","name":"OsworldBenchmark","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":899,"end_line":948,"context_start_line":879,"context_end_line":948,"code":" gym = OsworldGym(\n task=self.task,\n provider_name=self.provider_name,\n region=self.region,\n path_to_vm=self.path_to_vm,\n snapshot_name=self.snapshot_name,\n action_space=self.action_space,\n cache_dir=self.cache_dir,\n screen_size=self.screen_size,\n headless=self.headless,\n require_a11y_tree=self.require_a11y_tree,\n require_terminal=self.require_terminal,\n os_type=self.os_type,\n enable_proxy=self.enable_proxy,\n max_steps=self.max_steps,\n exp_dir=exp_dir,\n )\n return gym\n\n\nclass OsworldBenchmark(AbstractBenchmark):\n name: str = \"osworld\"\n is_multi_tab: bool = False\n high_level_action_set_args: OSWorldActionSet = None # type: 
ignore\n test_set_path: str = \"OSWorld/evaluation_examples\"\n test_set_name: str = \"test_all.json\"\n domain: str = \"all\"\n env_args: OsworldEnvArgs = None # type: ignore # basic env configuration for all tasks\n env_args_list: list[OsworldEnvArgs] = None # type: ignore\n\n def model_post_init(self, __context: Any) -> None:\n self.env_args_list = []\n if not self.env_args:\n self.env_args = OsworldEnvArgs(task={})\n self.high_level_action_set_args = OSWorldActionSet(action_space=self.env_args.action_space)\n with open(os.path.join(self.test_set_path, self.test_set_name)) as f:\n tasks = json.load(f)\n if self.domain != \"all\":\n tasks = {self.domain: tasks[self.domain]}\n\n for domain in tasks:\n for task_id in tasks[domain]:\n task_file = os.path.join(self.test_set_path, f\"examples/{domain}/{task_id}.json\")\n with open(task_file) as f:\n task = json.load(f)\n task = self.fix_settings_file_path_in_config(task)\n name = f\"{self.name}.{task['id']}\"\n task_env_args = deepcopy(self.env_args)\n task_env_args.task = task\n task_env_args.task_name = name\n self.env_args_list.append(task_env_args)\n logger.info(f\"Loaded {len(self.env_args_list)} tasks from domain '{self.domain}'\")\n\n def fix_settings_file_path_in_config(self, task: dict) -> dict:\n \"\"\"Fix the settings file path in the task configuration.\n\n Args:\n task: Task configuration dictionary.\n\n Returns:\n Updated task configuration with fixed settings file paths.\n \"\"\"\n osworld_repo = os.getenv(\"OSWORLD_REPO\", \"OSWorld\")\n updated_task = deepcopy(task) # Avoid modifying the original task\n for config in updated_task[\"config\"]:\n if config.get(\"parameters\", False) and config[\"parameters\"].get(\"settings_file\", False):\n config[\"parameters\"][\"settings_file\"] = os.path.join(\n osworld_repo, config[\"parameters\"][\"settings_file\"]\n )\n return updated_task","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.__init__","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.__init__#L531-L585","kind":"function","name":"__init__","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":531,"end_line":585,"context_start_line":511,"context_end_line":605,"code":" \"type\": \"function\",\n \"function\": {\n \"name\": \"fail\",\n \"description\": \"Decide the task cannot be performed\",\n \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []},\n },\n },\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"done\",\n \"description\": \"Decide the task is done\",\n \"parameters\": {\"type\": \"object\", \"properties\": {}, \"required\": []},\n },\n },\n]\n\n\nclass OsworldGym(AbstractEnv):\n\n def __init__(\n self,\n task: dict,\n provider_name: str,\n region: str | None,\n path_to_vm: str | None,\n snapshot_name: str,\n action_space: str,\n cache_dir: str,\n screen_size: tuple[int, int],\n headless: bool,\n require_a11y_tree: bool,\n require_terminal: bool,\n os_type: str,\n enable_proxy: bool,\n max_steps: int,\n exp_dir: Path,\n record_video: bool = True,\n ):\n self.task = task\n self.env_info = {\n \"provider_name\": provider_name,\n \"region\": region,\n \"path_to_vm\": path_to_vm,\n \"snapshot_name\": snapshot_name,\n \"action_space\": action_space,\n \"cache_dir\": cache_dir,\n \"screen_size\": screen_size,\n \"headless\": headless,\n \"require_a11y_tree\": require_a11y_tree,\n \"require_terminal\": require_terminal,\n 
\"os_type\": os_type,\n \"enable_proxy\": enable_proxy,\n }\n if DesktopEnv is None:\n raise ImportError(\n \"desktop_env is not installed. Please install it (use `make osworld`) to use OSWorld Gym.\"\n )\n self.env = DesktopEnv(\n action_space=action_space,\n provider_name=provider_name,\n region=region, # type: ignore\n path_to_vm=path_to_vm, # type: ignore\n snapshot_name=snapshot_name,\n cache_dir=cache_dir,\n screen_size=screen_size, # type: ignore\n headless=headless,\n require_a11y_tree=require_a11y_tree,\n require_terminal=require_terminal,\n os_type=os_type,\n )\n self._step_count = 0\n self.max_steps = max_steps\n self.exp_dir = exp_dir\n self.record_video = record_video\n\n def reset(self, seed: int | None = None) -> tuple[dict[str, Any], dict[str, Any]]:\n self.env.reset(task_config=self.task, seed=seed)\n logging.info(f\"Start solving task: {self.task['instruction']}\")\n time.sleep(\n 60\n ) # Wait for the environment to be ready, as in https://github.com/xlang-ai/OSWorld/blob/main/lib_run_single.py#L15\n raw_obs = self.env._get_obs() # Get the initial observation\n if self.record_video:\n self.env.controller.start_recording()\n logging.info(\"Started recording the environment video\")\n obs = self.to_agentlab_observation(raw_obs)\n self._step_count = 0\n return obs, self.env_info\n\n @add_step_timing_to_env_info_decorator\n def step(self, action: str):\n \"\"\"Execute the action in the OS-world environment.\"\"\"\n env_action = self.agentlab_to_env_action(action)\n logger.info(f\"AgentLab Action returned: {action}, converted to: {env_action}\")","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.reset","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.reset#L587-L599","kind":"function","name":"reset","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":587,"end_line":599,"context_start_line":567,"context_end_line":619,"code":" \"desktop_env is not installed. 
Please install it (use `make osworld`) to use OSWorld Gym.\"\n )\n self.env = DesktopEnv(\n action_space=action_space,\n provider_name=provider_name,\n region=region, # type: ignore\n path_to_vm=path_to_vm, # type: ignore\n snapshot_name=snapshot_name,\n cache_dir=cache_dir,\n screen_size=screen_size, # type: ignore\n headless=headless,\n require_a11y_tree=require_a11y_tree,\n require_terminal=require_terminal,\n os_type=os_type,\n )\n self._step_count = 0\n self.max_steps = max_steps\n self.exp_dir = exp_dir\n self.record_video = record_video\n\n def reset(self, seed: int | None = None) -> tuple[dict[str, Any], dict[str, Any]]:\n self.env.reset(task_config=self.task, seed=seed)\n logging.info(f\"Start solving task: {self.task['instruction']}\")\n time.sleep(\n 60\n ) # Wait for the environment to be ready, as in https://github.com/xlang-ai/OSWorld/blob/main/lib_run_single.py#L15\n raw_obs = self.env._get_obs() # Get the initial observation\n if self.record_video:\n self.env.controller.start_recording()\n logging.info(\"Started recording the environment video\")\n obs = self.to_agentlab_observation(raw_obs)\n self._step_count = 0\n return obs, self.env_info\n\n @add_step_timing_to_env_info_decorator\n def step(self, action: str):\n \"\"\"Execute the action in the OS-world environment.\"\"\"\n env_action = self.agentlab_to_env_action(action)\n logger.info(f\"AgentLab Action returned: {action}, converted to: {env_action}\")\n raw_obs, reward, done, info = self.env.step(env_action)\n logger.info(f\"STEP {self.task['id']} {self._step_count + 1}/{self.max_steps}\")\n self._step_count += 1\n truncated = info.get(\"fail\", False) or self._step_count >= self.max_steps\n if done or truncated:\n if done:\n logger.info(f\"Task {self.task['id']} completed successfully.\")\n else:\n logger.warning(f\"Task {self.task['id']} truncated after {self._step_count} steps.\")\n try:\n reward = self.env.evaluate()\n logger.info(f\"Evaluated reward: {reward}\")\n except Exception as e:\n logger.error(f\"Failed to evaluate {self.task} task: {e}\")","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.step","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.step#L602-L621","kind":"function","name":"step","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":602,"end_line":621,"context_start_line":582,"context_end_line":641,"code":" self._step_count = 0\n self.max_steps = max_steps\n self.exp_dir = exp_dir\n self.record_video = record_video\n\n def reset(self, seed: int | None = None) -> tuple[dict[str, Any], dict[str, Any]]:\n self.env.reset(task_config=self.task, seed=seed)\n logging.info(f\"Start solving task: {self.task['instruction']}\")\n time.sleep(\n 60\n ) # Wait for the environment to be ready, as in https://github.com/xlang-ai/OSWorld/blob/main/lib_run_single.py#L15\n raw_obs = self.env._get_obs() # Get the initial observation\n if self.record_video:\n self.env.controller.start_recording()\n logging.info(\"Started recording the environment video\")\n obs = self.to_agentlab_observation(raw_obs)\n self._step_count = 0\n return obs, self.env_info\n\n @add_step_timing_to_env_info_decorator\n def step(self, action: str):\n \"\"\"Execute the action in the OS-world environment.\"\"\"\n env_action = self.agentlab_to_env_action(action)\n logger.info(f\"AgentLab Action returned: {action}, converted to: {env_action}\")\n raw_obs, reward, done, info = 
self.env.step(env_action)\n logger.info(f\"STEP {self.task['id']} {self._step_count + 1}/{self.max_steps}\")\n self._step_count += 1\n truncated = info.get(\"fail\", False) or self._step_count >= self.max_steps\n if done or truncated:\n if done:\n logger.info(f\"Task {self.task['id']} completed successfully.\")\n else:\n logger.warning(f\"Task {self.task['id']} truncated after {self._step_count} steps.\")\n try:\n reward = self.env.evaluate()\n logger.info(f\"Evaluated reward: {reward}\")\n except Exception as e:\n logger.error(f\"Failed to evaluate {self.task} task: {e}\")\n obs = self.to_agentlab_observation(raw_obs)\n return obs, reward, done, truncated, info\n\n def agentlab_to_env_action(self, action: str) -> Any:\n \"\"\"Convert AgentLab agents action format to OSWorld action format.\"\"\"\n if self.env.action_space == \"computer_13\":\n return self.convert_agentlab_action_to_computer_13(action)\n elif self.env.action_space == \"pyautogui\":\n raise NotImplementedError(\n \"PyAutoGUI action space is not supported yet. Please use 'computer_13' action space.\"\n )\n\n def to_agentlab_observation(self, obs: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Convert OSWorld observation to AgentLab format.\"\"\"\n converted_obs = {}\n\n self._add_screenshot(converted_obs, obs)\n # self._add_som_screenshot(converted_obs, obs) #TODO: test this\n converted_obs[\"axtree_txt\"] = linearize_accessibility_tree(\n accessibility_tree=obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"last_action_error\"] = \"\" # OSWorld doesn't provide this directly","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.agentlab_to_env_action","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.agentlab_to_env_action#L623-L630","kind":"function","name":"agentlab_to_env_action","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":623,"end_line":630,"context_start_line":603,"context_end_line":650,"code":" \"\"\"Execute the action in the OS-world environment.\"\"\"\n env_action = self.agentlab_to_env_action(action)\n logger.info(f\"AgentLab Action returned: {action}, converted to: {env_action}\")\n raw_obs, reward, done, info = self.env.step(env_action)\n logger.info(f\"STEP {self.task['id']} {self._step_count + 1}/{self.max_steps}\")\n self._step_count += 1\n truncated = info.get(\"fail\", False) or self._step_count >= self.max_steps\n if done or truncated:\n if done:\n logger.info(f\"Task {self.task['id']} completed successfully.\")\n else:\n logger.warning(f\"Task {self.task['id']} truncated after {self._step_count} steps.\")\n try:\n reward = self.env.evaluate()\n logger.info(f\"Evaluated reward: {reward}\")\n except Exception as e:\n logger.error(f\"Failed to evaluate {self.task} task: {e}\")\n obs = self.to_agentlab_observation(raw_obs)\n return obs, reward, done, truncated, info\n\n def agentlab_to_env_action(self, action: str) -> Any:\n \"\"\"Convert AgentLab agents action format to OSWorld action format.\"\"\"\n if self.env.action_space == \"computer_13\":\n return self.convert_agentlab_action_to_computer_13(action)\n elif self.env.action_space == \"pyautogui\":\n raise NotImplementedError(\n \"PyAutoGUI action space is not supported yet. 
Please use 'computer_13' action space.\"\n )\n\n def to_agentlab_observation(self, obs: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Convert OSWorld observation to AgentLab format.\"\"\"\n converted_obs = {}\n\n self._add_screenshot(converted_obs, obs)\n # self._add_som_screenshot(converted_obs, obs) #TODO: test this\n converted_obs[\"axtree_txt\"] = linearize_accessibility_tree(\n accessibility_tree=obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"last_action_error\"] = \"\" # OSWorld doesn't provide this directly\n converted_obs[\"focused_element_bid\"] = \"\" # Extract from accessibility tree if available\n converted_obs = self._add_browser_context(converted_obs)\n converted_obs = self._add_task_context(converted_obs, obs)\n\n return converted_obs\n\n def convert_screenshot_to_numpy(self, screenshot) -> np.ndarray:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab.\"\"\"\n image = Image.open(BytesIO(screenshot))","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.to_agentlab_observation","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.to_agentlab_observation#L632-L646","kind":"function","name":"to_agentlab_observation","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":632,"end_line":646,"context_start_line":612,"context_end_line":666,"code":" logger.info(f\"Task {self.task['id']} completed successfully.\")\n else:\n logger.warning(f\"Task {self.task['id']} truncated after {self._step_count} steps.\")\n try:\n reward = self.env.evaluate()\n logger.info(f\"Evaluated reward: {reward}\")\n except Exception as e:\n logger.error(f\"Failed to evaluate {self.task} task: {e}\")\n obs = self.to_agentlab_observation(raw_obs)\n return obs, reward, done, truncated, info\n\n def agentlab_to_env_action(self, action: str) -> Any:\n \"\"\"Convert AgentLab agents action format to OSWorld action format.\"\"\"\n if self.env.action_space == \"computer_13\":\n return self.convert_agentlab_action_to_computer_13(action)\n elif self.env.action_space == \"pyautogui\":\n raise NotImplementedError(\n \"PyAutoGUI action space is not supported yet. 
Please use 'computer_13' action space.\"\n )\n\n def to_agentlab_observation(self, obs: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Convert OSWorld observation to AgentLab format.\"\"\"\n converted_obs = {}\n\n self._add_screenshot(converted_obs, obs)\n # self._add_som_screenshot(converted_obs, obs) #TODO: test this\n converted_obs[\"axtree_txt\"] = linearize_accessibility_tree(\n accessibility_tree=obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"last_action_error\"] = \"\" # OSWorld doesn't provide this directly\n converted_obs[\"focused_element_bid\"] = \"\" # Extract from accessibility tree if available\n converted_obs = self._add_browser_context(converted_obs)\n converted_obs = self._add_task_context(converted_obs, obs)\n\n return converted_obs\n\n def convert_screenshot_to_numpy(self, screenshot) -> np.ndarray:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab.\"\"\"\n image = Image.open(BytesIO(screenshot))\n image = image.convert(\"RGB\") if image.mode != \"RGB\" else image\n return np.array(image)\n\n def _add_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab\"\"\"\n converted_obs[\"screenshot\"] = self.convert_screenshot_to_numpy(obs[\"screenshot\"])\n\n def _add_som_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert SOM screenshot to numpy array format expected by AgentLab\"\"\"\n masks, drew_nodes, tagged_screenshot, linearized_accessibility_tree = tag_screenshot(\n obs[\"screenshot\"], obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"som_screenshot\"] = self.convert_screenshot_to_numpy(tagged_screenshot)\n\n def _add_browser_context(self, converted_obs: dict[str, Any]):\n \"\"\"Add browser-like context fields adapted for desktop environment.\"\"\"","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.convert_screenshot_to_numpy","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.convert_screenshot_to_numpy#L648-L652","kind":"function","name":"convert_screenshot_to_numpy","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":648,"end_line":652,"context_start_line":628,"context_end_line":672,"code":" raise NotImplementedError(\n \"PyAutoGUI action space is not supported yet. 
Please use 'computer_13' action space.\"\n )\n\n def to_agentlab_observation(self, obs: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Convert OSWorld observation to AgentLab format.\"\"\"\n converted_obs = {}\n\n self._add_screenshot(converted_obs, obs)\n # self._add_som_screenshot(converted_obs, obs) #TODO: test this\n converted_obs[\"axtree_txt\"] = linearize_accessibility_tree(\n accessibility_tree=obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"last_action_error\"] = \"\" # OSWorld doesn't provide this directly\n converted_obs[\"focused_element_bid\"] = \"\" # Extract from accessibility tree if available\n converted_obs = self._add_browser_context(converted_obs)\n converted_obs = self._add_task_context(converted_obs, obs)\n\n return converted_obs\n\n def convert_screenshot_to_numpy(self, screenshot) -> np.ndarray:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab.\"\"\"\n image = Image.open(BytesIO(screenshot))\n image = image.convert(\"RGB\") if image.mode != \"RGB\" else image\n return np.array(image)\n\n def _add_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab\"\"\"\n converted_obs[\"screenshot\"] = self.convert_screenshot_to_numpy(obs[\"screenshot\"])\n\n def _add_som_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert SOM screenshot to numpy array format expected by AgentLab\"\"\"\n masks, drew_nodes, tagged_screenshot, linearized_accessibility_tree = tag_screenshot(\n obs[\"screenshot\"], obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"som_screenshot\"] = self.convert_screenshot_to_numpy(tagged_screenshot)\n\n def _add_browser_context(self, converted_obs: dict[str, Any]):\n \"\"\"Add browser-like context fields adapted for desktop environment.\"\"\"\n converted_obs[\"url\"] = \"\"\n converted_obs[\"open_pages_urls\"] = []\n converted_obs[\"open_pages_titles\"] = []\n converted_obs[\"active_page_index\"] = 0\n return converted_obs\n","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld._add_screenshot","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld._add_screenshot#L654-L656","kind":"function","name":"_add_screenshot","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":654,"end_line":656,"context_start_line":634,"context_end_line":676,"code":" converted_obs = {}\n\n self._add_screenshot(converted_obs, obs)\n # self._add_som_screenshot(converted_obs, obs) #TODO: test this\n converted_obs[\"axtree_txt\"] = linearize_accessibility_tree(\n accessibility_tree=obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"last_action_error\"] = \"\" # OSWorld doesn't provide this directly\n converted_obs[\"focused_element_bid\"] = \"\" # Extract from accessibility tree if available\n converted_obs = self._add_browser_context(converted_obs)\n converted_obs = self._add_task_context(converted_obs, obs)\n\n return converted_obs\n\n def convert_screenshot_to_numpy(self, screenshot) -> np.ndarray:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab.\"\"\"\n image = Image.open(BytesIO(screenshot))\n image = image.convert(\"RGB\") if image.mode != \"RGB\" else image\n return np.array(image)\n\n def _add_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert screenshot to 
numpy array format expected by AgentLab\"\"\"\n converted_obs[\"screenshot\"] = self.convert_screenshot_to_numpy(obs[\"screenshot\"])\n\n def _add_som_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert SOM screenshot to numpy array format expected by AgentLab\"\"\"\n masks, drew_nodes, tagged_screenshot, linearized_accessibility_tree = tag_screenshot(\n obs[\"screenshot\"], obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"som_screenshot\"] = self.convert_screenshot_to_numpy(tagged_screenshot)\n\n def _add_browser_context(self, converted_obs: dict[str, Any]):\n \"\"\"Add browser-like context fields adapted for desktop environment.\"\"\"\n converted_obs[\"url\"] = \"\"\n converted_obs[\"open_pages_urls\"] = []\n converted_obs[\"open_pages_titles\"] = []\n converted_obs[\"active_page_index\"] = 0\n return converted_obs\n\n def _add_task_context(self, converted_obs: dict[str, Any], obs: dict[str, Any]):\n \"\"\"Add task and instruction context fields.\"\"\"\n instruction = obs.get(\"instruction\", \"\")\n converted_obs[\"goal_object\"] = [{\"type\": \"text\", \"text\": instruction}]","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld._add_som_screenshot","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld._add_som_screenshot#L658-L663","kind":"function","name":"_add_som_screenshot","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":658,"end_line":663,"context_start_line":638,"context_end_line":683,"code":" converted_obs[\"axtree_txt\"] = linearize_accessibility_tree(\n accessibility_tree=obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"last_action_error\"] = \"\" # OSWorld doesn't provide this directly\n converted_obs[\"focused_element_bid\"] = \"\" # Extract from accessibility tree if available\n converted_obs = self._add_browser_context(converted_obs)\n converted_obs = self._add_task_context(converted_obs, obs)\n\n return converted_obs\n\n def convert_screenshot_to_numpy(self, screenshot) -> np.ndarray:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab.\"\"\"\n image = Image.open(BytesIO(screenshot))\n image = image.convert(\"RGB\") if image.mode != \"RGB\" else image\n return np.array(image)\n\n def _add_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab\"\"\"\n converted_obs[\"screenshot\"] = self.convert_screenshot_to_numpy(obs[\"screenshot\"])\n\n def _add_som_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert SOM screenshot to numpy array format expected by AgentLab\"\"\"\n masks, drew_nodes, tagged_screenshot, linearized_accessibility_tree = tag_screenshot(\n obs[\"screenshot\"], obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"som_screenshot\"] = self.convert_screenshot_to_numpy(tagged_screenshot)\n\n def _add_browser_context(self, converted_obs: dict[str, Any]):\n \"\"\"Add browser-like context fields adapted for desktop environment.\"\"\"\n converted_obs[\"url\"] = \"\"\n converted_obs[\"open_pages_urls\"] = []\n converted_obs[\"open_pages_titles\"] = []\n converted_obs[\"active_page_index\"] = 0\n return converted_obs\n\n def _add_task_context(self, converted_obs: dict[str, Any], obs: dict[str, Any]):\n \"\"\"Add task and instruction context fields.\"\"\"\n 
instruction = obs.get(\"instruction\", \"\")\n converted_obs[\"goal_object\"] = [{\"type\": \"text\", \"text\": instruction}]\n if obs.get(\"terminal\"):\n converted_obs[\"terminal_output\"] = obs[\"terminal\"]\n return converted_obs\n\n def convert_agentlab_action_to_computer_13(self, action: str) -> dict[str, Any] | str:\n \"\"\"Convert action string to dictionary format.\n","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld._add_browser_context","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld._add_browser_context#L665-L671","kind":"function","name":"_add_browser_context","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":665,"end_line":671,"context_start_line":645,"context_end_line":691,"code":"\n return converted_obs\n\n def convert_screenshot_to_numpy(self, screenshot) -> np.ndarray:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab.\"\"\"\n image = Image.open(BytesIO(screenshot))\n image = image.convert(\"RGB\") if image.mode != \"RGB\" else image\n return np.array(image)\n\n def _add_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert screenshot to numpy array format expected by AgentLab\"\"\"\n converted_obs[\"screenshot\"] = self.convert_screenshot_to_numpy(obs[\"screenshot\"])\n\n def _add_som_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert SOM screenshot to numpy array format expected by AgentLab\"\"\"\n masks, drew_nodes, tagged_screenshot, linearized_accessibility_tree = tag_screenshot(\n obs[\"screenshot\"], obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"som_screenshot\"] = self.convert_screenshot_to_numpy(tagged_screenshot)\n\n def _add_browser_context(self, converted_obs: dict[str, Any]):\n \"\"\"Add browser-like context fields adapted for desktop environment.\"\"\"\n converted_obs[\"url\"] = \"\"\n converted_obs[\"open_pages_urls\"] = []\n converted_obs[\"open_pages_titles\"] = []\n converted_obs[\"active_page_index\"] = 0\n return converted_obs\n\n def _add_task_context(self, converted_obs: dict[str, Any], obs: dict[str, Any]):\n \"\"\"Add task and instruction context fields.\"\"\"\n instruction = obs.get(\"instruction\", \"\")\n converted_obs[\"goal_object\"] = [{\"type\": \"text\", \"text\": instruction}]\n if obs.get(\"terminal\"):\n converted_obs[\"terminal_output\"] = obs[\"terminal\"]\n return converted_obs\n\n def convert_agentlab_action_to_computer_13(self, action: str) -> dict[str, Any] | str:\n \"\"\"Convert action string to dictionary format.\n\n Args:\n action (str): Action string in AgentLab format, e.g., \"move_to(x=100, y=200)\".\n\n Returns:\n dict[str, Any] | str: Action in OSWorld Computer 13 format as a dictionary,\n or a string for simple actions like \"wait\", \"done\", or \"fail\".\n\n Examples:","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld._add_task_context","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld._add_task_context#L673-L679","kind":"function","name":"_add_task_context","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":673,"end_line":679,"context_start_line":653,"context_end_line":699,"code":"\n def _add_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n 
\"\"\"Convert screenshot to numpy array format expected by AgentLab\"\"\"\n converted_obs[\"screenshot\"] = self.convert_screenshot_to_numpy(obs[\"screenshot\"])\n\n def _add_som_screenshot(self, converted_obs: dict[str, Any], obs: dict[str, Any]) -> None:\n \"\"\"Convert SOM screenshot to numpy array format expected by AgentLab\"\"\"\n masks, drew_nodes, tagged_screenshot, linearized_accessibility_tree = tag_screenshot(\n obs[\"screenshot\"], obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"som_screenshot\"] = self.convert_screenshot_to_numpy(tagged_screenshot)\n\n def _add_browser_context(self, converted_obs: dict[str, Any]):\n \"\"\"Add browser-like context fields adapted for desktop environment.\"\"\"\n converted_obs[\"url\"] = \"\"\n converted_obs[\"open_pages_urls\"] = []\n converted_obs[\"open_pages_titles\"] = []\n converted_obs[\"active_page_index\"] = 0\n return converted_obs\n\n def _add_task_context(self, converted_obs: dict[str, Any], obs: dict[str, Any]):\n \"\"\"Add task and instruction context fields.\"\"\"\n instruction = obs.get(\"instruction\", \"\")\n converted_obs[\"goal_object\"] = [{\"type\": \"text\", \"text\": instruction}]\n if obs.get(\"terminal\"):\n converted_obs[\"terminal_output\"] = obs[\"terminal\"]\n return converted_obs\n\n def convert_agentlab_action_to_computer_13(self, action: str) -> dict[str, Any] | str:\n \"\"\"Convert action string to dictionary format.\n\n Args:\n action (str): Action string in AgentLab format, e.g., \"move_to(x=100, y=200)\".\n\n Returns:\n dict[str, Any] | str: Action in OSWorld Computer 13 format as a dictionary,\n or a string for simple actions like \"wait\", \"done\", or \"fail\".\n\n Examples:\n >>> env = OsworldGym(task={}, provider_name=\"vmware\", region=None, path_to_vm=None,\n ... snapshot_name=\"init_state\", action_space=\"computer_13\",\n ... cache_dir=\"cache\", screen_size=(1920, 1080), headless=True,\n ... require_a11y_tree=True, require_terminal=False, os_type=\"Ubuntu\",\n ... 
enable_proxy=False, max_steps=50, exp_dir=Path(\".\"))\n >>> env.convert_agentlab_action_to_computer_13(\"move_to(x=100, y=200)\")\n {'action_type': 'MOVE_TO', 'parameters': {'x': 100, 'y': 200}}\n >>> env.convert_agentlab_action_to_computer_13(\"wait()\")","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.convert_agentlab_action_to_computer_13","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.convert_agentlab_action_to_computer_13#L681-L716","kind":"function","name":"convert_agentlab_action_to_computer_13","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":681,"end_line":716,"context_start_line":661,"context_end_line":736,"code":" obs[\"screenshot\"], obs[\"accessibility_tree\"], platform=\"ubuntu\"\n )\n converted_obs[\"som_screenshot\"] = self.convert_screenshot_to_numpy(tagged_screenshot)\n\n def _add_browser_context(self, converted_obs: dict[str, Any]):\n \"\"\"Add browser-like context fields adapted for desktop environment.\"\"\"\n converted_obs[\"url\"] = \"\"\n converted_obs[\"open_pages_urls\"] = []\n converted_obs[\"open_pages_titles\"] = []\n converted_obs[\"active_page_index\"] = 0\n return converted_obs\n\n def _add_task_context(self, converted_obs: dict[str, Any], obs: dict[str, Any]):\n \"\"\"Add task and instruction context fields.\"\"\"\n instruction = obs.get(\"instruction\", \"\")\n converted_obs[\"goal_object\"] = [{\"type\": \"text\", \"text\": instruction}]\n if obs.get(\"terminal\"):\n converted_obs[\"terminal_output\"] = obs[\"terminal\"]\n return converted_obs\n\n def convert_agentlab_action_to_computer_13(self, action: str) -> dict[str, Any] | str:\n \"\"\"Convert action string to dictionary format.\n\n Args:\n action (str): Action string in AgentLab format, e.g., \"move_to(x=100, y=200)\".\n\n Returns:\n dict[str, Any] | str: Action in OSWorld Computer 13 format as a dictionary,\n or a string for simple actions like \"wait\", \"done\", or \"fail\".\n\n Examples:\n >>> env = OsworldGym(task={}, provider_name=\"vmware\", region=None, path_to_vm=None,\n ... snapshot_name=\"init_state\", action_space=\"computer_13\",\n ... cache_dir=\"cache\", screen_size=(1920, 1080), headless=True,\n ... require_a11y_tree=True, require_terminal=False, os_type=\"Ubuntu\",\n ... 
enable_proxy=False, max_steps=50, exp_dir=Path(\".\"))\n >>> env.convert_agentlab_action_to_computer_13(\"move_to(x=100, y=200)\")\n {'action_type': 'MOVE_TO', 'parameters': {'x': 100, 'y': 200}}\n >>> env.convert_agentlab_action_to_computer_13(\"wait()\")\n 'WAIT'\n \"\"\"\n\n action_type, action_args, action_kwargs = self.parse_agentlab_action_str_to_func_args(\n action\n )\n\n if action_type in [\"wait\", \"done\", \"fail\"]:\n return str(action_type).upper()\n if action_args:\n logger.warning(\n f\"\"\"Action '{action_type}' has unexpected positional arguments: {action_args}.\n OSWorld Computer 13 actions are processed as dictionaries.\"\"\"\n )\n action_kwargs = action_kwargs if action_kwargs is not None else {}\n\n return {\"action_type\": str(action_type).upper(), \"parameters\": action_kwargs}\n\n @staticmethod\n def parse_agentlab_action_str_to_func_args(action: str):\n \"\"\"Parse the agentlab action string to extract function name, args, and kwargs.\n\n Args:\n action (str): Action string in AgentLab format, e.g., \"move_to(x=100, y=200)\".\n\n Returns:\n tuple: A tuple containing the function name, a list of positional arguments,\n and a dictionary of keyword arguments.\n\n Examples:\n >>> parse_agentlab_action_str_to_func_args(\"move_to(x=100, y=200)\")\n ('move_to', [], {'x': 100, 'y': 200})\n >>> parse_agentlab_action_str_to_func_args(\"hotkey(keys=['ctrl', 'alt', 't'])\")\n ('hotkey', [], {'keys': ['ctrl', 'alt', 't']})\n \"\"\"\n try:\n action = action.strip()","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.parse_agentlab_action_str_to_func_args","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.parse_agentlab_action_str_to_func_args#L719-L747","kind":"function","name":"parse_agentlab_action_str_to_func_args","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":719,"end_line":747,"context_start_line":699,"context_end_line":767,"code":" >>> env.convert_agentlab_action_to_computer_13(\"wait()\")\n 'WAIT'\n \"\"\"\n\n action_type, action_args, action_kwargs = self.parse_agentlab_action_str_to_func_args(\n action\n )\n\n if action_type in [\"wait\", \"done\", \"fail\"]:\n return str(action_type).upper()\n if action_args:\n logger.warning(\n f\"\"\"Action '{action_type}' has unexpected positional arguments: {action_args}.\n OSWorld Computer 13 actions are processed as dictionaries.\"\"\"\n )\n action_kwargs = action_kwargs if action_kwargs is not None else {}\n\n return {\"action_type\": str(action_type).upper(), \"parameters\": action_kwargs}\n\n @staticmethod\n def parse_agentlab_action_str_to_func_args(action: str):\n \"\"\"Parse the agentlab action string to extract function name, args, and kwargs.\n\n Args:\n action (str): Action string in AgentLab format, e.g., \"move_to(x=100, y=200)\".\n\n Returns:\n tuple: A tuple containing the function name, a list of positional arguments,\n and a dictionary of keyword arguments.\n\n Examples:\n >>> parse_agentlab_action_str_to_func_args(\"move_to(x=100, y=200)\")\n ('move_to', [], {'x': 100, 'y': 200})\n >>> parse_agentlab_action_str_to_func_args(\"hotkey(keys=['ctrl', 'alt', 't'])\")\n ('hotkey', [], {'keys': ['ctrl', 'alt', 't']})\n \"\"\"\n try:\n action = action.strip()\n parsed = ast.parse(action, mode=\"eval\")\n if isinstance(parsed.body, ast.Call):\n func_name = ast.unparse(parsed.body.func)\n args = [ast.literal_eval(arg) for arg in parsed.body.args]\n kwargs = 
{kw.arg: ast.literal_eval(kw.value) for kw in parsed.body.keywords}\n return func_name, args, kwargs\n except Exception as e:\n logger.warning(\n f\"Failed to parse agentlab agent's str function call: {action}, error: {e}\"\n )\n return None, None, None\n\n def close(self):\n if self.record_video:\n video_name = str(self.exp_dir / \"recording.mp4\")\n self.env.controller.end_recording(video_name)\n logger.info(f\"Recorded video saved to {video_name}\")\n return self.env.close()\n\n\n@dataclass\nclass OSWorldActionSet(AbstractActionSet, DataClassJsonMixin):\n # TODO: Define and use agentlab AbstractActionSet\n # AbstractActionSet should define some standard format to represent actions.(list of dict with keys that are MCP compatible)\n # Should we have 'abstract function' here for action conversion for backend LLM with fixed action set like UI-Tars or Semi-fixed action set LLMs like OpenAI CUA?\n # TODO: We need to support both 'action space as tools' and 'action space as prompt' for agentlab agents\n # and have conversion functions to convert them to format acceptable by environment.\n action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n multiaction: bool = False\n\n def describe(self, with_long_description: bool = True, with_examples: bool = True) -> str:","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.close","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.close#L749-L754","kind":"function","name":"close","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":749,"end_line":754,"context_start_line":729,"context_end_line":774,"code":" Examples:\n >>> parse_agentlab_action_str_to_func_args(\"move_to(x=100, y=200)\")\n ('move_to', [], {'x': 100, 'y': 200})\n >>> parse_agentlab_action_str_to_func_args(\"hotkey(keys=['ctrl', 'alt', 't'])\")\n ('hotkey', [], {'keys': ['ctrl', 'alt', 't']})\n \"\"\"\n try:\n action = action.strip()\n parsed = ast.parse(action, mode=\"eval\")\n if isinstance(parsed.body, ast.Call):\n func_name = ast.unparse(parsed.body.func)\n args = [ast.literal_eval(arg) for arg in parsed.body.args]\n kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in parsed.body.keywords}\n return func_name, args, kwargs\n except Exception as e:\n logger.warning(\n f\"Failed to parse agentlab agent's str function call: {action}, error: {e}\"\n )\n return None, None, None\n\n def close(self):\n if self.record_video:\n video_name = str(self.exp_dir / \"recording.mp4\")\n self.env.controller.end_recording(video_name)\n logger.info(f\"Recorded video saved to {video_name}\")\n return self.env.close()\n\n\n@dataclass\nclass OSWorldActionSet(AbstractActionSet, DataClassJsonMixin):\n # TODO: Define and use agentlab AbstractActionSet\n # AbstractActionSet should define some standard format to represent actions.(list of dict with keys that are MCP compatible)\n # Should we have 'abstract function' here for action conversion for backend LLM with fixed action set like UI-Tars or Semi-fixed action set LLMs like OpenAI CUA?\n # TODO: We need to support both 'action space as tools' and 'action space as prompt' for agentlab agents\n # and have conversion functions to convert them to format acceptable by environment.\n action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n multiaction: bool = False\n\n def describe(self, with_long_description: bool = True, with_examples: bool = True) -> str:\n 
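# A minimal, self-contained sketch of the ast-based strategy used above by
# parse_agentlab_action_str_to_func_args: parse the action string as a Python
# expression, then recover the call name, positional args, and keyword args.
# `parse_action` is a hypothetical stand-in, not the repository function.
import ast

def parse_action(action: str):
    parsed = ast.parse(action.strip(), mode="eval")
    if not isinstance(parsed.body, ast.Call):
        return None, None, None
    func_name = ast.unparse(parsed.body.func)
    args = [ast.literal_eval(a) for a in parsed.body.args]
    kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in parsed.body.keywords}
    return func_name, args, kwargs

name, args, kwargs = parse_action("hotkey(keys=['ctrl', 'alt', 't'])")
assert (name, args, kwargs) == ("hotkey", [], {"keys": ["ctrl", "alt", "t"]})
# Mapping to the Computer-13 dictionary shape is then a rename plus upper-casing:
computer_13 = {"action_type": name.upper(), "parameters": kwargs}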
\"\"\"Describe the OSWorld action set for desktop interactions.\"\"\"\n pass\n\n def example_action(self, abstract: bool) -> str:\n \"\"\"Provide example actions for the action set.\"\"\"\n pass\n","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.describe","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.describe#L767-L769","kind":"function","name":"describe","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":767,"end_line":769,"context_start_line":747,"context_end_line":789,"code":" return None, None, None\n\n def close(self):\n if self.record_video:\n video_name = str(self.exp_dir / \"recording.mp4\")\n self.env.controller.end_recording(video_name)\n logger.info(f\"Recorded video saved to {video_name}\")\n return self.env.close()\n\n\n@dataclass\nclass OSWorldActionSet(AbstractActionSet, DataClassJsonMixin):\n # TODO: Define and use agentlab AbstractActionSet\n # AbstractActionSet should define some standard format to represent actions.(list of dict with keys that are MCP compatible)\n # Should we have 'abstract function' here for action conversion for backend LLM with fixed action set like UI-Tars or Semi-fixed action set LLMs like OpenAI CUA?\n # TODO: We need to support both 'action space as tools' and 'action space as prompt' for agentlab agents\n # and have conversion functions to convert them to format acceptable by environment.\n action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n multiaction: bool = False\n\n def describe(self, with_long_description: bool = True, with_examples: bool = True) -> str:\n \"\"\"Describe the OSWorld action set for desktop interactions.\"\"\"\n pass\n\n def example_action(self, abstract: bool) -> str:\n \"\"\"Provide example actions for the action set.\"\"\"\n pass\n\n def to_python_code(self, action) -> str:\n \"\"\"We use the OS-world/desktop_env environment controller\"\"\"\n pass\n\n def to_tool_description(self, api=\"openai\"):\n \"\"\"Convert the action set to a tool description for Tool-Use LLMs.\n\n The default for openai is openai Response API tools format.\n\n Args:\n api (str): The API format to use. 
Defaults to \"openai\".\n\n Returns:\n list[dict]: List of tool descriptions in the specified API format.\n","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.example_action","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.example_action#L771-L773","kind":"function","name":"example_action","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":771,"end_line":773,"context_start_line":751,"context_end_line":793,"code":" video_name = str(self.exp_dir / \"recording.mp4\")\n self.env.controller.end_recording(video_name)\n logger.info(f\"Recorded video saved to {video_name}\")\n return self.env.close()\n\n\n@dataclass\nclass OSWorldActionSet(AbstractActionSet, DataClassJsonMixin):\n # TODO: Define and use agentlab AbstractActionSet\n # AbstractActionSet should define some standard format to represent actions.(list of dict with keys that are MCP compatible)\n # Should we have 'abstract function' here for action conversion for backend LLM with fixed action set like UI-Tars or Semi-fixed action set LLMs like OpenAI CUA?\n # TODO: We need to support both 'action space as tools' and 'action space as prompt' for agentlab agents\n # and have conversion functions to convert them to format acceptable by environment.\n action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n multiaction: bool = False\n\n def describe(self, with_long_description: bool = True, with_examples: bool = True) -> str:\n \"\"\"Describe the OSWorld action set for desktop interactions.\"\"\"\n pass\n\n def example_action(self, abstract: bool) -> str:\n \"\"\"Provide example actions for the action set.\"\"\"\n pass\n\n def to_python_code(self, action) -> str:\n \"\"\"We use the OS-world/desktop_env environment controller\"\"\"\n pass\n\n def to_tool_description(self, api=\"openai\"):\n \"\"\"Convert the action set to a tool description for Tool-Use LLMs.\n\n The default for openai is openai Response API tools format.\n\n Args:\n api (str): The API format to use. 
Defaults to \"openai\".\n\n Returns:\n list[dict]: List of tool descriptions in the specified API format.\n\n Raises:\n ValueError: If an unsupported action space is specified.\n \"\"\"\n # TODO: Rename bgym AbstractActionSet 'to_tool_descriptor' method as 'to_tool_description' for consistency.","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.to_python_code","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.to_python_code#L775-L777","kind":"function","name":"to_python_code","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":775,"end_line":777,"context_start_line":755,"context_end_line":797,"code":"\n\n@dataclass\nclass OSWorldActionSet(AbstractActionSet, DataClassJsonMixin):\n # TODO: Define and use agentlab AbstractActionSet\n # AbstractActionSet should define some standard format to represent actions.(list of dict with keys that are MCP compatible)\n # Should we have 'abstract function' here for action conversion for backend LLM with fixed action set like UI-Tars or Semi-fixed action set LLMs like OpenAI CUA?\n # TODO: We need to support both 'action space as tools' and 'action space as prompt' for agentlab agents\n # and have conversion functions to convert them to format acceptable by environment.\n action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n multiaction: bool = False\n\n def describe(self, with_long_description: bool = True, with_examples: bool = True) -> str:\n \"\"\"Describe the OSWorld action set for desktop interactions.\"\"\"\n pass\n\n def example_action(self, abstract: bool) -> str:\n \"\"\"Provide example actions for the action set.\"\"\"\n pass\n\n def to_python_code(self, action) -> str:\n \"\"\"We use the OS-world/desktop_env environment controller\"\"\"\n pass\n\n def to_tool_description(self, api=\"openai\"):\n \"\"\"Convert the action set to a tool description for Tool-Use LLMs.\n\n The default for openai is openai Response API tools format.\n\n Args:\n api (str): The API format to use. 
Defaults to \"openai\".\n\n Returns:\n list[dict]: List of tool descriptions in the specified API format.\n\n Raises:\n ValueError: If an unsupported action space is specified.\n \"\"\"\n # TODO: Rename bgym AbstractActionSet 'to_tool_descriptor' method as 'to_tool_description' for consistency.\n if self.action_space == \"computer_13\":\n tools = COMPUTER_13_ACTIONS_OAI_CHATCOMPLETION_TOOLS\n\n else:","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.to_tool_description","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.to_tool_description#L779-L810","kind":"function","name":"to_tool_description","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":779,"end_line":810,"context_start_line":759,"context_end_line":830,"code":" # TODO: Define and use agentlab AbstractActionSet\n # AbstractActionSet should define some standard format to represent actions.(list of dict with keys that are MCP compatible)\n # Should we have 'abstract function' here for action conversion for backend LLM with fixed action set like UI-Tars or Semi-fixed action set LLMs like OpenAI CUA?\n # TODO: We need to support both 'action space as tools' and 'action space as prompt' for agentlab agents\n # and have conversion functions to convert them to format acceptable by environment.\n action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n multiaction: bool = False\n\n def describe(self, with_long_description: bool = True, with_examples: bool = True) -> str:\n \"\"\"Describe the OSWorld action set for desktop interactions.\"\"\"\n pass\n\n def example_action(self, abstract: bool) -> str:\n \"\"\"Provide example actions for the action set.\"\"\"\n pass\n\n def to_python_code(self, action) -> str:\n \"\"\"We use the OS-world/desktop_env environment controller\"\"\"\n pass\n\n def to_tool_description(self, api=\"openai\"):\n \"\"\"Convert the action set to a tool description for Tool-Use LLMs.\n\n The default for openai is openai Response API tools format.\n\n Args:\n api (str): The API format to use. 
Defaults to \"openai\".\n\n Returns:\n list[dict]: List of tool descriptions in the specified API format.\n\n Raises:\n ValueError: If an unsupported action space is specified.\n \"\"\"\n # TODO: Rename bgym AbstractActionSet 'to_tool_descriptor' method as 'to_tool_description' for consistency.\n if self.action_space == \"computer_13\":\n tools = COMPUTER_13_ACTIONS_OAI_CHATCOMPLETION_TOOLS\n\n else:\n raise ValueError(\n \"Only 'computer_13' action space is currently supported for tool description.\"\n )\n api_formatters = {\n \"openai\": lambda: format_chat_completion_tools_to_response_api(tools),\n \"chatcompletion\": lambda: tools,\n \"anthropic\": lambda: format_chat_completion_tools_to_anthropic(tools),\n }\n\n if api not in api_formatters:\n raise ValueError(f\"Unsupported API type: {api}\")\n\n return api_formatters[api]()\n\n\ndef format_chat_completion_tools_to_anthropic(tools: list[dict]) -> list[dict]:\n \"\"\"Convert OpenAI Response API tool format to Anthropic tool format.\"\"\"\n formatted_tools = []\n for tool in tools:\n function_def = tool[\"function\"]\n formatted_tool = {\n \"name\": function_def[\"name\"],\n \"description\": function_def[\"description\"],\n \"input_schema\": function_def[\"parameters\"],\n }\n formatted_tools.append(formatted_tool)\n\n return formatted_tools\n\n\ndef format_chat_completion_tools_to_response_api(tools: list[dict]) -> list[dict]:\n \"\"\"Convert tools from OpenAI Chat Completion format to Responses API format.\n","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.make_env","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.make_env#L875-L896","kind":"function","name":"make_env","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":875,"end_line":896,"context_start_line":855,"context_end_line":916,"code":"\n@dataclass\nclass OsworldEnvArgs(AbstractEnvArgs):\n task: dict[str, Any]\n task_seed: int = 0\n task_name: str | None = None\n path_to_vm: str | None = None # path to .vmx file\n provider_name: str = \"docker\" # path to .vmx file\n region: str = \"us-east-1\" # AWS specific, does not apply to all providers\n snapshot_name: str = \"init_state\" # snapshot name to revert to\n action_space: Literal[\"computer_13\", \"pyautogui\"] = \"computer_13\"\n cache_dir: str = \"cache\"\n screen_size: tuple[int, int] = (1920, 1080)\n headless: bool = False\n require_a11y_tree: bool = True\n require_terminal: bool = False\n os_type: str = \"Ubuntu\"\n enable_proxy: bool = False\n max_steps: int = 50\n\n def make_env(\n self, exp_dir: Path, action_mapping=None, use_raw_page_output: bool = False\n ) -> OsworldGym:\n logger.info(f\"Creating OSWorld Gym with task: {self.task}\")\n gym = OsworldGym(\n task=self.task,\n provider_name=self.provider_name,\n region=self.region,\n path_to_vm=self.path_to_vm,\n snapshot_name=self.snapshot_name,\n action_space=self.action_space,\n cache_dir=self.cache_dir,\n screen_size=self.screen_size,\n headless=self.headless,\n require_a11y_tree=self.require_a11y_tree,\n require_terminal=self.require_terminal,\n os_type=self.os_type,\n enable_proxy=self.enable_proxy,\n max_steps=self.max_steps,\n exp_dir=exp_dir,\n )\n return gym\n\n\nclass OsworldBenchmark(AbstractBenchmark):\n name: str = \"osworld\"\n is_multi_tab: bool = False\n high_level_action_set_args: OSWorldActionSet = None # type: ignore\n test_set_path: str = \"OSWorld/evaluation_examples\"\n 
test_set_name: str = \"test_all.json\"\n domain: str = \"all\"\n env_args: OsworldEnvArgs = None # type: ignore # basic env configuration for all tasks\n env_args_list: list[OsworldEnvArgs] = None # type: ignore\n\n def model_post_init(self, __context: Any) -> None:\n self.env_args_list = []\n if not self.env_args:\n self.env_args = OsworldEnvArgs(task={})\n self.high_level_action_set_args = OSWorldActionSet(action_space=self.env_args.action_space)\n with open(os.path.join(self.test_set_path, self.test_set_name)) as f:\n tasks = json.load(f)\n if self.domain != \"all\":","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.model_post_init","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.model_post_init#L909-L930","kind":"function","name":"model_post_init","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":909,"end_line":930,"context_start_line":889,"context_end_line":948,"code":" require_a11y_tree=self.require_a11y_tree,\n require_terminal=self.require_terminal,\n os_type=self.os_type,\n enable_proxy=self.enable_proxy,\n max_steps=self.max_steps,\n exp_dir=exp_dir,\n )\n return gym\n\n\nclass OsworldBenchmark(AbstractBenchmark):\n name: str = \"osworld\"\n is_multi_tab: bool = False\n high_level_action_set_args: OSWorldActionSet = None # type: ignore\n test_set_path: str = \"OSWorld/evaluation_examples\"\n test_set_name: str = \"test_all.json\"\n domain: str = \"all\"\n env_args: OsworldEnvArgs = None # type: ignore # basic env configuration for all tasks\n env_args_list: list[OsworldEnvArgs] = None # type: ignore\n\n def model_post_init(self, __context: Any) -> None:\n self.env_args_list = []\n if not self.env_args:\n self.env_args = OsworldEnvArgs(task={})\n self.high_level_action_set_args = OSWorldActionSet(action_space=self.env_args.action_space)\n with open(os.path.join(self.test_set_path, self.test_set_name)) as f:\n tasks = json.load(f)\n if self.domain != \"all\":\n tasks = {self.domain: tasks[self.domain]}\n\n for domain in tasks:\n for task_id in tasks[domain]:\n task_file = os.path.join(self.test_set_path, f\"examples/{domain}/{task_id}.json\")\n with open(task_file) as f:\n task = json.load(f)\n task = self.fix_settings_file_path_in_config(task)\n name = f\"{self.name}.{task['id']}\"\n task_env_args = deepcopy(self.env_args)\n task_env_args.task = task\n task_env_args.task_name = name\n self.env_args_list.append(task_env_args)\n logger.info(f\"Loaded {len(self.env_args_list)} tasks from domain '{self.domain}'\")\n\n def fix_settings_file_path_in_config(self, task: dict) -> dict:\n \"\"\"Fix the settings file path in the task configuration.\n\n Args:\n task: Task configuration dictionary.\n\n Returns:\n Updated task configuration with fixed settings file paths.\n \"\"\"\n osworld_repo = os.getenv(\"OSWORLD_REPO\", \"OSWorld\")\n updated_task = deepcopy(task) # Avoid modifying the original task\n for config in updated_task[\"config\"]:\n if config.get(\"parameters\", False) and config[\"parameters\"].get(\"settings_file\", False):\n config[\"parameters\"][\"settings_file\"] = os.path.join(\n osworld_repo, config[\"parameters\"][\"settings_file\"]\n )\n return updated_task","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.osworld.fix_settings_file_path_in_config","uri":"program://AgentLab/function/src.agentlab.benchmarks.osworld.fix_settings_file_path_in_config#L932-L948","kind":"function","name":"fix_settings_file_path_in_config","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":932,"end_line":948,"context_start_line":912,"context_end_line":948,"code":" self.env_args = OsworldEnvArgs(task={})\n self.high_level_action_set_args = OSWorldActionSet(action_space=self.env_args.action_space)\n with open(os.path.join(self.test_set_path, self.test_set_name)) as f:\n tasks = json.load(f)\n if self.domain != \"all\":\n tasks = {self.domain: tasks[self.domain]}\n\n for domain in tasks:\n for task_id in tasks[domain]:\n task_file = os.path.join(self.test_set_path, f\"examples/{domain}/{task_id}.json\")\n with open(task_file) as f:\n task = json.load(f)\n task = self.fix_settings_file_path_in_config(task)\n name = f\"{self.name}.{task['id']}\"\n task_env_args = deepcopy(self.env_args)\n task_env_args.task = task\n task_env_args.task_name = name\n self.env_args_list.append(task_env_args)\n logger.info(f\"Loaded {len(self.env_args_list)} tasks from domain '{self.domain}'\")\n\n def fix_settings_file_path_in_config(self, task: dict) -> dict:\n \"\"\"Fix the settings file path in the task configuration.\n\n Args:\n task: Task configuration dictionary.\n\n Returns:\n Updated task configuration with fixed settings file paths.\n \"\"\"\n osworld_repo = os.getenv(\"OSWORLD_REPO\", \"OSWorld\")\n updated_task = deepcopy(task) # Avoid modifying the original task\n for config in updated_task[\"config\"]:\n if config.get(\"parameters\", False) and config[\"parameters\"].get(\"settings_file\", False):\n config[\"parameters\"][\"settings_file\"] = os.path.join(\n osworld_repo, config[\"parameters\"][\"settings_file\"]\n )\n return updated_task","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.setup_benchmark","uri":"program://AgentLab/module/src.agentlab.benchmarks.setup_benchmark#L1-L106","kind":"module","name":"src.agentlab.benchmarks.setup_benchmark","path":"src/agentlab/benchmarks/setup_benchmark.py","language":"python","start_line":1,"end_line":106,"context_start_line":1,"context_end_line":106,"code":"\"\"\"Tiny benchmark setup helpers.\n\nCurrently supports MiniWob++: clones the repo at a pinned commit and writes\nMINIWOB_URL to .env. 
Designed to be minimal and easy to maintain.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport pathlib\nfrom typing import Optional\n\nlogger = logging.getLogger(__name__)\n\n\ndef _ensure_repo(repo_url: str, clone_dir: pathlib.Path, commit: Optional[str] = None) -> None:\n \"\"\"Clone repo if missing and optionally checkout a commit (minimal, shell-only).\n\n Args:\n repo_url: URL of the git repository to clone.\n clone_dir: Directory path where the repository should be cloned.\n commit: Optional commit hash to checkout after cloning.\n \"\"\"\n clone_dir = clone_dir.resolve()\n if not clone_dir.exists():\n clone_dir.parent.mkdir(parents=True, exist_ok=True)\n os.system(f\"git clone '{repo_url}' '{clone_dir}' >/dev/null 2>&1 || true\")\n # If it's a git repo and a commit is provided, best-effort checkout\n if commit and (clone_dir / \".git\").exists():\n os.system(f\"git -C '{clone_dir}' fetch --all --tags >/dev/null 2>&1 || true\")\n os.system(f\"git -C '{clone_dir}' checkout {commit} >/dev/null 2>&1 || true\")\n\n\ndef _write_env_kv(env_path: pathlib.Path, key: str, value: str) -> None:\n \"\"\"Idempotently write/update KEY=VALUE in .env file.\"\"\"\n env_path = env_path.resolve()\n env_path.parent.mkdir(parents=True, exist_ok=True)\n lines: list[str] = []\n if env_path.exists():\n lines = env_path.read_text().splitlines()\n key_prefix = f\"{key}=\"\n updated = False\n new_line = f\"{key}={value}\"\n out_lines: list[str] = []\n for line in lines:\n if line.strip().startswith(key_prefix):\n if not updated:\n out_lines.append(new_line)\n updated = True\n # Skip any other occurrences\n else:\n out_lines.append(line)\n if not updated:\n out_lines.append(new_line)\n env_path.write_text(\"\\n\".join(out_lines) + \"\\n\")\n\n\ndef setup_miniwob(project_root: pathlib.Path) -> str:\n \"\"\"Set up MiniWob++ locally and configure MINIWOB_URL in .env.\n\n Steps:\n - Clone https://github.com/Farama-Foundation/miniwob-plusplus.git (if missing)\n - Checkout pinned commit for reproducibility\n - Compute file:// URL to the local miniwob HTML assets\n - Write MINIWOB_URL to /.env\n\n Args:\n project_root: Project root directory path.\n\n Returns:\n The configured MINIWOB_URL string.\n \"\"\"\n # Clone the upstream repo at a pinned commit and use local HTML assets\n repo_url = \"https://github.com/Farama-Foundation/miniwob-plusplus.git\"\n commit = \"7fd85d71a4b60325c6585396ec4f48377d049838\"\n clone_dir = project_root / \"miniwob-plusplus\"\n _ensure_repo(repo_url=repo_url, clone_dir=clone_dir, commit=commit)\n miniwob_dir = (clone_dir / \"miniwob\" / \"html\" / \"miniwob\").resolve()\n # We still set URL even if folder doesn't exist yet; but warn through return\n url = miniwob_dir.as_uri().rstrip(\"/\") + \"/\"\n\n env_path = project_root / \".env\"\n _write_env_kv(env_path, \"MINIWOB_URL\", url)\n os.environ[\"MINIWOB_URL\"] = url # make available in current process immediately\n logger.info(\"MINIWOB_URL set to %s (recorded in %s)\", url, env_path)\n return url\n\n\ndef ensure_benchmark(benchmark: str, project_root: pathlib.Path) -> Optional[str]:\n \"\"\"Run setup lazily if required for the given benchmark.\n\n Args:\n benchmark: Name of the benchmark to ensure setup for.\n project_root: Project root directory path.\n\n Returns:\n The URL when setup is performed, otherwise None.\n \"\"\"\n key = benchmark.strip().lower()\n if key in {\"miniwob\", \"miniwob++\", \"miniwob-plusplus\"}:\n if not os.getenv(\"MINIWOB_URL\"):\n return setup_miniwob(project_root)\n return 
None\n # No-op for unsupported/other benchmarks\n return None","source_hash":"a2597599a39af02750d5124d62209074fb748a14c1b64b8eb71b075b1221541c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.setup_benchmark._ensure_repo","uri":"program://AgentLab/function/src.agentlab.benchmarks.setup_benchmark._ensure_repo#L17-L32","kind":"function","name":"_ensure_repo","path":"src/agentlab/benchmarks/setup_benchmark.py","language":"python","start_line":17,"end_line":32,"context_start_line":1,"context_end_line":52,"code":"\"\"\"Tiny benchmark setup helpers.\n\nCurrently supports MiniWob++: clones the repo at a pinned commit and writes\nMINIWOB_URL to .env. Designed to be minimal and easy to maintain.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport pathlib\nfrom typing import Optional\n\nlogger = logging.getLogger(__name__)\n\n\ndef _ensure_repo(repo_url: str, clone_dir: pathlib.Path, commit: Optional[str] = None) -> None:\n \"\"\"Clone repo if missing and optionally checkout a commit (minimal, shell-only).\n\n Args:\n repo_url: URL of the git repository to clone.\n clone_dir: Directory path where the repository should be cloned.\n commit: Optional commit hash to checkout after cloning.\n \"\"\"\n clone_dir = clone_dir.resolve()\n if not clone_dir.exists():\n clone_dir.parent.mkdir(parents=True, exist_ok=True)\n os.system(f\"git clone '{repo_url}' '{clone_dir}' >/dev/null 2>&1 || true\")\n # If it's a git repo and a commit is provided, best-effort checkout\n if commit and (clone_dir / \".git\").exists():\n os.system(f\"git -C '{clone_dir}' fetch --all --tags >/dev/null 2>&1 || true\")\n os.system(f\"git -C '{clone_dir}' checkout {commit} >/dev/null 2>&1 || true\")\n\n\ndef _write_env_kv(env_path: pathlib.Path, key: str, value: str) -> None:\n \"\"\"Idempotently write/update KEY=VALUE in .env file.\"\"\"\n env_path = env_path.resolve()\n env_path.parent.mkdir(parents=True, exist_ok=True)\n lines: list[str] = []\n if env_path.exists():\n lines = env_path.read_text().splitlines()\n key_prefix = f\"{key}=\"\n updated = False\n new_line = f\"{key}={value}\"\n out_lines: list[str] = []\n for line in lines:\n if line.strip().startswith(key_prefix):\n if not updated:\n out_lines.append(new_line)\n updated = True\n # Skip any other occurrences\n else:","source_hash":"a2597599a39af02750d5124d62209074fb748a14c1b64b8eb71b075b1221541c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.setup_benchmark._write_env_kv","uri":"program://AgentLab/function/src.agentlab.benchmarks.setup_benchmark._write_env_kv#L35-L56","kind":"function","name":"_write_env_kv","path":"src/agentlab/benchmarks/setup_benchmark.py","language":"python","start_line":35,"end_line":56,"context_start_line":15,"context_end_line":76,"code":"\n\ndef _ensure_repo(repo_url: str, clone_dir: pathlib.Path, commit: Optional[str] = None) -> None:\n \"\"\"Clone repo if missing and optionally checkout a commit (minimal, shell-only).\n\n Args:\n repo_url: URL of the git repository to clone.\n clone_dir: Directory path where the repository should be cloned.\n commit: Optional commit hash to checkout after cloning.\n \"\"\"\n clone_dir = clone_dir.resolve()\n if not clone_dir.exists():\n clone_dir.parent.mkdir(parents=True, exist_ok=True)\n os.system(f\"git clone '{repo_url}' '{clone_dir}' >/dev/null 2>&1 || true\")\n # If it's a git repo and a commit is provided, best-effort checkout\n if commit and (clone_dir / \".git\").exists():\n os.system(f\"git -C '{clone_dir}' 
fetch --all --tags >/dev/null 2>&1 || true\")\n os.system(f\"git -C '{clone_dir}' checkout {commit} >/dev/null 2>&1 || true\")\n\n\ndef _write_env_kv(env_path: pathlib.Path, key: str, value: str) -> None:\n \"\"\"Idempotently write/update KEY=VALUE in .env file.\"\"\"\n env_path = env_path.resolve()\n env_path.parent.mkdir(parents=True, exist_ok=True)\n lines: list[str] = []\n if env_path.exists():\n lines = env_path.read_text().splitlines()\n key_prefix = f\"{key}=\"\n updated = False\n new_line = f\"{key}={value}\"\n out_lines: list[str] = []\n for line in lines:\n if line.strip().startswith(key_prefix):\n if not updated:\n out_lines.append(new_line)\n updated = True\n # Skip any other occurrences\n else:\n out_lines.append(line)\n if not updated:\n out_lines.append(new_line)\n env_path.write_text(\"\\n\".join(out_lines) + \"\\n\")\n\n\ndef setup_miniwob(project_root: pathlib.Path) -> str:\n \"\"\"Set up MiniWob++ locally and configure MINIWOB_URL in .env.\n\n Steps:\n - Clone https://github.com/Farama-Foundation/miniwob-plusplus.git (if missing)\n - Checkout pinned commit for reproducibility\n - Compute file:// URL to the local miniwob HTML assets\n - Write MINIWOB_URL to /.env\n\n Args:\n project_root: Project root directory path.\n\n Returns:\n The configured MINIWOB_URL string.\n \"\"\"\n # Clone the upstream repo at a pinned commit and use local HTML assets\n repo_url = \"https://github.com/Farama-Foundation/miniwob-plusplus.git\"\n commit = \"7fd85d71a4b60325c6585396ec4f48377d049838\"","source_hash":"a2597599a39af02750d5124d62209074fb748a14c1b64b8eb71b075b1221541c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.setup_benchmark.setup_miniwob","uri":"program://AgentLab/function/src.agentlab.benchmarks.setup_benchmark.setup_miniwob#L59-L87","kind":"function","name":"setup_miniwob","path":"src/agentlab/benchmarks/setup_benchmark.py","language":"python","start_line":59,"end_line":87,"context_start_line":39,"context_end_line":106,"code":" lines: list[str] = []\n if env_path.exists():\n lines = env_path.read_text().splitlines()\n key_prefix = f\"{key}=\"\n updated = False\n new_line = f\"{key}={value}\"\n out_lines: list[str] = []\n for line in lines:\n if line.strip().startswith(key_prefix):\n if not updated:\n out_lines.append(new_line)\n updated = True\n # Skip any other occurrences\n else:\n out_lines.append(line)\n if not updated:\n out_lines.append(new_line)\n env_path.write_text(\"\\n\".join(out_lines) + \"\\n\")\n\n\ndef setup_miniwob(project_root: pathlib.Path) -> str:\n \"\"\"Set up MiniWob++ locally and configure MINIWOB_URL in .env.\n\n Steps:\n - Clone https://github.com/Farama-Foundation/miniwob-plusplus.git (if missing)\n - Checkout pinned commit for reproducibility\n - Compute file:// URL to the local miniwob HTML assets\n - Write MINIWOB_URL to /.env\n\n Args:\n project_root: Project root directory path.\n\n Returns:\n The configured MINIWOB_URL string.\n \"\"\"\n # Clone the upstream repo at a pinned commit and use local HTML assets\n repo_url = \"https://github.com/Farama-Foundation/miniwob-plusplus.git\"\n commit = \"7fd85d71a4b60325c6585396ec4f48377d049838\"\n clone_dir = project_root / \"miniwob-plusplus\"\n _ensure_repo(repo_url=repo_url, clone_dir=clone_dir, commit=commit)\n miniwob_dir = (clone_dir / \"miniwob\" / \"html\" / \"miniwob\").resolve()\n # We still set URL even if folder doesn't exist yet; but warn through return\n url = miniwob_dir.as_uri().rstrip(\"/\") + \"/\"\n\n env_path = project_root / \".env\"\n 
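# Standalone sketch of the idempotent KEY=VALUE upsert implemented by
# _write_env_kv: rewrite the first matching line, drop duplicate occurrences,
# and append only when the key was never present.
import pathlib

def upsert_env(env_path: pathlib.Path, key: str, value: str) -> None:
    lines = env_path.read_text().splitlines() if env_path.exists() else []
    new_line, out, updated = f"{key}={value}", [], False
    for line in lines:
        if line.strip().startswith(f"{key}="):
            if not updated:  # keep exactly one occurrence of the key
                out.append(new_line)
                updated = True
        else:
            out.append(line)
    if not updated:
        out.append(new_line)
    env_path.write_text("\n".join(out) + "\n")

env = pathlib.Path("/tmp/demo.env")  # illustrative path
upsert_env(env, "MINIWOB_URL", "file:///tmp/miniwob/")
upsert_env(env, "MINIWOB_URL", "file:///srv/miniwob/")  # replaces, not appends
assert env.read_text().count("MINIWOB_URL=") == 1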
_write_env_kv(env_path, \"MINIWOB_URL\", url)\n os.environ[\"MINIWOB_URL\"] = url # make available in current process immediately\n logger.info(\"MINIWOB_URL set to %s (recorded in %s)\", url, env_path)\n return url\n\n\ndef ensure_benchmark(benchmark: str, project_root: pathlib.Path) -> Optional[str]:\n \"\"\"Run setup lazily if required for the given benchmark.\n\n Args:\n benchmark: Name of the benchmark to ensure setup for.\n project_root: Project root directory path.\n\n Returns:\n The URL when setup is performed, otherwise None.\n \"\"\"\n key = benchmark.strip().lower()\n if key in {\"miniwob\", \"miniwob++\", \"miniwob-plusplus\"}:\n if not os.getenv(\"MINIWOB_URL\"):\n return setup_miniwob(project_root)\n return None\n # No-op for unsupported/other benchmarks\n return None","source_hash":"a2597599a39af02750d5124d62209074fb748a14c1b64b8eb71b075b1221541c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.setup_benchmark.ensure_benchmark","uri":"program://AgentLab/function/src.agentlab.benchmarks.setup_benchmark.ensure_benchmark#L90-L106","kind":"function","name":"ensure_benchmark","path":"src/agentlab/benchmarks/setup_benchmark.py","language":"python","start_line":90,"end_line":106,"context_start_line":70,"context_end_line":106,"code":"\n Returns:\n The configured MINIWOB_URL string.\n \"\"\"\n # Clone the upstream repo at a pinned commit and use local HTML assets\n repo_url = \"https://github.com/Farama-Foundation/miniwob-plusplus.git\"\n commit = \"7fd85d71a4b60325c6585396ec4f48377d049838\"\n clone_dir = project_root / \"miniwob-plusplus\"\n _ensure_repo(repo_url=repo_url, clone_dir=clone_dir, commit=commit)\n miniwob_dir = (clone_dir / \"miniwob\" / \"html\" / \"miniwob\").resolve()\n # We still set URL even if folder doesn't exist yet; but warn through return\n url = miniwob_dir.as_uri().rstrip(\"/\") + \"/\"\n\n env_path = project_root / \".env\"\n _write_env_kv(env_path, \"MINIWOB_URL\", url)\n os.environ[\"MINIWOB_URL\"] = url # make available in current process immediately\n logger.info(\"MINIWOB_URL set to %s (recorded in %s)\", url, env_path)\n return url\n\n\ndef ensure_benchmark(benchmark: str, project_root: pathlib.Path) -> Optional[str]:\n \"\"\"Run setup lazily if required for the given benchmark.\n\n Args:\n benchmark: Name of the benchmark to ensure setup for.\n project_root: Project root directory path.\n\n Returns:\n The URL when setup is performed, otherwise None.\n \"\"\"\n key = benchmark.strip().lower()\n if key in {\"miniwob\", \"miniwob++\", \"miniwob-plusplus\"}:\n if not os.getenv(\"MINIWOB_URL\"):\n return setup_miniwob(project_root)\n return None\n # No-op for unsupported/other benchmarks\n return None","source_hash":"a2597599a39af02750d5124d62209074fb748a14c1b64b8eb71b075b1221541c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia","uri":"program://AgentLab/module/src.agentlab.benchmarks.gaia#L1-L385","kind":"module","name":"src.agentlab.benchmarks.gaia","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":1,"end_line":385,"context_start_line":1,"context_end_line":385,"code":"import logging\nimport os\nimport re\nimport shutil\nimport string\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Literal, Self\n\nimport datasets\nimport hydra\nimport podman\nimport tapeagents.config\nfrom omegaconf import DictConfig\nfrom pdf2image import convert_from_path\nfrom pydantic import ConfigDict, Field\nfrom tapeagents.core import Action, 
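# How setup_miniwob derives the MINIWOB_URL value: a resolved local directory
# becomes a file:// URL with a guaranteed trailing slash. The path below is
# illustrative only.
import pathlib

miniwob_dir = pathlib.Path("/tmp/miniwob-plusplus/miniwob/html/miniwob").resolve()
url = miniwob_dir.as_uri().rstrip("/") + "/"
print(url)  # file:///tmp/miniwob-plusplus/miniwob/html/miniwob/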
Observation, StopStep, Thought\nfrom tapeagents.environment import ContainerExecutor, StatefulTool, Tool\nfrom tapeagents.steps import ImageObservation\nfrom tapeagents.tools.simple_browser import SimpleTextBrowser\n\nfrom agentlab.benchmarks.abstract_env import AbstractBenchmark, AbstractEnvArgs\nfrom agentlab.benchmarks.multitool_gym import MultiToolGym\n\nlogger = logging.getLogger(__name__)\n\nCONTAINER_NAME = \"gaia_code_shared\"\n\n\nclass GaiaGym(MultiToolGym):\n task: dict\n exp_dir: str\n\n def __init__(self, tools: list[Tool | StatefulTool], task: dict, exp_dir: str):\n super().__init__(tools=tools)\n self.task = task\n self.exp_dir = exp_dir\n os.makedirs(\".cache\", exist_ok=True)\n\n def reset(self, seed=None) -> tuple[list[Observation], dict]:\n \"\"\"\n Reset the state of all the tools and prepare initial observations from the task again\n \"\"\"\n super().reset()\n return task_to_observations(self.task), {}\n\n def calculate_reward(self, action: Action) -> float:\n if isinstance(action, GaiaAnswer):\n model_answer = action.answer\n ground_truth = self.task[\"Final answer\"]\n reward = 1.0 if question_scorer(model_answer, ground_truth) else 0.0\n else:\n reward = 0.0\n\n if reward == 1.0:\n logger.info(f\"Task {self.task['task_id']} solved.\")\n else:\n logger.info(f\"Task {self.task['task_id']} failed.\")\n\n return reward\n\n\n@dataclass\nclass GaiaGymArgs(AbstractEnvArgs):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n task: dict[str, Any]\n task_seed: int\n task_name: str\n env_config: DictConfig\n\n def __init__(\n self,\n task_name: str,\n task: dict[str, Any],\n env_config: DictConfig,\n task_seed: int = 0,\n ):\n self.task_name = task_name\n self.task = task\n self.task_seed = task_seed\n self.env_config = env_config\n\n def make_env(self, exp_dir: Path, action_mapping=None) -> GaiaGym:\n tapeagents.config.DB_DEFAULT_FILENAME = str(exp_dir.parent / \"tapedata.sqlite\")\n exp_dir_str = str(exp_dir)\n logger.info(f\"Init gaia env with directory {exp_dir_str}\")\n init_code_sandbox(exp_dir_str)\n for i in range(len(self.env_config.tools)):\n if hasattr(self.env_config.tools[i], \"exp_path\"):\n self.env_config.tools[i].exp_path = exp_dir_str\n tools = hydra.utils.instantiate(self.env_config.tools)\n env = GaiaGym(tools=tools, task=self.task, exp_dir=exp_dir_str)\n return env\n\n\ndef init_code_sandbox(exp_dir: str) -> None:\n # Use a common code directory for all tasks in the experiment, which is mounted in the container\n root_exp_dir = Path(exp_dir).parent\n code_path = os.path.join(root_exp_dir, \"shared_code\")\n os.makedirs(code_path, exist_ok=True)\n os.environ[\"COMPUTER_CONTAINER_NAME\"] = CONTAINER_NAME\n\n # symlink task code to the shared code directory\n task_code_path = os.path.join(exp_dir, \"code\")\n if not os.path.exists(task_code_path):\n os.symlink(code_path, task_code_path)\n\n try:\n ContainerExecutor(container_name=CONTAINER_NAME, work_dir=code_path, no_deps=True)\n except Exception as e:\n logger.warning(f\"Failed to initialize container executor: {e}\")\n\n\ndef stop_old_sandbox():\n try:\n podman.from_env().containers.get(CONTAINER_NAME).stop()\n except Exception as e:\n logger.warning(f\"Failed to stop old container {CONTAINER_NAME}: {e}\")\n\n\nclass GaiaBenchmark(AbstractBenchmark):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n name: str = \"gaia\"\n split: Literal[\"test\", \"validation\"]\n level: Literal[\"1\", \"2\", \"3\", \"all\"] = \"all\"\n env_args_list: list[GaiaGymArgs] = None # type: ignore\n dataset: 
dict | None = None # type: ignore\n env_config: DictConfig = None # type: ignore\n\n @classmethod\n def from_config(cls, config: DictConfig, dataset: dict | None = None) -> Self:\n return cls(\n split=config.split,\n level=config.level,\n env_config=config.environment,\n dataset=dataset,\n )\n\n def model_post_init(self, __context: Any) -> None:\n self.env_args_list = []\n number = 0\n if self.dataset is None:\n self.dataset = datasets.load_dataset(\n path=\"gaia-benchmark/GAIA\",\n name=\"2023_all\",\n trust_remote_code=True,\n ) # type: ignore\n for task in self.dataset[self.split]: # type: ignore\n if self.level != \"all\" and task[\"Level\"] != self.level:\n continue\n number += 1\n task[\"number\"] = number\n name = f\"gaia.{task['task_id']}\"\n env_args = GaiaGymArgs(task_name=name, task=task, env_config=self.env_config)\n self.env_args_list.append(env_args)\n logger.info(f\"Loaded {len(self.env_args_list)} tasks from {self.split} split\")\n\n\nclass ExtractedFacts(Thought):\n \"\"\"\n Thought that contains the list of facts extracted from the document\n \"\"\"\n\n kind: Literal[\"extracted_facts_thought\"] = \"extracted_facts_thought\" # type: ignore\n extracted_facts: list[str] | dict[str, Any] | str = Field(\n description=\"facts extracted from the observation\"\n )\n\n\nclass GaiaQuestion(Observation):\n kind: Literal[\"question\"] = \"question\" # type: ignore\n content: str\n filename: str | None = None\n\n @classmethod\n def from_task(cls, question: dict, files_dir: str = \"/tmp/gaia_files\"):\n os.makedirs(files_dir, exist_ok=True)\n question_prompt = question[\"Question\"]\n filename = None\n if question[\"file_path\"]:\n basename = os.path.basename(question[\"file_path\"])\n tmp_fname = os.path.join(files_dir, basename)\n shutil.copyfile(question[\"file_path\"], tmp_fname)\n assert os.path.exists(tmp_fname)\n filename = tmp_fname\n return cls(content=question_prompt, filename=filename)\n\n\ndef task_to_observations(task: dict, max_doc_length: int = 8000) -> list[Observation]:\n browser = SimpleTextBrowser()\n question = GaiaQuestion.from_task(task)\n if not question.filename:\n return [question]\n\n filename: str | None = question.filename\n question.filename = None\n steps: list[Observation] = []\n name, ext = filename.rsplit(\".\", maxsplit=1)\n ext = ext.lower()\n if ext == \"zip\":\n folder_name = name\n os.makedirs(folder_name, exist_ok=True)\n shutil.unpack_archive(filename, folder_name)\n document_text = \"\\n\\nArchive contains the following files:\\n\"\n for i, file in enumerate(os.listdir(folder_name)):\n file_path = os.path.join(folder_name, file)\n content = browser.get_whole_document(file_path)\n file_text = f\"{i+1}. {file}. Content:\\n{content}\\n\\n\"\n if len(file_text) > max_doc_length:\n file_text = \"\"\n file_text += f\"{i+1}. 
Path to the '{file}': {file_path}\"\n document_text += file_text\n elif ext in (\"png\", \"jpg\", \"jpeg\"):\n steps.append(ImageObservation(image_path=filename, image_caption=\"Attached image\"))\n document_text = \"\"\n else:\n attach_doc_text = True\n if ext == \"pdf\":\n images, total_pages = pdf_to_images(filename)\n if total_pages <= 3:\n attach_doc_text = False\n for i, img_path in enumerate(images):\n steps.append(ImageObservation(image_path=img_path, image_caption=f\"PDF page {i+1}\"))\n if attach_doc_text:\n try:\n content = browser.get_whole_document(filename)\n except Exception as e:\n logger.exception(f\"Failed to read document: {e}\")\n content = \"\"\n document_text = f\"\\n\\nAttached {ext.upper()} file content:\\n{content}\\n\"\n if not len(content) or len(document_text) > max_doc_length:\n document_text = \"\"\n else:\n document_text = \"\\nDocument pages attached as images below\"\n question.filename = filename\n question.content += document_text\n return [question] + steps\n\n\ndef pdf_to_images(filename: str, n_pages: int = 3):\n images = []\n for i, image in enumerate(convert_from_path(filename)):\n page_index = i + 1\n page_fname = filename[:-4] + f\"_{page_index}.png\"\n if os.path.exists(page_fname):\n images.append(page_fname)\n continue\n image.save(page_fname)\n images.append(page_fname)\n return images[:n_pages], len(images)\n\n\nclass GaiaAnswer(StopStep):\n \"\"\"\n Action that indicates the agent has finished the plan and contains the answer or description of failure.\n The answer should use already determined facts without additional conversion!\n Your final answer should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.\n ADDITIONALLY, your final answer MUST follow any formatting instructions specified in the original question (e.g., alphabetization, sequencing, units, rounding, decimal places, etc.)\n If asked for a number, express it numerically, don't use commas, do not add anything after the number, don't include units such as $ or percent signs unless specified otherwise in the question.\n If asked for a string, don't use articles or abbreviations (e.g. for cities), unless specified otherwise. Don't output any final sentence punctuation such as '.', '!', or '?'.\n If asked for a comma-separated list, apply the above rules depending on whether the elements are numbers or strings.\n If unable to determine the final answer, output an empty string.\n \"\"\"\n\n kind: Literal[\"gaia_answer_action\"] = \"gaia_answer_action\" # type: ignore\n success: bool = Field(description=\"True if the task was successful, False otherwise\")\n overview: str = Field(\n description=\"List of steps performed to answer the question. 
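# Condensed sketch of the dispatch inside task_to_observations: the attached
# file's extension decides whether it becomes inlined text, an image
# observation, or rendered PDF pages. Observations are simplified to dicts.
def attachment_to_steps(filename: str) -> list[dict]:
    ext = filename.rsplit(".", maxsplit=1)[-1].lower()
    if ext == "zip":
        return [{"kind": "text", "note": f"unpack {filename} and inline each file"}]
    if ext in ("png", "jpg", "jpeg"):
        return [{"kind": "image", "path": filename, "caption": "Attached image"}]
    if ext == "pdf":
        return [{"kind": "image", "path": filename, "caption": "PDF page render"}]
    return [{"kind": "text", "note": f"read {filename} as a whole document"}]

assert attachment_to_steps("table.pdf")[0]["kind"] == "image"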
If the task was not successful, includes the reason for failure\"\n )\n answer_unit: str = Field(\n description=\"Unit of measurement for the answer, if applicable; otherwise an empty string\"\n )\n answer: Any = Field(description=\"Short final answer\")\n long_answer: str = Field(description=\"Detailed final answer not restricted by format rules\")\n\n\ndef step_error(step_dict: dict, last_action: str | None) -> str:\n kind = step_dict.get(\"kind\", \"unknown\")\n error = \"\"\n if kind == \"search_results_observation\" and not len(step_dict.get(\"serp\", [])):\n error = \"search_empty\"\n elif kind == \"page_observation\" and step_dict.get(\"error\"):\n error = \"browser\"\n elif kind == \"llm_output_parsing_failure_action\":\n error = \"parsing\"\n elif kind == \"action_execution_failure\":\n error = last_action if last_action else \"action_failure\"\n elif kind == \"code_execution_result\" and step_dict.get(\"result\", {}).get(\"exit_code\"):\n error = \"code\"\n return error\n\n\ndef normalize_number_str(number_str: str) -> float:\n # we replace these common units and commas to allow\n # conversion to float\n for char in [\"$\", \"%\", \",\"]:\n number_str = number_str.replace(char, \"\")\n try:\n return float(number_str)\n except ValueError:\n logger.info(f\"String {number_str} cannot be normalized to number str.\")\n return float(\"inf\")\n\n\ndef split_string(\n s: str,\n char_list: list[str] = [\",\", \";\"],\n) -> list[str]:\n pattern = f\"[{''.join(char_list)}]\"\n return re.split(pattern, s)\n\n\ndef question_scorer(\n model_answer: str,\n ground_truth: str,\n) -> bool:\n def is_float(element: Any) -> bool:\n try:\n float(element)\n return True\n except ValueError:\n return False\n\n # if gt is a number\n if is_float(ground_truth):\n logger.info(f\"Evaluating {model_answer} as a number.\")\n normalized_answer = normalize_number_str(model_answer)\n return normalized_answer == float(ground_truth)\n\n # if gt is a list\n elif any(char in ground_truth for char in [\",\", \";\"]):\n logger.info(f\"Evaluating {model_answer} as a comma separated list.\")\n # question with the fish: normalization removes punct\n\n gt_elems = split_string(ground_truth)\n ma_elems = split_string(model_answer)\n\n # check length is the same\n if len(gt_elems) != len(ma_elems):\n logger.warning(\"Answer lists have different lengths, returning False.\")\n return False\n\n # compare each element as float or str\n comparisons = []\n for ma_elem, gt_elem in zip(ma_elems, gt_elems):\n if is_float(gt_elem):\n normalized_ma_elem = normalize_number_str(ma_elem)\n comparisons.append(normalized_ma_elem == float(gt_elem))\n else:\n # we do not remove punct since comparisons can include punct\n comparisons.append(\n normalize_str(ma_elem, remove_punct=False)\n == normalize_str(gt_elem, remove_punct=False)\n )\n return all(comparisons)\n\n # if gt is a str\n else:\n logger.info(f\"Evaluating {model_answer} as a string.\")\n return normalize_str(model_answer) == normalize_str(ground_truth)\n\n\ndef normalize_str(input_str, remove_punct=True) -> str:\n \"\"\"\n Normalize a string by:\n - Removing all white spaces\n - Optionally removing punctuation (if remove_punct is True)\n - Converting to lowercase\n\n Args:\n input_str: str, the string to normalize\n remove_punct: bool, whether to remove punctuation (default: True)\n\n Returns:\n str, the normalized string\n \"\"\"\n # Remove all white spaces. Required e.g for seagull vs. 
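# Worked examples of the matching rules used by question_scorer: numeric
# ground truths compare as floats after stripping $, % and commas; strings
# compare after whitespace, punctuation and casing are normalized. The two
# helpers mirror normalize_number_str and normalize_str in miniature.
import re
import string

def norm_num(s: str) -> float:
    for ch in ("$", "%", ","):
        s = s.replace(ch, "")
    try:
        return float(s)
    except ValueError:
        return float("inf")  # never equal to a real ground truth

def norm_str(s: str) -> str:
    no_spaces = re.sub(r"\s", "", s).lower()
    return no_spaces.translate(str.maketrans("", "", string.punctuation))

assert norm_num("$1,234") == 1234.0       # so the answer "1234" scores as a match
assert norm_str("Sea Gull.") == norm_str("seagull")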
sea gull\n no_spaces = re.sub(r\"\\s\", \"\", input_str)\n\n # Remove punctuation, if specified.\n if remove_punct:\n translator = str.maketrans(\"\", \"\", string.punctuation)\n return no_spaces.lower().translate(translator)\n else:\n return no_spaces.lower()","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.GaiaGym","uri":"program://AgentLab/class/src.agentlab.benchmarks.gaia.GaiaGym#L30-L60","kind":"class","name":"GaiaGym","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":30,"end_line":60,"context_start_line":10,"context_end_line":80,"code":"import datasets\nimport hydra\nimport podman\nimport tapeagents.config\nfrom omegaconf import DictConfig\nfrom pdf2image import convert_from_path\nfrom pydantic import ConfigDict, Field\nfrom tapeagents.core import Action, Observation, StopStep, Thought\nfrom tapeagents.environment import ContainerExecutor, StatefulTool, Tool\nfrom tapeagents.steps import ImageObservation\nfrom tapeagents.tools.simple_browser import SimpleTextBrowser\n\nfrom agentlab.benchmarks.abstract_env import AbstractBenchmark, AbstractEnvArgs\nfrom agentlab.benchmarks.multitool_gym import MultiToolGym\n\nlogger = logging.getLogger(__name__)\n\nCONTAINER_NAME = \"gaia_code_shared\"\n\n\nclass GaiaGym(MultiToolGym):\n task: dict\n exp_dir: str\n\n def __init__(self, tools: list[Tool | StatefulTool], task: dict, exp_dir: str):\n super().__init__(tools=tools)\n self.task = task\n self.exp_dir = exp_dir\n os.makedirs(\".cache\", exist_ok=True)\n\n def reset(self, seed=None) -> tuple[list[Observation], dict]:\n \"\"\"\n Reset the state of all the tools and prepare initial observations from the task again\n \"\"\"\n super().reset()\n return task_to_observations(self.task), {}\n\n def calculate_reward(self, action: Action) -> float:\n if isinstance(action, GaiaAnswer):\n model_answer = action.answer\n ground_truth = self.task[\"Final answer\"]\n reward = 1.0 if question_scorer(model_answer, ground_truth) else 0.0\n else:\n reward = 0.0\n\n if reward == 1.0:\n logger.info(f\"Task {self.task['task_id']} solved.\")\n else:\n logger.info(f\"Task {self.task['task_id']} failed.\")\n\n return reward\n\n\n@dataclass\nclass GaiaGymArgs(AbstractEnvArgs):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n task: dict[str, Any]\n task_seed: int\n task_name: str\n env_config: DictConfig\n\n def __init__(\n self,\n task_name: str,\n task: dict[str, Any],\n env_config: DictConfig,\n task_seed: int = 0,\n ):\n self.task_name = task_name\n self.task = task\n self.task_seed = task_seed","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.GaiaGymArgs","uri":"program://AgentLab/class/src.agentlab.benchmarks.gaia.GaiaGymArgs#L64-L93","kind":"class","name":"GaiaGymArgs","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":64,"end_line":93,"context_start_line":44,"context_end_line":113,"code":" super().reset()\n return task_to_observations(self.task), {}\n\n def calculate_reward(self, action: Action) -> float:\n if isinstance(action, GaiaAnswer):\n model_answer = action.answer\n ground_truth = self.task[\"Final answer\"]\n reward = 1.0 if question_scorer(model_answer, ground_truth) else 0.0\n else:\n reward = 0.0\n\n if reward == 1.0:\n logger.info(f\"Task {self.task['task_id']} solved.\")\n else:\n logger.info(f\"Task 
{self.task['task_id']} failed.\")\n\n return reward\n\n\n@dataclass\nclass GaiaGymArgs(AbstractEnvArgs):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n task: dict[str, Any]\n task_seed: int\n task_name: str\n env_config: DictConfig\n\n def __init__(\n self,\n task_name: str,\n task: dict[str, Any],\n env_config: DictConfig,\n task_seed: int = 0,\n ):\n self.task_name = task_name\n self.task = task\n self.task_seed = task_seed\n self.env_config = env_config\n\n def make_env(self, exp_dir: Path, action_mapping=None) -> GaiaGym:\n tapeagents.config.DB_DEFAULT_FILENAME = str(exp_dir.parent / \"tapedata.sqlite\")\n exp_dir_str = str(exp_dir)\n logger.info(f\"Init gaia env with directory {exp_dir_str}\")\n init_code_sandbox(exp_dir_str)\n for i in range(len(self.env_config.tools)):\n if hasattr(self.env_config.tools[i], \"exp_path\"):\n self.env_config.tools[i].exp_path = exp_dir_str\n tools = hydra.utils.instantiate(self.env_config.tools)\n env = GaiaGym(tools=tools, task=self.task, exp_dir=exp_dir_str)\n return env\n\n\ndef init_code_sandbox(exp_dir: str) -> None:\n # Use a common code directory for all tasks in the experiment, which is mounted in the container\n root_exp_dir = Path(exp_dir).parent\n code_path = os.path.join(root_exp_dir, \"shared_code\")\n os.makedirs(code_path, exist_ok=True)\n os.environ[\"COMPUTER_CONTAINER_NAME\"] = CONTAINER_NAME\n\n # symlink task code to the shared code directory\n task_code_path = os.path.join(exp_dir, \"code\")\n if not os.path.exists(task_code_path):\n os.symlink(code_path, task_code_path)\n\n try:\n ContainerExecutor(container_name=CONTAINER_NAME, work_dir=code_path, no_deps=True)\n except Exception as e:\n logger.warning(f\"Failed to initialize container executor: {e}\")\n\n","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.init_code_sandbox","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.init_code_sandbox#L96-L111","kind":"function","name":"init_code_sandbox","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":96,"end_line":111,"context_start_line":76,"context_end_line":131,"code":" task_seed: int = 0,\n ):\n self.task_name = task_name\n self.task = task\n self.task_seed = task_seed\n self.env_config = env_config\n\n def make_env(self, exp_dir: Path, action_mapping=None) -> GaiaGym:\n tapeagents.config.DB_DEFAULT_FILENAME = str(exp_dir.parent / \"tapedata.sqlite\")\n exp_dir_str = str(exp_dir)\n logger.info(f\"Init gaia env with directory {exp_dir_str}\")\n init_code_sandbox(exp_dir_str)\n for i in range(len(self.env_config.tools)):\n if hasattr(self.env_config.tools[i], \"exp_path\"):\n self.env_config.tools[i].exp_path = exp_dir_str\n tools = hydra.utils.instantiate(self.env_config.tools)\n env = GaiaGym(tools=tools, task=self.task, exp_dir=exp_dir_str)\n return env\n\n\ndef init_code_sandbox(exp_dir: str) -> None:\n # Use a common code directory for all tasks in the experiment, which is mounted in the container\n root_exp_dir = Path(exp_dir).parent\n code_path = os.path.join(root_exp_dir, \"shared_code\")\n os.makedirs(code_path, exist_ok=True)\n os.environ[\"COMPUTER_CONTAINER_NAME\"] = CONTAINER_NAME\n\n # symlink task code to the shared code directory\n task_code_path = os.path.join(exp_dir, \"code\")\n if not os.path.exists(task_code_path):\n os.symlink(code_path, task_code_path)\n\n try:\n ContainerExecutor(container_name=CONTAINER_NAME, work_dir=code_path, 
no_deps=True)\n except Exception as e:\n logger.warning(f\"Failed to initialize container executor: {e}\")\n\n\ndef stop_old_sandbox():\n try:\n podman.from_env().containers.get(CONTAINER_NAME).stop()\n except Exception as e:\n logger.warning(f\"Failed to stop old container {CONTAINER_NAME}: {e}\")\n\n\nclass GaiaBenchmark(AbstractBenchmark):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n name: str = \"gaia\"\n split: Literal[\"test\", \"validation\"]\n level: Literal[\"1\", \"2\", \"3\", \"all\"] = \"all\"\n env_args_list: list[GaiaGymArgs] = None # type: ignore\n dataset: dict | None = None # type: ignore\n env_config: DictConfig = None # type: ignore\n\n @classmethod\n def from_config(cls, config: DictConfig, dataset: dict | None = None) -> Self:","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.stop_old_sandbox","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.stop_old_sandbox#L114-L118","kind":"function","name":"stop_old_sandbox","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":114,"end_line":118,"context_start_line":94,"context_end_line":138,"code":"\n\ndef init_code_sandbox(exp_dir: str) -> None:\n # Use a common code directory for all tasks in the experiment, which is mounted in the container\n root_exp_dir = Path(exp_dir).parent\n code_path = os.path.join(root_exp_dir, \"shared_code\")\n os.makedirs(code_path, exist_ok=True)\n os.environ[\"COMPUTER_CONTAINER_NAME\"] = CONTAINER_NAME\n\n # symlink task code to the shared code directory\n task_code_path = os.path.join(exp_dir, \"code\")\n if not os.path.exists(task_code_path):\n os.symlink(code_path, task_code_path)\n\n try:\n ContainerExecutor(container_name=CONTAINER_NAME, work_dir=code_path, no_deps=True)\n except Exception as e:\n logger.warning(f\"Failed to initialize container executor: {e}\")\n\n\ndef stop_old_sandbox():\n try:\n podman.from_env().containers.get(CONTAINER_NAME).stop()\n except Exception as e:\n logger.warning(f\"Failed to stop old container {CONTAINER_NAME}: {e}\")\n\n\nclass GaiaBenchmark(AbstractBenchmark):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n name: str = \"gaia\"\n split: Literal[\"test\", \"validation\"]\n level: Literal[\"1\", \"2\", \"3\", \"all\"] = \"all\"\n env_args_list: list[GaiaGymArgs] = None # type: ignore\n dataset: dict | None = None # type: ignore\n env_config: DictConfig = None # type: ignore\n\n @classmethod\n def from_config(cls, config: DictConfig, dataset: dict | None = None) -> Self:\n return cls(\n split=config.split,\n level=config.level,\n env_config=config.environment,\n dataset=dataset,\n )\n","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.GaiaBenchmark","uri":"program://AgentLab/class/src.agentlab.benchmarks.gaia.GaiaBenchmark#L121-L156","kind":"class","name":"GaiaBenchmark","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":121,"end_line":156,"context_start_line":101,"context_end_line":176,"code":" os.environ[\"COMPUTER_CONTAINER_NAME\"] = CONTAINER_NAME\n\n # symlink task code to the shared code directory\n task_code_path = os.path.join(exp_dir, \"code\")\n if not os.path.exists(task_code_path):\n os.symlink(code_path, task_code_path)\n\n try:\n ContainerExecutor(container_name=CONTAINER_NAME, work_dir=code_path, no_deps=True)\n except Exception as 
e:\n logger.warning(f\"Failed to initialize container executor: {e}\")\n\n\ndef stop_old_sandbox():\n try:\n podman.from_env().containers.get(CONTAINER_NAME).stop()\n except Exception as e:\n logger.warning(f\"Failed to stop old container {CONTAINER_NAME}: {e}\")\n\n\nclass GaiaBenchmark(AbstractBenchmark):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n name: str = \"gaia\"\n split: Literal[\"test\", \"validation\"]\n level: Literal[\"1\", \"2\", \"3\", \"all\"] = \"all\"\n env_args_list: list[GaiaGymArgs] = None # type: ignore\n dataset: dict | None = None # type: ignore\n env_config: DictConfig = None # type: ignore\n\n @classmethod\n def from_config(cls, config: DictConfig, dataset: dict | None = None) -> Self:\n return cls(\n split=config.split,\n level=config.level,\n env_config=config.environment,\n dataset=dataset,\n )\n\n def model_post_init(self, __context: Any) -> None:\n self.env_args_list = []\n number = 0\n if self.dataset is None:\n self.dataset = datasets.load_dataset(\n path=\"gaia-benchmark/GAIA\",\n name=\"2023_all\",\n trust_remote_code=True,\n ) # type: ignore\n for task in self.dataset[self.split]: # type: ignore\n if self.level != \"all\" and task[\"Level\"] != self.level:\n continue\n number += 1\n task[\"number\"] = number\n name = f\"gaia.{task['task_id']}\"\n env_args = GaiaGymArgs(task_name=name, task=task, env_config=self.env_config)\n self.env_args_list.append(env_args)\n logger.info(f\"Loaded {len(self.env_args_list)} tasks from {self.split} split\")\n\n\nclass ExtractedFacts(Thought):\n \"\"\"\n Thought that contains the list of facts extracted from the document\n \"\"\"\n\n kind: Literal[\"extracted_facts_thought\"] = \"extracted_facts_thought\" # type: ignore\n extracted_facts: list[str] | dict[str, Any] | str = Field(\n description=\"facts extracted from the observation\"\n )\n\n\nclass GaiaQuestion(Observation):\n kind: Literal[\"question\"] = \"question\" # type: ignore\n content: str\n filename: str | None = None\n\n @classmethod\n def from_task(cls, question: dict, files_dir: str = \"/tmp/gaia_files\"):","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.ExtractedFacts","uri":"program://AgentLab/class/src.agentlab.benchmarks.gaia.ExtractedFacts#L159-L167","kind":"class","name":"ExtractedFacts","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":159,"end_line":167,"context_start_line":139,"context_end_line":187,"code":" def model_post_init(self, __context: Any) -> None:\n self.env_args_list = []\n number = 0\n if self.dataset is None:\n self.dataset = datasets.load_dataset(\n path=\"gaia-benchmark/GAIA\",\n name=\"2023_all\",\n trust_remote_code=True,\n ) # type: ignore\n for task in self.dataset[self.split]: # type: ignore\n if self.level != \"all\" and task[\"Level\"] != self.level:\n continue\n number += 1\n task[\"number\"] = number\n name = f\"gaia.{task['task_id']}\"\n env_args = GaiaGymArgs(task_name=name, task=task, env_config=self.env_config)\n self.env_args_list.append(env_args)\n logger.info(f\"Loaded {len(self.env_args_list)} tasks from {self.split} split\")\n\n\nclass ExtractedFacts(Thought):\n \"\"\"\n Thought that contains the list of facts extracted from the document\n \"\"\"\n\n kind: Literal[\"extracted_facts_thought\"] = \"extracted_facts_thought\" # type: ignore\n extracted_facts: list[str] | dict[str, Any] | str = Field(\n description=\"facts extracted from the observation\"\n 
)\n\n\nclass GaiaQuestion(Observation):\n kind: Literal[\"question\"] = \"question\" # type: ignore\n content: str\n filename: str | None = None\n\n @classmethod\n def from_task(cls, question: dict, files_dir: str = \"/tmp/gaia_files\"):\n os.makedirs(files_dir, exist_ok=True)\n question_prompt = question[\"Question\"]\n filename = None\n if question[\"file_path\"]:\n basename = os.path.basename(question[\"file_path\"])\n tmp_fname = os.path.join(files_dir, basename)\n shutil.copyfile(question[\"file_path\"], tmp_fname)\n assert os.path.exists(tmp_fname)\n filename = tmp_fname\n return cls(content=question_prompt, filename=filename)\n","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.GaiaQuestion","uri":"program://AgentLab/class/src.agentlab.benchmarks.gaia.GaiaQuestion#L170-L186","kind":"class","name":"GaiaQuestion","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":170,"end_line":186,"context_start_line":150,"context_end_line":206,"code":" continue\n number += 1\n task[\"number\"] = number\n name = f\"gaia.{task['task_id']}\"\n env_args = GaiaGymArgs(task_name=name, task=task, env_config=self.env_config)\n self.env_args_list.append(env_args)\n logger.info(f\"Loaded {len(self.env_args_list)} tasks from {self.split} split\")\n\n\nclass ExtractedFacts(Thought):\n \"\"\"\n Thought that contains the list of facts extracted from the document\n \"\"\"\n\n kind: Literal[\"extracted_facts_thought\"] = \"extracted_facts_thought\" # type: ignore\n extracted_facts: list[str] | dict[str, Any] | str = Field(\n description=\"facts extracted from the observation\"\n )\n\n\nclass GaiaQuestion(Observation):\n kind: Literal[\"question\"] = \"question\" # type: ignore\n content: str\n filename: str | None = None\n\n @classmethod\n def from_task(cls, question: dict, files_dir: str = \"/tmp/gaia_files\"):\n os.makedirs(files_dir, exist_ok=True)\n question_prompt = question[\"Question\"]\n filename = None\n if question[\"file_path\"]:\n basename = os.path.basename(question[\"file_path\"])\n tmp_fname = os.path.join(files_dir, basename)\n shutil.copyfile(question[\"file_path\"], tmp_fname)\n assert os.path.exists(tmp_fname)\n filename = tmp_fname\n return cls(content=question_prompt, filename=filename)\n\n\ndef task_to_observations(task: dict, max_doc_length: int = 8000) -> list[Observation]:\n browser = SimpleTextBrowser()\n question = GaiaQuestion.from_task(task)\n if not question.filename:\n return [question]\n\n filename: str | None = question.filename\n question.filename = None\n steps: list[Observation] = []\n name, ext = filename.rsplit(\".\", maxsplit=1)\n ext = ext.lower()\n if ext == \"zip\":\n folder_name = name\n os.makedirs(folder_name, exist_ok=True)\n shutil.unpack_archive(filename, folder_name)\n document_text = \"\\n\\nArchive contains the following files:\\n\"\n for i, file in enumerate(os.listdir(folder_name)):\n file_path = os.path.join(folder_name, file)","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.task_to_observations","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.task_to_observations#L189-L237","kind":"function","name":"task_to_observations","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":189,"end_line":237,"context_start_line":169,"context_end_line":257,"code":"\nclass 
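
A small sketch of GaiaQuestion.from_task above, assuming only the two dataset keys the method reads ("Question" and "file_path"): with no attachment, nothing is copied and filename stays None.

from agentlab.benchmarks.gaia import GaiaQuestion

task = {"Question": "What is the capital of France?", "file_path": ""}
q = GaiaQuestion.from_task(task)
assert q.content == "What is the capital of France?"
assert q.filename is None  # no file to copy into files_dir
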
GaiaQuestion(Observation):\n kind: Literal[\"question\"] = \"question\" # type: ignore\n content: str\n filename: str | None = None\n\n @classmethod\n def from_task(cls, question: dict, files_dir: str = \"/tmp/gaia_files\"):\n os.makedirs(files_dir, exist_ok=True)\n question_prompt = question[\"Question\"]\n filename = None\n if question[\"file_path\"]:\n basename = os.path.basename(question[\"file_path\"])\n tmp_fname = os.path.join(files_dir, basename)\n shutil.copyfile(question[\"file_path\"], tmp_fname)\n assert os.path.exists(tmp_fname)\n filename = tmp_fname\n return cls(content=question_prompt, filename=filename)\n\n\ndef task_to_observations(task: dict, max_doc_length: int = 8000) -> list[Observation]:\n browser = SimpleTextBrowser()\n question = GaiaQuestion.from_task(task)\n if not question.filename:\n return [question]\n\n filename: str | None = question.filename\n question.filename = None\n steps: list[Observation] = []\n name, ext = filename.rsplit(\".\", maxsplit=1)\n ext = ext.lower()\n if ext == \"zip\":\n folder_name = name\n os.makedirs(folder_name, exist_ok=True)\n shutil.unpack_archive(filename, folder_name)\n document_text = \"\\n\\nArchive contains the following files:\\n\"\n for i, file in enumerate(os.listdir(folder_name)):\n file_path = os.path.join(folder_name, file)\n content = browser.get_whole_document(file_path)\n file_text = f\"{i+1}. {file}. Content:\\n{content}\\n\\n\"\n if len(file_text) > max_doc_length:\n file_text = \"\"\n file_text += f\"{i+1}. Path to the '{file}': {file_path}\"\n document_text += file_text\n elif ext in (\"png\", \"jpg\", \"jpeg\"):\n steps.append(ImageObservation(image_path=filename, image_caption=\"Attached image\"))\n document_text = \"\"\n else:\n attach_doc_text = True\n if ext == \"pdf\":\n images, total_pages = pdf_to_images(filename)\n if total_pages <= 3:\n attach_doc_text = False\n for i, img_path in enumerate(images):\n steps.append(ImageObservation(image_path=img_path, image_caption=f\"PDF page {i+1}\"))\n if attach_doc_text:\n try:\n content = browser.get_whole_document(filename)\n except Exception as e:\n logger.exception(f\"Failed to read document: {e}\")\n content = \"\"\n document_text = f\"\\n\\nAttached {ext.upper()} file content:\\n{content}\\n\"\n if not len(content) or len(document_text) > max_doc_length:\n document_text = \"\"\n else:\n document_text = \"\\nDocument pages attached as images below\"\n question.filename = filename\n question.content += document_text\n return [question] + steps\n\n\ndef pdf_to_images(filename: str, n_pages: int = 3):\n images = []\n for i, image in enumerate(convert_from_path(filename)):\n page_index = i + 1\n page_fname = filename[:-4] + f\"_{page_index}.png\"\n if os.path.exists(page_fname):\n images.append(page_fname)\n continue\n image.save(page_fname)\n images.append(page_fname)\n return images[:n_pages], len(images)\n\n\nclass GaiaAnswer(StopStep):\n \"\"\"\n Action that indicates the agent has finished the plan and contains the answer or description of failure.\n The answer should use already determined facts without additional conversion!\n Your final answer should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} 
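
How task_to_observations routes attachments, as a sketch: images become a separate ImageObservation after the question, zip archives are unpacked and inlined as a file listing, and other documents are inlined as text only when they fit max_doc_length. The image path here is hypothetical.

from agentlab.benchmarks.gaia import task_to_observations

task = {"Question": "What does the chart show?", "file_path": "/tmp/inputs/chart.png"}
question, image = task_to_observations(task)  # question keeps its filename; no text is inlined
assert image.image_caption == "Attached image"
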
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.pdf_to_images","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.pdf_to_images#L240-L250","kind":"function","name":"pdf_to_images","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":240,"end_line":250,"context_start_line":220,"context_end_line":270,"code":" if total_pages <= 3:\n attach_doc_text = False\n for i, img_path in enumerate(images):\n steps.append(ImageObservation(image_path=img_path, image_caption=f\"PDF page {i+1}\"))\n if attach_doc_text:\n try:\n content = browser.get_whole_document(filename)\n except Exception as e:\n logger.exception(f\"Failed to read document: {e}\")\n content = \"\"\n document_text = f\"\\n\\nAttached {ext.upper()} file content:\\n{content}\\n\"\n if not len(content) or len(document_text) > max_doc_length:\n document_text = \"\"\n else:\n document_text = \"\\nDocument pages attached as images below\"\n question.filename = filename\n question.content += document_text\n return [question] + steps\n\n\ndef pdf_to_images(filename: str, n_pages: int = 3):\n images = []\n for i, image in enumerate(convert_from_path(filename)):\n page_index = i + 1\n page_fname = filename[:-4] + f\"_{page_index}.png\"\n if os.path.exists(page_fname):\n images.append(page_fname)\n continue\n image.save(page_fname)\n images.append(page_fname)\n return images[:n_pages], len(images)\n\n\nclass GaiaAnswer(StopStep):\n \"\"\"\n Action that indicates the agent has finished the plan and contains the answer or description of failure.\n The answer should use already determined facts without additional conversion!\n Your final answer should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.\n ADDITIONALLY, your final answer MUST follow any formatting instructions specified in the original question (e.g., alphabetization, sequencing, units, rounding, decimal places, etc.)\n If asked for a number, express it numerically, don't use commas, do not add anything after the number, don't include units such as $ or percent signs unless specified otherwise in the question.\n If asked for a string, don't use articles or abbreviations (e.g. for cities), unless specified otherwise. Don't output any final sentence punctuation such as '.', '!', or '?'.\n If asked for a comma-separated list, apply the above rules depending on whether the elements are numbers or strings.\n If unable to determine the final answer, output an empty string.\n \"\"\"\n\n kind: Literal[\"gaia_answer_action\"] = \"gaia_answer_action\" # type: ignore\n success: bool = Field(description=\"True if the task was successful, False otherwise\")\n overview: str = Field(\n description=\"List of steps performed to answer the question. 
If the task was not successful, includes the reason for failure\"\n )\n answer_unit: str = Field(","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.GaiaAnswer","uri":"program://AgentLab/class/src.agentlab.benchmarks.gaia.GaiaAnswer#L253-L274","kind":"class","name":"GaiaAnswer","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":253,"end_line":274,"context_start_line":233,"context_end_line":294,"code":" else:\n document_text = \"\\nDocument pages attached as images below\"\n question.filename = filename\n question.content += document_text\n return [question] + steps\n\n\ndef pdf_to_images(filename: str, n_pages: int = 3):\n images = []\n for i, image in enumerate(convert_from_path(filename)):\n page_index = i + 1\n page_fname = filename[:-4] + f\"_{page_index}.png\"\n if os.path.exists(page_fname):\n images.append(page_fname)\n continue\n image.save(page_fname)\n images.append(page_fname)\n return images[:n_pages], len(images)\n\n\nclass GaiaAnswer(StopStep):\n \"\"\"\n Action that indicates the agent has finished the plan and contains the answer or description of failure.\n The answer should use already determined facts without additional conversion!\n Your final answer should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.\n ADDITIONALLY, your final answer MUST follow any formatting instructions specified in the original question (e.g., alphabetization, sequencing, units, rounding, decimal places, etc.)\n If asked for a number, express it numerically, don't use commas, do not add anything after the number, don't include units such as $ or percent signs unless specified otherwise in the question.\n If asked for a string, don't use articles or abbreviations (e.g. for cities), unless specified otherwise. Don't output any final sentence punctuation such as '.', '!', or '?'.\n If asked for a comma-separated list, apply the above rules depending on whether the elements are numbers or strings.\n If unable to determine the final answer, output an empty string.\n \"\"\"\n\n kind: Literal[\"gaia_answer_action\"] = \"gaia_answer_action\" # type: ignore\n success: bool = Field(description=\"True if the task was successful, False otherwise\")\n overview: str = Field(\n description=\"List of steps performed to answer the question. 
If the task was not successful, includes the reason for failure\"\n )\n answer_unit: str = Field(\n description=\"Unit of measurement for the answer, if applicable; otherwise an empty string\"\n )\n answer: Any = Field(description=\"Short final answer\")\n long_answer: str = Field(description=\"Detailed final answer not restricted by format rules\")\n\n\ndef step_error(step_dict: dict, last_action: str | None) -> str:\n kind = step_dict.get(\"kind\", \"unknown\")\n error = \"\"\n if kind == \"search_results_observation\" and not len(step_dict.get(\"serp\", [])):\n error = \"search_empty\"\n elif kind == \"page_observation\" and step_dict.get(\"error\"):\n error = \"browser\"\n elif kind == \"llm_output_parsing_failure_action\":\n error = \"parsing\"\n elif kind == \"action_execution_failure\":\n error = last_action if last_action else \"action_failure\"\n elif kind == \"code_execution_result\" and step_dict.get(\"result\", {}).get(\"exit_code\"):\n error = \"code\"\n return error\n\n\ndef normalize_number_str(number_str: str) -> float:\n # we replace these common units and commas to allow","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.step_error","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.step_error#L277-L290","kind":"function","name":"step_error","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":277,"end_line":290,"context_start_line":257,"context_end_line":310,"code":" Your final answer should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings.\n ADDITIONALLY, your final answer MUST follow any formatting instructions specified in the original question (e.g., alphabetization, sequencing, units, rounding, decimal places, etc.)\n If asked for a number, express it numerically, don't use commas, do not add anything after the number, don't include units such as $ or percent signs unless specified otherwise in the question.\n If asked for a string, don't use articles or abbreviations (e.g. for cities), unless specified otherwise. Don't output any final sentence punctuation such as '.', '!', or '?'.\n If asked for a comma-separated list, apply the above rules depending on whether the elements are numbers or strings.\n If unable to determine the final answer, output an empty string.\n \"\"\"\n\n kind: Literal[\"gaia_answer_action\"] = \"gaia_answer_action\" # type: ignore\n success: bool = Field(description=\"True if the task was successful, False otherwise\")\n overview: str = Field(\n description=\"List of steps performed to answer the question. 
If the task was not successful, includes the reason for failure\"\n )\n answer_unit: str = Field(\n description=\"Unit of measurement for the answer, if applicable; otherwise an empty string\"\n )\n answer: Any = Field(description=\"Short final answer\")\n long_answer: str = Field(description=\"Detailed final answer not restricted by format rules\")\n\n\ndef step_error(step_dict: dict, last_action: str | None) -> str:\n kind = step_dict.get(\"kind\", \"unknown\")\n error = \"\"\n if kind == \"search_results_observation\" and not len(step_dict.get(\"serp\", [])):\n error = \"search_empty\"\n elif kind == \"page_observation\" and step_dict.get(\"error\"):\n error = \"browser\"\n elif kind == \"llm_output_parsing_failure_action\":\n error = \"parsing\"\n elif kind == \"action_execution_failure\":\n error = last_action if last_action else \"action_failure\"\n elif kind == \"code_execution_result\" and step_dict.get(\"result\", {}).get(\"exit_code\"):\n error = \"code\"\n return error\n\n\ndef normalize_number_str(number_str: str) -> float:\n # we replace these common units and commas to allow\n # conversion to float\n for char in [\"$\", \"%\", \",\"]:\n number_str = number_str.replace(char, \"\")\n try:\n return float(number_str)\n except ValueError:\n logger.info(f\"String {number_str} cannot be normalized to number str.\")\n return float(\"inf\")\n\n\ndef split_string(\n s: str,\n char_list: list[str] = [\",\", \";\"],\n) -> list[str]:\n pattern = f\"[{''.join(char_list)}]\"\n return re.split(pattern, s)","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.normalize_number_str","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.normalize_number_str#L293-L302","kind":"function","name":"normalize_number_str","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":293,"end_line":302,"context_start_line":273,"context_end_line":322,"code":" answer: Any = Field(description=\"Short final answer\")\n long_answer: str = Field(description=\"Detailed final answer not restricted by format rules\")\n\n\ndef step_error(step_dict: dict, last_action: str | None) -> str:\n kind = step_dict.get(\"kind\", \"unknown\")\n error = \"\"\n if kind == \"search_results_observation\" and not len(step_dict.get(\"serp\", [])):\n error = \"search_empty\"\n elif kind == \"page_observation\" and step_dict.get(\"error\"):\n error = \"browser\"\n elif kind == \"llm_output_parsing_failure_action\":\n error = \"parsing\"\n elif kind == \"action_execution_failure\":\n error = last_action if last_action else \"action_failure\"\n elif kind == \"code_execution_result\" and step_dict.get(\"result\", {}).get(\"exit_code\"):\n error = \"code\"\n return error\n\n\ndef normalize_number_str(number_str: str) -> float:\n # we replace these common units and commas to allow\n # conversion to float\n for char in [\"$\", \"%\", \",\"]:\n number_str = number_str.replace(char, \"\")\n try:\n return float(number_str)\n except ValueError:\n logger.info(f\"String {number_str} cannot be normalized to number str.\")\n return float(\"inf\")\n\n\ndef split_string(\n s: str,\n char_list: list[str] = [\",\", \";\"],\n) -> list[str]:\n pattern = f\"[{''.join(char_list)}]\"\n return re.split(pattern, s)\n\n\ndef question_scorer(\n model_answer: str,\n ground_truth: str,\n) -> bool:\n def is_float(element: Any) -> bool:\n try:\n float(element)\n return True\n except ValueError:\n return 
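
step_error above collapses tape steps into coarse error labels for failure analysis. A few representative inputs, using only the keys the function inspects:

from agentlab.benchmarks.gaia import step_error

assert step_error({"kind": "search_results_observation", "serp": []}, None) == "search_empty"
assert step_error({"kind": "llm_output_parsing_failure_action"}, None) == "parsing"
# Execution failures are attributed to the last action when one is known.
assert step_error({"kind": "action_execution_failure"}, "click") == "click"
assert step_error({"kind": "code_execution_result", "result": {"exit_code": 1}}, None) == "code"
assert step_error({"kind": "page_observation"}, None) == ""  # no error recorded
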
False","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.split_string","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.split_string#L305-L310","kind":"function","name":"split_string","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":305,"end_line":310,"context_start_line":285,"context_end_line":330,"code":" error = \"parsing\"\n elif kind == \"action_execution_failure\":\n error = last_action if last_action else \"action_failure\"\n elif kind == \"code_execution_result\" and step_dict.get(\"result\", {}).get(\"exit_code\"):\n error = \"code\"\n return error\n\n\ndef normalize_number_str(number_str: str) -> float:\n # we replace these common units and commas to allow\n # conversion to float\n for char in [\"$\", \"%\", \",\"]:\n number_str = number_str.replace(char, \"\")\n try:\n return float(number_str)\n except ValueError:\n logger.info(f\"String {number_str} cannot be normalized to number str.\")\n return float(\"inf\")\n\n\ndef split_string(\n s: str,\n char_list: list[str] = [\",\", \";\"],\n) -> list[str]:\n pattern = f\"[{''.join(char_list)}]\"\n return re.split(pattern, s)\n\n\ndef question_scorer(\n model_answer: str,\n ground_truth: str,\n) -> bool:\n def is_float(element: Any) -> bool:\n try:\n float(element)\n return True\n except ValueError:\n return False\n\n # if gt is a number\n if is_float(ground_truth):\n logger.info(f\"Evaluating {model_answer} as a number.\")\n normalized_answer = normalize_number_str(model_answer)\n return normalized_answer == float(ground_truth)\n\n # if gt is a list","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.question_scorer","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.question_scorer#L313-L360","kind":"function","name":"question_scorer","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":313,"end_line":360,"context_start_line":293,"context_end_line":380,"code":"def normalize_number_str(number_str: str) -> float:\n # we replace these common units and commas to allow\n # conversion to float\n for char in [\"$\", \"%\", \",\"]:\n number_str = number_str.replace(char, \"\")\n try:\n return float(number_str)\n except ValueError:\n logger.info(f\"String {number_str} cannot be normalized to number str.\")\n return float(\"inf\")\n\n\ndef split_string(\n s: str,\n char_list: list[str] = [\",\", \";\"],\n) -> list[str]:\n pattern = f\"[{''.join(char_list)}]\"\n return re.split(pattern, s)\n\n\ndef question_scorer(\n model_answer: str,\n ground_truth: str,\n) -> bool:\n def is_float(element: Any) -> bool:\n try:\n float(element)\n return True\n except ValueError:\n return False\n\n # if gt is a number\n if is_float(ground_truth):\n logger.info(f\"Evaluating {model_answer} as a number.\")\n normalized_answer = normalize_number_str(model_answer)\n return normalized_answer == float(ground_truth)\n\n # if gt is a list\n elif any(char in ground_truth for char in [\",\", \";\"]):\n logger.info(f\"Evaluating {model_answer} as a comma separated list.\")\n # question with the fish: normalization removes punct\n\n gt_elems = split_string(ground_truth)\n ma_elems = split_string(model_answer)\n\n # check length is the same\n if len(gt_elems) != len(ma_elems):\n logger.warning(\"Answer lists have different lengths, returning False.\")\n return False\n\n # 
compare each element as float or str\n comparisons = []\n for ma_elem, gt_elem in zip(ma_elems, gt_elems):\n if is_float(gt_elem):\n normalized_ma_elem = normalize_number_str(ma_elem)\n comparisons.append(normalized_ma_elem == float(gt_elem))\n else:\n # we do not remove punct since comparisons can include punct\n comparisons.append(\n normalize_str(ma_elem, remove_punct=False)\n == normalize_str(gt_elem, remove_punct=False)\n )\n return all(comparisons)\n\n # if gt is a str\n else:\n logger.info(f\"Evaluating {model_answer} as a string.\")\n return normalize_str(model_answer) == normalize_str(ground_truth)\n\n\ndef normalize_str(input_str, remove_punct=True) -> str:\n \"\"\"\n Normalize a string by:\n - Removing all white spaces\n - Optionally removing punctuation (if remove_punct is True)\n - Converting to lowercase\n\n Args:\n input_str: str, the string to normalize\n remove_punct: bool, whether to remove punctuation (default: True)\n\n Returns:\n str, the normalized string\n \"\"\"\n # Remove all white spaces. Required e.g for seagull vs. sea gull\n no_spaces = re.sub(r\"\\s\", \"\", input_str)\n\n # Remove punctuation, if specified.","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.normalize_str","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.normalize_str#L363-L385","kind":"function","name":"normalize_str","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":363,"end_line":385,"context_start_line":343,"context_end_line":385,"code":" # compare each element as float or str\n comparisons = []\n for ma_elem, gt_elem in zip(ma_elems, gt_elems):\n if is_float(gt_elem):\n normalized_ma_elem = normalize_number_str(ma_elem)\n comparisons.append(normalized_ma_elem == float(gt_elem))\n else:\n # we do not remove punct since comparisons can include punct\n comparisons.append(\n normalize_str(ma_elem, remove_punct=False)\n == normalize_str(gt_elem, remove_punct=False)\n )\n return all(comparisons)\n\n # if gt is a str\n else:\n logger.info(f\"Evaluating {model_answer} as a string.\")\n return normalize_str(model_answer) == normalize_str(ground_truth)\n\n\ndef normalize_str(input_str, remove_punct=True) -> str:\n \"\"\"\n Normalize a string by:\n - Removing all white spaces\n - Optionally removing punctuation (if remove_punct is True)\n - Converting to lowercase\n\n Args:\n input_str: str, the string to normalize\n remove_punct: bool, whether to remove punctuation (default: True)\n\n Returns:\n str, the normalized string\n \"\"\"\n # Remove all white spaces. Required e.g for seagull vs. 
sea gull\n no_spaces = re.sub(r\"\\s\", \"\", input_str)\n\n # Remove punctuation, if specified.\n if remove_punct:\n translator = str.maketrans(\"\", \"\", string.punctuation)\n return no_spaces.lower().translate(translator)\n else:\n return no_spaces.lower()","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.__init__","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.__init__#L71-L81","kind":"function","name":"__init__","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":71,"end_line":81,"context_start_line":51,"context_end_line":101,"code":" reward = 1.0 if question_scorer(model_answer, ground_truth) else 0.0\n else:\n reward = 0.0\n\n if reward == 1.0:\n logger.info(f\"Task {self.task['task_id']} solved.\")\n else:\n logger.info(f\"Task {self.task['task_id']} failed.\")\n\n return reward\n\n\n@dataclass\nclass GaiaGymArgs(AbstractEnvArgs):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n task: dict[str, Any]\n task_seed: int\n task_name: str\n env_config: DictConfig\n\n def __init__(\n self,\n task_name: str,\n task: dict[str, Any],\n env_config: DictConfig,\n task_seed: int = 0,\n ):\n self.task_name = task_name\n self.task = task\n self.task_seed = task_seed\n self.env_config = env_config\n\n def make_env(self, exp_dir: Path, action_mapping=None) -> GaiaGym:\n tapeagents.config.DB_DEFAULT_FILENAME = str(exp_dir.parent / \"tapedata.sqlite\")\n exp_dir_str = str(exp_dir)\n logger.info(f\"Init gaia env with directory {exp_dir_str}\")\n init_code_sandbox(exp_dir_str)\n for i in range(len(self.env_config.tools)):\n if hasattr(self.env_config.tools[i], \"exp_path\"):\n self.env_config.tools[i].exp_path = exp_dir_str\n tools = hydra.utils.instantiate(self.env_config.tools)\n env = GaiaGym(tools=tools, task=self.task, exp_dir=exp_dir_str)\n return env\n\n\ndef init_code_sandbox(exp_dir: str) -> None:\n # Use a common code directory for all tasks in the experiment, which is mounted in the container\n root_exp_dir = Path(exp_dir).parent\n code_path = os.path.join(root_exp_dir, \"shared_code\")\n os.makedirs(code_path, exist_ok=True)\n os.environ[\"COMPUTER_CONTAINER_NAME\"] = CONTAINER_NAME","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.reset","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.reset#L40-L45","kind":"function","name":"reset","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":40,"end_line":45,"context_start_line":20,"context_end_line":65,"code":"from tapeagents.tools.simple_browser import SimpleTextBrowser\n\nfrom agentlab.benchmarks.abstract_env import AbstractBenchmark, AbstractEnvArgs\nfrom agentlab.benchmarks.multitool_gym import MultiToolGym\n\nlogger = logging.getLogger(__name__)\n\nCONTAINER_NAME = \"gaia_code_shared\"\n\n\nclass GaiaGym(MultiToolGym):\n task: dict\n exp_dir: str\n\n def __init__(self, tools: list[Tool | StatefulTool], task: dict, exp_dir: str):\n super().__init__(tools=tools)\n self.task = task\n self.exp_dir = exp_dir\n os.makedirs(\".cache\", exist_ok=True)\n\n def reset(self, seed=None) -> tuple[list[Observation], dict]:\n \"\"\"\n Reset the state of all the tools and prepare initial observations from the task again\n \"\"\"\n super().reset()\n return task_to_observations(self.task), {}\n\n def calculate_reward(self, action: Action) -> 
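
The scoring rules above, end to end: numeric ground truths tolerate $, % and thousands separators; list ground truths are compared element by element; plain strings are lowercased with whitespace (and, by default, punctuation) removed. A few illustrative checks:

from agentlab.benchmarks.gaia import question_scorer

assert question_scorer("$1,234", "1234")       # number: units and commas stripped
assert question_scorer("Sea Gull", "seagull")  # string: case and whitespace ignored
assert question_scorer("3, apple", "3,apple")  # list: number/string rules per element
assert not question_scorer("1234 kg", "1234")  # "kg" survives stripping, float() fails
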
float:\n if isinstance(action, GaiaAnswer):\n model_answer = action.answer\n ground_truth = self.task[\"Final answer\"]\n reward = 1.0 if question_scorer(model_answer, ground_truth) else 0.0\n else:\n reward = 0.0\n\n if reward == 1.0:\n logger.info(f\"Task {self.task['task_id']} solved.\")\n else:\n logger.info(f\"Task {self.task['task_id']} failed.\")\n\n return reward\n\n\n@dataclass\nclass GaiaGymArgs(AbstractEnvArgs):\n model_config = ConfigDict(arbitrary_types_allowed=True)","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.calculate_reward","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.calculate_reward#L47-L60","kind":"function","name":"calculate_reward","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":47,"end_line":60,"context_start_line":27,"context_end_line":80,"code":"CONTAINER_NAME = \"gaia_code_shared\"\n\n\nclass GaiaGym(MultiToolGym):\n task: dict\n exp_dir: str\n\n def __init__(self, tools: list[Tool | StatefulTool], task: dict, exp_dir: str):\n super().__init__(tools=tools)\n self.task = task\n self.exp_dir = exp_dir\n os.makedirs(\".cache\", exist_ok=True)\n\n def reset(self, seed=None) -> tuple[list[Observation], dict]:\n \"\"\"\n Reset the state of all the tools and prepare initial observations from the task again\n \"\"\"\n super().reset()\n return task_to_observations(self.task), {}\n\n def calculate_reward(self, action: Action) -> float:\n if isinstance(action, GaiaAnswer):\n model_answer = action.answer\n ground_truth = self.task[\"Final answer\"]\n reward = 1.0 if question_scorer(model_answer, ground_truth) else 0.0\n else:\n reward = 0.0\n\n if reward == 1.0:\n logger.info(f\"Task {self.task['task_id']} solved.\")\n else:\n logger.info(f\"Task {self.task['task_id']} failed.\")\n\n return reward\n\n\n@dataclass\nclass GaiaGymArgs(AbstractEnvArgs):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n task: dict[str, Any]\n task_seed: int\n task_name: str\n env_config: DictConfig\n\n def __init__(\n self,\n task_name: str,\n task: dict[str, Any],\n env_config: DictConfig,\n task_seed: int = 0,\n ):\n self.task_name = task_name\n self.task = task\n self.task_seed = task_seed","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.make_env","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.make_env#L83-L93","kind":"function","name":"make_env","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":83,"end_line":93,"context_start_line":63,"context_end_line":113,"code":"@dataclass\nclass GaiaGymArgs(AbstractEnvArgs):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n task: dict[str, Any]\n task_seed: int\n task_name: str\n env_config: DictConfig\n\n def __init__(\n self,\n task_name: str,\n task: dict[str, Any],\n env_config: DictConfig,\n task_seed: int = 0,\n ):\n self.task_name = task_name\n self.task = task\n self.task_seed = task_seed\n self.env_config = env_config\n\n def make_env(self, exp_dir: Path, action_mapping=None) -> GaiaGym:\n tapeagents.config.DB_DEFAULT_FILENAME = str(exp_dir.parent / \"tapedata.sqlite\")\n exp_dir_str = str(exp_dir)\n logger.info(f\"Init gaia env with directory {exp_dir_str}\")\n init_code_sandbox(exp_dir_str)\n for i in range(len(self.env_config.tools)):\n if hasattr(self.env_config.tools[i], \"exp_path\"):\n 
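
Scoring is all or nothing: calculate_reward above returns 1.0 only when the final action is a GaiaAnswer whose answer matches the task's "Final answer" under question_scorer; any other action scores 0.0. A sketch with a hypothetical ground truth of "Paris":

from agentlab.benchmarks.gaia import GaiaAnswer, question_scorer

final_step = GaiaAnswer(
    success=True,
    overview="1. searched the web; 2. read the top result",
    answer_unit="",
    answer="Paris",
    long_answer="Paris is the capital of France.",
)
# calculate_reward reduces to this check when the action is a GaiaAnswer:
reward = 1.0 if question_scorer(final_step.answer, "Paris") else 0.0
assert reward == 1.0
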
self.env_config.tools[i].exp_path = exp_dir_str\n tools = hydra.utils.instantiate(self.env_config.tools)\n env = GaiaGym(tools=tools, task=self.task, exp_dir=exp_dir_str)\n return env\n\n\ndef init_code_sandbox(exp_dir: str) -> None:\n # Use a common code directory for all tasks in the experiment, which is mounted in the container\n root_exp_dir = Path(exp_dir).parent\n code_path = os.path.join(root_exp_dir, \"shared_code\")\n os.makedirs(code_path, exist_ok=True)\n os.environ[\"COMPUTER_CONTAINER_NAME\"] = CONTAINER_NAME\n\n # symlink task code to the shared code directory\n task_code_path = os.path.join(exp_dir, \"code\")\n if not os.path.exists(task_code_path):\n os.symlink(code_path, task_code_path)\n\n try:\n ContainerExecutor(container_name=CONTAINER_NAME, work_dir=code_path, no_deps=True)\n except Exception as e:\n logger.warning(f\"Failed to initialize container executor: {e}\")\n\n","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.from_config","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.from_config#L131-L137","kind":"function","name":"from_config","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":131,"end_line":137,"context_start_line":111,"context_end_line":157,"code":" logger.warning(f\"Failed to initialize container executor: {e}\")\n\n\ndef stop_old_sandbox():\n try:\n podman.from_env().containers.get(CONTAINER_NAME).stop()\n except Exception as e:\n logger.warning(f\"Failed to stop old container {CONTAINER_NAME}: {e}\")\n\n\nclass GaiaBenchmark(AbstractBenchmark):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n name: str = \"gaia\"\n split: Literal[\"test\", \"validation\"]\n level: Literal[\"1\", \"2\", \"3\", \"all\"] = \"all\"\n env_args_list: list[GaiaGymArgs] = None # type: ignore\n dataset: dict | None = None # type: ignore\n env_config: DictConfig = None # type: ignore\n\n @classmethod\n def from_config(cls, config: DictConfig, dataset: dict | None = None) -> Self:\n return cls(\n split=config.split,\n level=config.level,\n env_config=config.environment,\n dataset=dataset,\n )\n\n def model_post_init(self, __context: Any) -> None:\n self.env_args_list = []\n number = 0\n if self.dataset is None:\n self.dataset = datasets.load_dataset(\n path=\"gaia-benchmark/GAIA\",\n name=\"2023_all\",\n trust_remote_code=True,\n ) # type: ignore\n for task in self.dataset[self.split]: # type: ignore\n if self.level != \"all\" and task[\"Level\"] != self.level:\n continue\n number += 1\n task[\"number\"] = number\n name = f\"gaia.{task['task_id']}\"\n env_args = GaiaGymArgs(task_name=name, task=task, env_config=self.env_config)\n self.env_args_list.append(env_args)\n logger.info(f\"Loaded {len(self.env_args_list)} tasks from {self.split} split\")\n","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.model_post_init","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.model_post_init#L139-L156","kind":"function","name":"model_post_init","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":139,"end_line":156,"context_start_line":119,"context_end_line":176,"code":"\n\nclass GaiaBenchmark(AbstractBenchmark):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n name: str = \"gaia\"\n split: Literal[\"test\", \"validation\"]\n level: Literal[\"1\", \"2\", \"3\", 
\"all\"] = \"all\"\n env_args_list: list[GaiaGymArgs] = None # type: ignore\n dataset: dict | None = None # type: ignore\n env_config: DictConfig = None # type: ignore\n\n @classmethod\n def from_config(cls, config: DictConfig, dataset: dict | None = None) -> Self:\n return cls(\n split=config.split,\n level=config.level,\n env_config=config.environment,\n dataset=dataset,\n )\n\n def model_post_init(self, __context: Any) -> None:\n self.env_args_list = []\n number = 0\n if self.dataset is None:\n self.dataset = datasets.load_dataset(\n path=\"gaia-benchmark/GAIA\",\n name=\"2023_all\",\n trust_remote_code=True,\n ) # type: ignore\n for task in self.dataset[self.split]: # type: ignore\n if self.level != \"all\" and task[\"Level\"] != self.level:\n continue\n number += 1\n task[\"number\"] = number\n name = f\"gaia.{task['task_id']}\"\n env_args = GaiaGymArgs(task_name=name, task=task, env_config=self.env_config)\n self.env_args_list.append(env_args)\n logger.info(f\"Loaded {len(self.env_args_list)} tasks from {self.split} split\")\n\n\nclass ExtractedFacts(Thought):\n \"\"\"\n Thought that contains the list of facts extracted from the document\n \"\"\"\n\n kind: Literal[\"extracted_facts_thought\"] = \"extracted_facts_thought\" # type: ignore\n extracted_facts: list[str] | dict[str, Any] | str = Field(\n description=\"facts extracted from the observation\"\n )\n\n\nclass GaiaQuestion(Observation):\n kind: Literal[\"question\"] = \"question\" # type: ignore\n content: str\n filename: str | None = None\n\n @classmethod\n def from_task(cls, question: dict, files_dir: str = \"/tmp/gaia_files\"):","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.from_task","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.from_task#L176-L186","kind":"function","name":"from_task","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":176,"end_line":186,"context_start_line":156,"context_end_line":206,"code":" logger.info(f\"Loaded {len(self.env_args_list)} tasks from {self.split} split\")\n\n\nclass ExtractedFacts(Thought):\n \"\"\"\n Thought that contains the list of facts extracted from the document\n \"\"\"\n\n kind: Literal[\"extracted_facts_thought\"] = \"extracted_facts_thought\" # type: ignore\n extracted_facts: list[str] | dict[str, Any] | str = Field(\n description=\"facts extracted from the observation\"\n )\n\n\nclass GaiaQuestion(Observation):\n kind: Literal[\"question\"] = \"question\" # type: ignore\n content: str\n filename: str | None = None\n\n @classmethod\n def from_task(cls, question: dict, files_dir: str = \"/tmp/gaia_files\"):\n os.makedirs(files_dir, exist_ok=True)\n question_prompt = question[\"Question\"]\n filename = None\n if question[\"file_path\"]:\n basename = os.path.basename(question[\"file_path\"])\n tmp_fname = os.path.join(files_dir, basename)\n shutil.copyfile(question[\"file_path\"], tmp_fname)\n assert os.path.exists(tmp_fname)\n filename = tmp_fname\n return cls(content=question_prompt, filename=filename)\n\n\ndef task_to_observations(task: dict, max_doc_length: int = 8000) -> list[Observation]:\n browser = SimpleTextBrowser()\n question = GaiaQuestion.from_task(task)\n if not question.filename:\n return [question]\n\n filename: str | None = question.filename\n question.filename = None\n steps: list[Observation] = []\n name, ext = filename.rsplit(\".\", maxsplit=1)\n ext = ext.lower()\n if ext == \"zip\":\n folder_name = 
name\n        os.makedirs(folder_name, exist_ok=True)\n        shutil.unpack_archive(filename, folder_name)\n        document_text = \"\\n\\nArchive contains the following files:\\n\"\n        for i, file in enumerate(os.listdir(folder_name)):\n            file_path = os.path.join(folder_name, file)","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.gaia.is_float","uri":"program://AgentLab/function/src.agentlab.benchmarks.gaia.is_float#L317-L322","kind":"function","name":"is_float","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":317,"end_line":322,"context_start_line":297,"context_end_line":342,"code":"        number_str = number_str.replace(char, \"\")\n    try:\n        return float(number_str)\n    except ValueError:\n        logger.info(f\"String {number_str} cannot be normalized to number str.\")\n        return float(\"inf\")\n\n\ndef split_string(\n    s: str,\n    char_list: list[str] = [\",\", \";\"],\n) -> list[str]:\n    pattern = f\"[{''.join(char_list)}]\"\n    return re.split(pattern, s)\n\n\ndef question_scorer(\n    model_answer: str,\n    ground_truth: str,\n) -> bool:\n    def is_float(element: Any) -> bool:\n        try:\n            float(element)\n            return True\n        except ValueError:\n            return False\n\n    # if gt is a number\n    if is_float(ground_truth):\n        logger.info(f\"Evaluating {model_answer} as a number.\")\n        normalized_answer = normalize_number_str(model_answer)\n        return normalized_answer == float(ground_truth)\n\n    # if gt is a list\n    elif any(char in ground_truth for char in [\",\", \";\"]):\n        logger.info(f\"Evaluating {model_answer} as a comma separated list.\")\n        # question with the fish: normalization removes punct\n\n        gt_elems = split_string(ground_truth)\n        ma_elems = split_string(model_answer)\n\n        # check length is the same\n        if len(gt_elems) != len(ma_elems):\n            logger.warning(\"Answer lists have different lengths, returning False.\")\n            return False\n","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env","uri":"program://AgentLab/module/src.agentlab.benchmarks.abstract_env#L1-L110","kind":"module","name":"src.agentlab.benchmarks.abstract_env","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":1,"end_line":110,"context_start_line":1,"context_end_line":110,"code":"import time\nfrom abc import ABC, abstractmethod\nfrom functools import wraps\nfrom typing import Any\n\nimport gymnasium as gym\nfrom dataclasses_json import DataClassJsonMixin\nfrom pydantic import BaseModel\n\n\nclass AbstractEnvArgs(DataClassJsonMixin):\n    @abstractmethod\n    def make_env(self, action_mapping, exp_dir, exp_task_kwargs) -> \"AbstractEnv\":\n        \"\"\"Create an instance of the environment with the arguments stored in this object.\n\n        Args:\n            action_mapping (dict[str,str]): mapping from the agent's action space to the environment's action space\n                see AbstractActionSet.to_python_code from BrowserGym for an example\n            exp_dir (str): directory where the experiment is stored\n            exp_task_kwargs (dict[str,Any]): additional arguments for the environment\n\n        Returns:\n            env (AbstractEnv): instance of the environment.\n        \"\"\"\n\n\nclass AbstractBenchmark(BaseModel):\n    name: str\n    env_args_list: list\n\n    def get_version(self) -> str:\n        return \"1\"\n\n    def prepare_backends(self):\n        pass\n\n    def dependency_graph_over_tasks(self) -> dict[str, list[str]]:\n        return {}\n\n\nclass AbstractEnv(gym.Env, ABC):\n    @abstractmethod\n    def reset(self, seed: int = 
None) -> tuple[dict[str, Any], dict[str, Any]]:\n        \"\"\"Reset the environment to the initial state, ready for an agent to start a new episode.\n\n        Args:\n            seed (int): seed to be used for the environment's random number generator. Some tasks may\n                be deterministic and not require a seed.\n\n        Returns:\n            obs (dict[str,Any]): dictionary containing the observations\n            env_info (dict[str,Any]): additional information about the environment (see step's docstring)\n        \"\"\"\n\n    @abstractmethod\n    def step(self, action: str):\n        \"\"\"Execute an action in the environment and return the next observations\n\n        Args:\n            action (str): action to be executed in the environment, as a string\n\n        Returns:\n            obs (dict[str,Any]): dictionary containing the observations\n            reward (float): reward obtained after executing the action\n            terminated (bool): whether the episode is terminated. The MDP reached a terminal state\n            truncated (bool): whether the episode is truncated. The episode was truncated due to external reasons\n            env_info (dict[str,Any]): additional information about the environment\n            task_info (str): Some potential debugging information about the task, not intended for the agent\n            action_exec_start (float): time when the action execution started\n            action_exec_stop (float): time when the action execution ended\n            action_exec_timeout (float): TODO I don't remember exactly what this is\n        \"\"\"\n\n    @abstractmethod\n    def close(self):\n        \"\"\"Close any resources used by the environment\"\"\"\n\n\ndef add_step_timing_to_env_info_decorator(step_func):\n    \"\"\"Decorator/wrapper that adds timing information to any step function.\n\n    This wrapper can be applied to any step method to automatically\n    measure and include action execution timing in the env_info.\n\n    Args:\n        step_func: The step function to wrap\n\n    Returns:\n        Wrapped function that includes timing information\n    \"\"\"\n\n    @wraps(step_func)\n    def wrapped_step(self, action: str):\n        action_exec_start = time.time()\n        obs, reward, terminated, truncated, env_info = step_func(self, action)\n        action_exec_stop = time.time()\n\n        # Ensure env_info is a dictionary\n        if env_info is None:\n            env_info = {}\n\n        if \"action_exec_start\" not in env_info:\n            env_info[\"action_exec_start\"] = action_exec_start\n        if \"action_exec_stop\" not in env_info:\n            env_info[\"action_exec_stop\"] = action_exec_stop\n        if \"action_exec_timeout\" not in env_info:\n            env_info[\"action_exec_timeout\"] = 0.0 # Default to 0, override if needed\n\n        return obs, reward, terminated, truncated, env_info\n\n    return wrapped_step","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.AbstractEnvArgs","uri":"program://AgentLab/class/src.agentlab.benchmarks.abstract_env.AbstractEnvArgs#L10-L23","kind":"class","name":"AbstractEnvArgs","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":10,"end_line":23,"context_start_line":1,"context_end_line":43,"code":"import time\nfrom abc import ABC, abstractmethod\nfrom functools import wraps\nfrom typing import Any\n\nimport gymnasium as gym\nfrom dataclasses_json import DataClassJsonMixin\nfrom pydantic import BaseModel\n\n\nclass AbstractEnvArgs(DataClassJsonMixin):\n    @abstractmethod\n    def make_env(self, action_mapping, exp_dir, exp_task_kwargs) -> \"AbstractEnv\":\n        \"\"\"Create an instance of the environment with the arguments stored in this object.\n\n        Args:\n            action_mapping (dict[str,str]): mapping from the agent's action space to the 
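
The timing decorator at the end of the module wraps any step() that follows the five-tuple contract documented above, stamping wall-clock times into env_info without touching the environment's own return values. A toy sketch:

from agentlab.benchmarks.abstract_env import add_step_timing_to_env_info_decorator

class ToyEnv:
    # Minimal stand-in honoring the (obs, reward, terminated, truncated, env_info) contract.

    @add_step_timing_to_env_info_decorator
    def step(self, action: str):
        return {"echo": action}, 0.0, False, False, None  # env_info=None is upgraded to {}

obs, reward, terminated, truncated, env_info = ToyEnv().step("noop")
assert env_info["action_exec_stop"] >= env_info["action_exec_start"]
assert env_info["action_exec_timeout"] == 0.0  # default added by the wrapper
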
environment's action space\n                see AbstractActionSet.to_python_code from BrowserGym for an example\n            exp_dir (str): directory where the experiment is stored\n            exp_task_kwargs (dict[str,Any]): additional arguments for the environment\n\n        Returns:\n            env (AbstractEnv): instance of the environment.\n        \"\"\"\n\n\nclass AbstractBenchmark(BaseModel):\n    name: str\n    env_args_list: list\n\n    def get_version(self) -> str:\n        return \"1\"\n\n    def prepare_backends(self):\n        pass\n\n    def dependency_graph_over_tasks(self) -> dict[str, list[str]]:\n        return {}\n\n\nclass AbstractEnv(gym.Env, ABC):\n    @abstractmethod\n    def reset(self, seed: int = None) -> tuple[dict[str, Any], dict[str, Any]]:\n        \"\"\"Reset the environment to the initial state, ready for an agent to start a new episode.","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.AbstractBenchmark","uri":"program://AgentLab/class/src.agentlab.benchmarks.abstract_env.AbstractBenchmark#L26-L37","kind":"class","name":"AbstractBenchmark","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":26,"end_line":37,"context_start_line":6,"context_end_line":57,"code":"from dataclasses_json import DataClassJsonMixin\nfrom pydantic import BaseModel\n\n\nclass AbstractEnvArgs(DataClassJsonMixin):\n    @abstractmethod\n    def make_env(self, action_mapping, exp_dir, exp_task_kwargs) -> \"AbstractEnv\":\n        \"\"\"Create an instance of the environment with the arguments stored in this object.\n\n        Args:\n            action_mapping (dict[str,str]): mapping from the agent's action space to the environment's action space\n                see AbstractActionSet.to_python_code from BrowserGym for an example\n            exp_dir (str): directory where the experiment is stored\n            exp_task_kwargs (dict[str,Any]): additional arguments for the environment\n\n        Returns:\n            env (AbstractEnv): instance of the environment.\n        \"\"\"\n\n\nclass AbstractBenchmark(BaseModel):\n    name: str\n    env_args_list: list\n\n    def get_version(self) -> str:\n        return \"1\"\n\n    def prepare_backends(self):\n        pass\n\n    def dependency_graph_over_tasks(self) -> dict[str, list[str]]:\n        return {}\n\n\nclass AbstractEnv(gym.Env, ABC):\n    @abstractmethod\n    def reset(self, seed: int = None) -> tuple[dict[str, Any], dict[str, Any]]:\n        \"\"\"Reset the environment to the initial state, ready for an agent to start a new episode.\n\n        Args:\n            seed (int): seed to be used for the environment's random number generator. 
Some tasks may\n                be deterministic and not require a seed.\n\n        Returns:\n            obs (dict[str,Any]): dictionary containing the observations\n            env_info (dict[str,Any]): additional information about the environment (see step's docstring)\n        \"\"\"\n\n    @abstractmethod\n    def step(self, action: str):\n        \"\"\"Execute an action in the environment and return the next observations\n","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.AbstractEnv","uri":"program://AgentLab/class/src.agentlab.benchmarks.abstract_env.AbstractEnv#L40-L75","kind":"class","name":"AbstractEnv","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":40,"end_line":75,"context_start_line":20,"context_end_line":95,"code":"\n        Returns:\n            env (AbstractEnv): instance of the environment.\n        \"\"\"\n\n\nclass AbstractBenchmark(BaseModel):\n    name: str\n    env_args_list: list\n\n    def get_version(self) -> str:\n        return \"1\"\n\n    def prepare_backends(self):\n        pass\n\n    def dependency_graph_over_tasks(self) -> dict[str, list[str]]:\n        return {}\n\n\nclass AbstractEnv(gym.Env, ABC):\n    @abstractmethod\n    def reset(self, seed: int = None) -> tuple[dict[str, Any], dict[str, Any]]:\n        \"\"\"Reset the environment to the initial state, ready for an agent to start a new episode.\n\n        Args:\n            seed (int): seed to be used for the environment's random number generator. Some tasks may\n                be deterministic and not require a seed.\n\n        Returns:\n            obs (dict[str,Any]): dictionary containing the observations\n            env_info (dict[str,Any]): additional information about the environment (see step's docstring)\n        \"\"\"\n\n    @abstractmethod\n    def step(self, action: str):\n        \"\"\"Execute an action in the environment and return the next observations\n\n        Args:\n            action (str): action to be executed in the environment, as a string\n\n        Returns:\n            obs (dict[str,Any]): dictionary containing the observations\n            reward (float): reward obtained after executing the action\n            terminated (bool): whether the episode is terminated. The MDP reached a terminal state\n            truncated (bool): whether the episode is truncated. 
The episode was truncated due to external reasons\n env_info (dict[str,Any]): additional information about the environment\n task_info (str): Some potential debugging information about the task, not intended for the agent\n action_exec_start (float): time when the action execution started\n action_exec_stop (float): time when the action execution ended\n action_exec_timeout (float): TODO I don't remember exactly what this is\n \"\"\"\n\n @abstractmethod\n def close(self):\n \"\"\"Close any resources used by the environment\"\"\"\n\n\ndef add_step_timing_to_env_info_decorator(step_func):\n \"\"\"Decorator/wrapper that adds timing information to any step function.\n\n This wrapper can be applied to any step method to automatically\n measure and include action execution timing in the env_info.\n\n Args:\n step_func: The step function to wrap\n\n Returns:\n Wrapped function that includes timing information\n \"\"\"\n\n @wraps(step_func)\n def wrapped_step(self, action: str):\n action_exec_start = time.time()\n obs, reward, terminated, truncated, env_info = step_func(self, action)\n action_exec_stop = time.time()","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.add_step_timing_to_env_info_decorator","uri":"program://AgentLab/function/src.agentlab.benchmarks.abstract_env.add_step_timing_to_env_info_decorator#L78-L110","kind":"function","name":"add_step_timing_to_env_info_decorator","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":78,"end_line":110,"context_start_line":58,"context_end_line":110,"code":" Args:\n action (str): action to be executed in the environment, as a string\n\n Returns:\n obs (dict[str,Any]): dictionary containing the observations\n reward (float): reward obtained after executing the action\n terminated (bool): whether the episode is terminated. The MDP reached a terminal state\n truncated (bool): whether the episode is truncated. 
The episode was truncated due to external reasons\n            env_info (dict[str,Any]): additional information about the environment\n            task_info (str): Some potential debugging information about the task, not intended for the agent\n            action_exec_start (float): time when the action execution started\n            action_exec_stop (float): time when the action execution ended\n            action_exec_timeout (float): TODO I don't remember exactly what this is\n        \"\"\"\n\n    @abstractmethod\n    def close(self):\n        \"\"\"Close any resources used by the environment\"\"\"\n\n\ndef add_step_timing_to_env_info_decorator(step_func):\n    \"\"\"Decorator/wrapper that adds timing information to any step function.\n\n    This wrapper can be applied to any step method to automatically\n    measure and include action execution timing in the env_info.\n\n    Args:\n        step_func: The step function to wrap\n\n    Returns:\n        Wrapped function that includes timing information\n    \"\"\"\n\n    @wraps(step_func)\n    def wrapped_step(self, action: str):\n        action_exec_start = time.time()\n        obs, reward, terminated, truncated, env_info = step_func(self, action)\n        action_exec_stop = time.time()\n\n        # Ensure env_info is a dictionary\n        if env_info is None:\n            env_info = {}\n\n        if \"action_exec_start\" not in env_info:\n            env_info[\"action_exec_start\"] = action_exec_start\n        if \"action_exec_stop\" not in env_info:\n            env_info[\"action_exec_stop\"] = action_exec_stop\n        if \"action_exec_timeout\" not in env_info:\n            env_info[\"action_exec_timeout\"] = 0.0 # Default to 0, override if needed\n\n        return obs, reward, terminated, truncated, env_info\n\n    return wrapped_step","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.make_env","uri":"program://AgentLab/function/src.agentlab.benchmarks.abstract_env.make_env#L12-L23","kind":"function","name":"make_env","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":12,"end_line":23,"context_start_line":1,"context_end_line":43,"code":"import time\nfrom abc import ABC, abstractmethod\nfrom functools import wraps\nfrom typing import Any\n\nimport gymnasium as gym\nfrom dataclasses_json import DataClassJsonMixin\nfrom pydantic import BaseModel\n\n\nclass AbstractEnvArgs(DataClassJsonMixin):\n    @abstractmethod\n    def make_env(self, action_mapping, exp_dir, exp_task_kwargs) -> \"AbstractEnv\":\n        \"\"\"Create an instance of the environment with the arguments stored in this object.\n\n        Args:\n            action_mapping (dict[str,str]): mapping from the agent's action space to the environment's action space\n                see AbstractActionSet.to_python_code from BrowserGym for an example\n            exp_dir (str): directory where the experiment is stored\n            exp_task_kwargs (dict[str,Any]): additional arguments for the environment\n\n        Returns:\n            env (AbstractEnv): instance of the environment.\n        \"\"\"\n\n\nclass AbstractBenchmark(BaseModel):\n    name: str\n    env_args_list: list\n\n    def get_version(self) -> str:\n        return \"1\"\n\n    def prepare_backends(self):\n        pass\n\n    def dependency_graph_over_tasks(self) -> dict[str, list[str]]:\n        return {}\n\n\nclass AbstractEnv(gym.Env, ABC):\n    @abstractmethod\n    def reset(self, seed: int = None) -> tuple[dict[str, Any], dict[str, Any]]:\n        \"\"\"Reset the environment to the initial state, ready for an agent to start a new episode.","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.get_version","uri":"program://AgentLab/function/src.agentlab.benchmarks.abstract_env.get_version#L30-L31","kind":"function","name":"get_version","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":30,"end_line":31,"context_start_line":10,"context_end_line":51,"code":"class AbstractEnvArgs(DataClassJsonMixin):\n @abstractmethod\n def make_env(self, action_mapping, exp_dir, exp_task_kwargs) -> \"AbstractEnv\":\n \"\"\"Create an instance of the environment with the arguments stored in this object.\n\n Args:\n action_mapping (dict[str,str]): mapping from the agent's action space to the environment's action space\n see AbstractActionSet.to_python_code from BrowserGym for an example\n exp_dir (str): directory where the experiment is stored\n exp_task_kwargs (dict[str,Any]): additional arguments for the environment\n\n Returns:\n env (AbstractEnv): instance of the environment.\n \"\"\"\n\n\nclass AbstractBenchmark(BaseModel):\n name: str\n env_args_list: list\n\n def get_version(self) -> int:\n return \"1\"\n\n def prepare_backends(self):\n pass\n\n def dependency_graph_over_tasks(self) -> dict[str, list[str]]:\n return {}\n\n\nclass AbstractEnv(gym.Env, ABC):\n @abstractmethod\n def reset(self, seed: int = None) -> tuple[dict[str, any], dict[str, any]]:\n \"\"\"Reset the environment to the initial state, ready for an agent to start a new episode.\n\n Args:\n seed (int): seed to be used for the environment's random number generator. Some task may\n be deterministic and not require a seed.\n\n Returns:\n obs (dict[str,Any]): dictionary containing the observations\n env_info (dict[str,Any]): additional information about the environment (see step's docstring)","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.prepare_backends","uri":"program://AgentLab/function/src.agentlab.benchmarks.abstract_env.prepare_backends#L33-L34","kind":"function","name":"prepare_backends","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":33,"end_line":34,"context_start_line":13,"context_end_line":54,"code":" \"\"\"Create an instance of the environment with the arguments stored in this object.\n\n Args:\n action_mapping (dict[str,str]): mapping from the agent's action space to the environment's action space\n see AbstractActionSet.to_python_code from BrowserGym for an example\n exp_dir (str): directory where the experiment is stored\n exp_task_kwargs (dict[str,Any]): additional arguments for the environment\n\n Returns:\n env (AbstractEnv): instance of the environment.\n \"\"\"\n\n\nclass AbstractBenchmark(BaseModel):\n name: str\n env_args_list: list\n\n def get_version(self) -> int:\n return \"1\"\n\n def prepare_backends(self):\n pass\n\n def dependency_graph_over_tasks(self) -> dict[str, list[str]]:\n return {}\n\n\nclass AbstractEnv(gym.Env, ABC):\n @abstractmethod\n def reset(self, seed: int = None) -> tuple[dict[str, any], dict[str, any]]:\n \"\"\"Reset the environment to the initial state, ready for an agent to start a new episode.\n\n Args:\n seed (int): seed to be used for the environment's random number generator. 
Some task may\n be deterministic and not require a seed.\n\n Returns:\n obs (dict[str,Any]): dictionary containing the observations\n env_info (dict[str,Any]): additional information about the environment (see step's docstring)\n \"\"\"\n\n @abstractmethod","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.dependency_graph_over_tasks","uri":"program://AgentLab/function/src.agentlab.benchmarks.abstract_env.dependency_graph_over_tasks#L36-L37","kind":"function","name":"dependency_graph_over_tasks","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":36,"end_line":37,"context_start_line":16,"context_end_line":57,"code":" action_mapping (dict[str,str]): mapping from the agent's action space to the environment's action space\n see AbstractActionSet.to_python_code from BrowserGym for an example\n exp_dir (str): directory where the experiment is stored\n exp_task_kwargs (dict[str,Any]): additional arguments for the environment\n\n Returns:\n env (AbstractEnv): instance of the environment.\n \"\"\"\n\n\nclass AbstractBenchmark(BaseModel):\n name: str\n env_args_list: list\n\n def get_version(self) -> int:\n return \"1\"\n\n def prepare_backends(self):\n pass\n\n def dependency_graph_over_tasks(self) -> dict[str, list[str]]:\n return {}\n\n\nclass AbstractEnv(gym.Env, ABC):\n @abstractmethod\n def reset(self, seed: int = None) -> tuple[dict[str, any], dict[str, any]]:\n \"\"\"Reset the environment to the initial state, ready for an agent to start a new episode.\n\n Args:\n seed (int): seed to be used for the environment's random number generator. Some task may\n be deterministic and not require a seed.\n\n Returns:\n obs (dict[str,Any]): dictionary containing the observations\n env_info (dict[str,Any]): additional information about the environment (see step's docstring)\n \"\"\"\n\n @abstractmethod\n def step(self, action: str):\n \"\"\"Exection action in the environment and return the next observations\n","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.reset","uri":"program://AgentLab/function/src.agentlab.benchmarks.abstract_env.reset#L42-L52","kind":"function","name":"reset","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":42,"end_line":52,"context_start_line":22,"context_end_line":72,"code":" env (AbstractEnv): instance of the environment.\n \"\"\"\n\n\nclass AbstractBenchmark(BaseModel):\n name: str\n env_args_list: list\n\n def get_version(self) -> int:\n return \"1\"\n\n def prepare_backends(self):\n pass\n\n def dependency_graph_over_tasks(self) -> dict[str, list[str]]:\n return {}\n\n\nclass AbstractEnv(gym.Env, ABC):\n @abstractmethod\n def reset(self, seed: int = None) -> tuple[dict[str, any], dict[str, any]]:\n \"\"\"Reset the environment to the initial state, ready for an agent to start a new episode.\n\n Args:\n seed (int): seed to be used for the environment's random number generator. 
Some tasks may\n be deterministic and not require a seed.\n\n Returns:\n obs (dict[str,Any]): dictionary containing the observations\n env_info (dict[str,Any]): additional information about the environment (see step's docstring)\n \"\"\"\n\n @abstractmethod\n def step(self, action: str):\n \"\"\"Execute the action in the environment and return the next observation\n\n Args:\n action (str): action to be executed in the environment, as a string\n\n Returns:\n obs (dict[str,Any]): dictionary containing the observations\n reward (float): reward obtained after executing the action\n terminated (bool): whether the episode is terminated. The MDP reached a terminal state\n truncated (bool): whether the episode is truncated. The episode was truncated due to external reasons\n env_info (dict[str,Any]): additional information about the environment\n task_info (str): Some potential debugging information about the task, not intended for the agent\n action_exec_start (float): time when the action execution started\n action_exec_stop (float): time when the action execution ended\n action_exec_timeout (float): TODO I don't remember exactly what this is\n \"\"\"\n","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.step","uri":"program://AgentLab/function/src.agentlab.benchmarks.abstract_env.step#L55-L71","kind":"function","name":"step","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":55,"end_line":71,"context_start_line":35,"context_end_line":91,"code":"\n def dependency_graph_over_tasks(self) -> dict[str, list[str]]:\n return {}\n\n\nclass AbstractEnv(gym.Env, ABC):\n @abstractmethod\n def reset(self, seed: int = None) -> tuple[dict[str, any], dict[str, any]]:\n \"\"\"Reset the environment to the initial state, ready for an agent to start a new episode.\n\n Args:\n seed (int): seed to be used for the environment's random number generator. Some tasks may\n be deterministic and not require a seed.\n\n Returns:\n obs (dict[str,Any]): dictionary containing the observations\n env_info (dict[str,Any]): additional information about the environment (see step's docstring)\n \"\"\"\n\n @abstractmethod\n def step(self, action: str):\n \"\"\"Execute the action in the environment and return the next observation\n\n Args:\n action (str): action to be executed in the environment, as a string\n\n Returns:\n obs (dict[str,Any]): dictionary containing the observations\n reward (float): reward obtained after executing the action\n terminated (bool): whether the episode is terminated. The MDP reached a terminal state\n truncated (bool): whether the episode is truncated. 
The episode was truncated due to external reasons\n env_info (dict[str,Any]): additional information about the environment\n task_info (str): Some potential debugging information about the task, not intended for the agent\n action_exec_start (float): time when the action execution started\n action_exec_stop (float): time when the action execution ended\n action_exec_timeout (float): TODO I don't remember exactly what this is\n \"\"\"\n\n @abstractmethod\n def close(self):\n \"\"\"Close any resources used by the environment\"\"\"\n\n\ndef add_step_timing_to_env_info_decorator(step_func):\n \"\"\"Decorator/wrapper that adds timing information to any step function.\n\n This wrapper can be applied to any step method to automatically\n measure and include action execution timing in the env_info.\n\n Args:\n step_func: The step function to wrap\n\n Returns:\n Wrapped function that includes timing information\n \"\"\"\n\n @wraps(step_func)","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.close","uri":"program://AgentLab/function/src.agentlab.benchmarks.abstract_env.close#L74-L75","kind":"function","name":"close","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":74,"end_line":75,"context_start_line":54,"context_end_line":95,"code":" @abstractmethod\n def step(self, action: str):\n \"\"\"Exection action in the environment and return the next observations\n\n Args:\n action (str): action to be executed in the environment, as a string\n\n Returns:\n obs (dict[str,Any]): dictionary containing the observations\n reward (float): reward obtained after executing the action\n terminated (bool): whether the episode is terminated. The MDP reached a terminal state\n truncated (bool): whether the episode is truncated. 
The episode was truncated due to external reasons\n env_info (dict[str,Any]): additional information about the environment\n task_info (str): Some potential debugging information about the task, not intended for the agent\n action_exec_start (float): time when the action execution started\n action_exec_stop (float): time when the action execution ended\n action_exec_timeout (float): TODO I don't remember exactly what this is\n \"\"\"\n\n @abstractmethod\n def close(self):\n \"\"\"Close any resources used by the environment\"\"\"\n\n\ndef add_step_timing_to_env_info_decorator(step_func):\n \"\"\"Decorator/wrapper that adds timing information to any step function.\n\n This wrapper can be applied to any step method to automatically\n measure and include action execution timing in the env_info.\n\n Args:\n step_func: The step function to wrap\n\n Returns:\n Wrapped function that includes timing information\n \"\"\"\n\n @wraps(step_func)\n def wrapped_step(self, action: str):\n action_exec_start = time.time()\n obs, reward, terminated, truncated, env_info = step_func(self, action)\n action_exec_stop = time.time()","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.benchmarks.abstract_env.wrapped_step","uri":"program://AgentLab/function/src.agentlab.benchmarks.abstract_env.wrapped_step#L92-L108","kind":"function","name":"wrapped_step","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":92,"end_line":108,"context_start_line":72,"context_end_line":110,"code":"\n @abstractmethod\n def close(self):\n \"\"\"Close any resources used by the environment\"\"\"\n\n\ndef add_step_timing_to_env_info_decorator(step_func):\n \"\"\"Decorator/wrapper that adds timing information to any step function.\n\n This wrapper can be applied to any step method to automatically\n measure and include action execution timing in the env_info.\n\n Args:\n step_func: The step function to wrap\n\n Returns:\n Wrapped function that includes timing information\n \"\"\"\n\n @wraps(step_func)\n def wrapped_step(self, action: str):\n action_exec_start = time.time()\n obs, reward, terminated, truncated, env_info = step_func(self, action)\n action_exec_stop = time.time()\n\n # Ensure env_info is a dictionary\n if env_info is None:\n env_info = {}\n\n if \"action_exec_start\" not in env_info:\n env_info[\"action_exec_start\"] = action_exec_start\n if \"action_exec_stop\" not in env_info:\n env_info[\"action_exec_stop\"] = action_exec_stop\n if \"action_exec_timeout\" not in env_info:\n env_info[\"action_exec_timeout\"] = 0.0 # Default to 0, override if needed\n\n return obs, reward, terminated, truncated, env_info\n\n return wrapped_step","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.debug_agent","uri":"program://AgentLab/module/src.agentlab.agents.debug_agent#L1-L90","kind":"module","name":"src.agentlab.agents.debug_agent","path":"src/agentlab/agents/debug_agent.py","language":"python","start_line":1,"end_line":90,"context_start_line":1,"context_end_line":90,"code":"from copy import deepcopy\nfrom dataclasses import asdict, dataclass\nfrom functools import partial\n\nimport bgym\nfrom browsergym.experiments.agent import Agent, AgentInfo\nfrom browsergym.utils.obs import flatten_axtree_to_str, flatten_dom_to_str, overlay_som, prune_html\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom 
agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import ParseError, image_to_png_base64_url, parse_html_tags_raise, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\n@dataclass\nclass DebugAgentArgs(AgentArgs):\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"debug\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n self.action_set_args = bgym.DEFAULT_BENCHMARKS[\n \"miniwob_tiny_test\"\n ]().high_level_action_set_args\n self.use_html = False\n\n def set_benchmark(self, benchmark: bgym.Benchmark, demo_mode):\n if benchmark.name.startswith(\"miniwob\"):\n self.use_html = True\n self.action_set_args = benchmark.high_level_action_set_args\n\n def make_agent(self):\n return DebugAgent(self.action_set_args, use_html=self.use_html)\n\n\nclass DebugAgent(Agent):\n def __init__(\n self,\n action_set_args,\n use_html=False,\n ):\n self.action_set = action_set_args.make_action_set()\n self.use_html = use_html\n\n def obs_preprocessor(self, obs):\n obs = deepcopy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,\n with_bounding_box_coords=True,\n filter_visible_only=False,\n filter_with_bid_only=False,\n filter_som_only=False,\n )\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,\n with_bounding_box_coords=True,\n filter_visible_only=False,\n filter_with_bid_only=False,\n filter_som_only=False,\n )\n obs[\"pruned_html\"] = prune_html(obs[\"dom_txt\"])\n obs[\"screenshot_som\"] = overlay_som(\n obs[\"screenshot\"], extra_properties=obs[\"extra_element_properties\"]\n )\n return obs\n\n def get_action(self, obs):\n\n # print(obs[\"pruned_html\"])\n print(\"\\n\")\n observation = obs[\"pruned_html\"] if self.use_html else obs[\"axtree_txt\"]\n action = input(observation + \"\\n\")\n agent_info = AgentInfo(\n think=\"nope\",\n chat_messages=[],\n stats={},\n )\n return action, agent_info\n\n\nDEBUG_AGENT = DebugAgentArgs()","source_hash":"48d9323219b0658be63c19e3f41255000da7c606419646f214814b733a4ef6ff","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.debug_agent.DebugAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.debug_agent.DebugAgentArgs#L16-L34","kind":"class","name":"DebugAgentArgs","path":"src/agentlab/agents/debug_agent.py","language":"python","start_line":16,"end_line":34,"context_start_line":1,"context_end_line":54,"code":"from copy import deepcopy\nfrom dataclasses import asdict, dataclass\nfrom functools import partial\n\nimport bgym\nfrom browsergym.experiments.agent import Agent, AgentInfo\nfrom browsergym.utils.obs import flatten_axtree_to_str, flatten_dom_to_str, overlay_som, prune_html\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import ParseError, image_to_png_base64_url, parse_html_tags_raise, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\n@dataclass\nclass DebugAgentArgs(AgentArgs):\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"debug\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n self.action_set_args = 
bgym.DEFAULT_BENCHMARKS[\n \"miniwob_tiny_test\"\n ]().high_level_action_set_args\n self.use_html = False\n\n def set_benchmark(self, benchmark: bgym.Benchmark, demo_mode):\n if benchmark.name.startswith(\"miniwob\"):\n self.use_html = True\n self.action_set_args = benchmark.high_level_action_set_args\n\n def make_agent(self):\n return DebugAgent(self.action_set_args, use_html=self.use_html)\n\n\nclass DebugAgent(Agent):\n def __init__(\n self,\n action_set_args,\n use_html=False,\n ):\n self.action_set = action_set_args.make_action_set()\n self.use_html = use_html\n\n def obs_preprocessor(self, obs):\n obs = deepcopy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,\n with_bounding_box_coords=True,","source_hash":"48d9323219b0658be63c19e3f41255000da7c606419646f214814b733a4ef6ff","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.debug_agent.DebugAgent","uri":"program://AgentLab/class/src.agentlab.agents.debug_agent.DebugAgent#L37-L87","kind":"class","name":"DebugAgent","path":"src/agentlab/agents/debug_agent.py","language":"python","start_line":37,"end_line":87,"context_start_line":17,"context_end_line":90,"code":"\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"debug\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n self.action_set_args = bgym.DEFAULT_BENCHMARKS[\n \"miniwob_tiny_test\"\n ]().high_level_action_set_args\n self.use_html = False\n\n def set_benchmark(self, benchmark: bgym.Benchmark, demo_mode):\n if benchmark.name.startswith(\"miniwob\"):\n self.use_html = True\n self.action_set_args = benchmark.high_level_action_set_args\n\n def make_agent(self):\n return DebugAgent(self.action_set_args, use_html=self.use_html)\n\n\nclass DebugAgent(Agent):\n def __init__(\n self,\n action_set_args,\n use_html=False,\n ):\n self.action_set = action_set_args.make_action_set()\n self.use_html = use_html\n\n def obs_preprocessor(self, obs):\n obs = deepcopy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,\n with_bounding_box_coords=True,\n filter_visible_only=False,\n filter_with_bid_only=False,\n filter_som_only=False,\n )\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,\n with_bounding_box_coords=True,\n filter_visible_only=False,\n filter_with_bid_only=False,\n filter_som_only=False,\n )\n obs[\"pruned_html\"] = prune_html(obs[\"dom_txt\"])\n obs[\"screenshot_som\"] = overlay_som(\n obs[\"screenshot\"], extra_properties=obs[\"extra_element_properties\"]\n )\n return obs\n\n def get_action(self, obs):\n\n # print(obs[\"pruned_html\"])\n print(\"\\n\")\n observation = obs[\"pruned_html\"] if self.use_html else obs[\"axtree_txt\"]\n action = input(observation + \"\\n\")\n agent_info = AgentInfo(\n think=\"nope\",\n chat_messages=[],\n stats={},\n )\n return action, agent_info\n\n\nDEBUG_AGENT = DebugAgentArgs()","source_hash":"48d9323219b0658be63c19e3f41255000da7c606419646f214814b733a4ef6ff","truncated":false} 
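A hypothetical usage sketch for the DebugAgent records above — nothing here is repo code, and it only assumes that "miniwob_tiny_test" is a valid bgym.DEFAULT_BENCHMARKS key, as the __post_init__ above already relies on. It shows the intended lifecycle: build the args, let set_benchmark flip use_html for miniwob benchmarks, then make_agent.

```python
# Sketch only: assumes bgym and agentlab are installed; the benchmark key
# mirrors the one used in DebugAgentArgs.__post_init__ above.
import bgym

from agentlab.agents.debug_agent import DebugAgentArgs

agent_args = DebugAgentArgs()

# For benchmarks whose name starts with "miniwob", set_benchmark switches the
# agent to pruned HTML observations instead of the flattened AXTree.
benchmark = bgym.DEFAULT_BENCHMARKS["miniwob_tiny_test"]()
agent_args.set_benchmark(benchmark, demo_mode=False)

agent = agent_args.make_agent()  # a DebugAgent that reads actions from stdin
```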
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.debug_agent.__post_init__","uri":"program://AgentLab/function/src.agentlab.agents.debug_agent.__post_init__#L18-L26","kind":"function","name":"__post_init__","path":"src/agentlab/agents/debug_agent.py","language":"python","start_line":18,"end_line":26,"context_start_line":1,"context_end_line":46,"code":"from copy import deepcopy\nfrom dataclasses import asdict, dataclass\nfrom functools import partial\n\nimport bgym\nfrom browsergym.experiments.agent import Agent, AgentInfo\nfrom browsergym.utils.obs import flatten_axtree_to_str, flatten_dom_to_str, overlay_som, prune_html\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import ParseError, image_to_png_base64_url, parse_html_tags_raise, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\n@dataclass\nclass DebugAgentArgs(AgentArgs):\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"debug\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n self.action_set_args = bgym.DEFAULT_BENCHMARKS[\n \"miniwob_tiny_test\"\n ]().high_level_action_set_args\n self.use_html = False\n\n def set_benchmark(self, benchmark: bgym.Benchmark, demo_mode):\n if benchmark.name.startswith(\"miniwob\"):\n self.use_html = True\n self.action_set_args = benchmark.high_level_action_set_args\n\n def make_agent(self):\n return DebugAgent(self.action_set_args, use_html=self.use_html)\n\n\nclass DebugAgent(Agent):\n def __init__(\n self,\n action_set_args,\n use_html=False,\n ):\n self.action_set = action_set_args.make_action_set()\n self.use_html = use_html\n\n def obs_preprocessor(self, obs):","source_hash":"48d9323219b0658be63c19e3f41255000da7c606419646f214814b733a4ef6ff","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.debug_agent.set_benchmark","uri":"program://AgentLab/function/src.agentlab.agents.debug_agent.set_benchmark#L28-L31","kind":"function","name":"set_benchmark","path":"src/agentlab/agents/debug_agent.py","language":"python","start_line":28,"end_line":31,"context_start_line":8,"context_end_line":51,"code":"\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import ParseError, image_to_png_base64_url, parse_html_tags_raise, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\n@dataclass\nclass DebugAgentArgs(AgentArgs):\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"debug\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n self.action_set_args = bgym.DEFAULT_BENCHMARKS[\n \"miniwob_tiny_test\"\n ]().high_level_action_set_args\n self.use_html = False\n\n def set_benchmark(self, benchmark: bgym.Benchmark, demo_mode):\n if benchmark.name.startswith(\"miniwob\"):\n self.use_html = True\n self.action_set_args = benchmark.high_level_action_set_args\n\n def make_agent(self):\n return DebugAgent(self.action_set_args, use_html=self.use_html)\n\n\nclass DebugAgent(Agent):\n def __init__(\n self,\n action_set_args,\n use_html=False,\n ):\n self.action_set = action_set_args.make_action_set()\n self.use_html = use_html\n\n def obs_preprocessor(self, obs):\n obs = deepcopy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n 
with_visible=True,","source_hash":"48d9323219b0658be63c19e3f41255000da7c606419646f214814b733a4ef6ff","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.debug_agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.debug_agent.make_agent#L33-L34","kind":"function","name":"make_agent","path":"src/agentlab/agents/debug_agent.py","language":"python","start_line":33,"end_line":34,"context_start_line":13,"context_end_line":54,"code":"\n\n@dataclass\nclass DebugAgentArgs(AgentArgs):\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"debug\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n self.action_set_args = bgym.DEFAULT_BENCHMARKS[\n \"miniwob_tiny_test\"\n ]().high_level_action_set_args\n self.use_html = False\n\n def set_benchmark(self, benchmark: bgym.Benchmark, demo_mode):\n if benchmark.name.startswith(\"miniwob\"):\n self.use_html = True\n self.action_set_args = benchmark.high_level_action_set_args\n\n def make_agent(self):\n return DebugAgent(self.action_set_args, use_html=self.use_html)\n\n\nclass DebugAgent(Agent):\n def __init__(\n self,\n action_set_args,\n use_html=False,\n ):\n self.action_set = action_set_args.make_action_set()\n self.use_html = use_html\n\n def obs_preprocessor(self, obs):\n obs = deepcopy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,\n with_bounding_box_coords=True,","source_hash":"48d9323219b0658be63c19e3f41255000da7c606419646f214814b733a4ef6ff","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.debug_agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.debug_agent.__init__#L38-L44","kind":"function","name":"__init__","path":"src/agentlab/agents/debug_agent.py","language":"python","start_line":38,"end_line":44,"context_start_line":18,"context_end_line":64,"code":" def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"debug\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n self.action_set_args = bgym.DEFAULT_BENCHMARKS[\n \"miniwob_tiny_test\"\n ]().high_level_action_set_args\n self.use_html = False\n\n def set_benchmark(self, benchmark: bgym.Benchmark, demo_mode):\n if benchmark.name.startswith(\"miniwob\"):\n self.use_html = True\n self.action_set_args = benchmark.high_level_action_set_args\n\n def make_agent(self):\n return DebugAgent(self.action_set_args, use_html=self.use_html)\n\n\nclass DebugAgent(Agent):\n def __init__(\n self,\n action_set_args,\n use_html=False,\n ):\n self.action_set = action_set_args.make_action_set()\n self.use_html = use_html\n\n def obs_preprocessor(self, obs):\n obs = deepcopy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,\n with_bounding_box_coords=True,\n filter_visible_only=False,\n filter_with_bid_only=False,\n filter_som_only=False,\n )\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,","source_hash":"48d9323219b0658be63c19e3f41255000da7c606419646f214814b733a4ef6ff","truncated":false} 
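obs_preprocessor in the surrounding records follows a copy-then-annotate pattern: it deep-copies the raw observation, then adds derived views (dom_txt, axtree_txt, pruned_html, screenshot_som) without mutating the environment's obs. A toy sketch of that pattern follows; `fake_obs` and `annotate_obs` are placeholders, not real BrowserGym or AgentLab names.

```python
# Toy illustration of the copy-then-annotate pattern; the derived value is a
# placeholder standing in for prune_html(dom_txt) etc.
from copy import deepcopy


def annotate_obs(obs: dict) -> dict:
    obs = deepcopy(obs)  # same first step as DebugAgent.obs_preprocessor
    obs["pruned_html"] = "<placeholder/>"  # stand-in for the derived views
    return obs


fake_obs = {"screenshot": None}
enriched = annotate_obs(fake_obs)
assert "pruned_html" not in fake_obs  # the raw observation is left untouched
```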
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.debug_agent.obs_preprocessor","uri":"program://AgentLab/function/src.agentlab.agents.debug_agent.obs_preprocessor#L46-L74","kind":"function","name":"obs_preprocessor","path":"src/agentlab/agents/debug_agent.py","language":"python","start_line":46,"end_line":74,"context_start_line":26,"context_end_line":90,"code":" self.use_html = False\n\n def set_benchmark(self, benchmark: bgym.Benchmark, demo_mode):\n if benchmark.name.startswith(\"miniwob\"):\n self.use_html = True\n self.action_set_args = benchmark.high_level_action_set_args\n\n def make_agent(self):\n return DebugAgent(self.action_set_args, use_html=self.use_html)\n\n\nclass DebugAgent(Agent):\n def __init__(\n self,\n action_set_args,\n use_html=False,\n ):\n self.action_set = action_set_args.make_action_set()\n self.use_html = use_html\n\n def obs_preprocessor(self, obs):\n obs = deepcopy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,\n with_bounding_box_coords=True,\n filter_visible_only=False,\n filter_with_bid_only=False,\n filter_som_only=False,\n )\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,\n with_bounding_box_coords=True,\n filter_visible_only=False,\n filter_with_bid_only=False,\n filter_som_only=False,\n )\n obs[\"pruned_html\"] = prune_html(obs[\"dom_txt\"])\n obs[\"screenshot_som\"] = overlay_som(\n obs[\"screenshot\"], extra_properties=obs[\"extra_element_properties\"]\n )\n return obs\n\n def get_action(self, obs):\n\n # print(obs[\"pruned_html\"])\n print(\"\\n\")\n observation = obs[\"pruned_html\"] if self.use_html else obs[\"axtree_txt\"]\n action = input(observation + \"\\n\")\n agent_info = AgentInfo(\n think=\"nope\",\n chat_messages=[],\n stats={},\n )\n return action, agent_info\n\n\nDEBUG_AGENT = DebugAgentArgs()","source_hash":"48d9323219b0658be63c19e3f41255000da7c606419646f214814b733a4ef6ff","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.debug_agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.debug_agent.get_action#L76-L87","kind":"function","name":"get_action","path":"src/agentlab/agents/debug_agent.py","language":"python","start_line":76,"end_line":87,"context_start_line":56,"context_end_line":90,"code":" filter_with_bid_only=False,\n filter_som_only=False,\n )\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=True,\n with_clickable=True,\n with_center_coords=True,\n with_bounding_box_coords=True,\n filter_visible_only=False,\n filter_with_bid_only=False,\n filter_som_only=False,\n )\n obs[\"pruned_html\"] = prune_html(obs[\"dom_txt\"])\n obs[\"screenshot_som\"] = overlay_som(\n obs[\"screenshot\"], extra_properties=obs[\"extra_element_properties\"]\n )\n return obs\n\n def get_action(self, obs):\n\n # print(obs[\"pruned_html\"])\n print(\"\\n\")\n observation = obs[\"pruned_html\"] if self.use_html else obs[\"axtree_txt\"]\n action = input(observation + \"\\n\")\n agent_info = AgentInfo(\n think=\"nope\",\n chat_messages=[],\n stats={},\n )\n return action, agent_info\n\n\nDEBUG_AGENT = DebugAgentArgs()","source_hash":"48d9323219b0658be63c19e3f41255000da7c606419646f214814b733a4ef6ff","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.agent_utils","uri":"program://AgentLab/module/src.agentlab.agents.agent_utils#L1-L156","kind":"module","name":"src.agentlab.agents.agent_utils","path":"src/agentlab/agents/agent_utils.py","language":"python","start_line":1,"end_line":156,"context_start_line":1,"context_end_line":156,"code":"import copy\n\nfrom PIL import Image, ImageDraw\nfrom playwright.sync_api import Page\n\nfrom agentlab.analyze import overlay_utils\nfrom agentlab.llm.llm_utils import img_to_base_64\n\n\ndef draw_mouse_pointer(image: Image.Image, x: int, y: int) -> Image.Image:\n \"\"\"\n Draws a semi-transparent mouse pointer at (x, y) on the image.\n Returns a new image with the pointer drawn.\n\n Args:\n image: The image to draw the mouse pointer on.\n x: The x coordinate for the mouse pointer.\n y: The y coordinate for the mouse pointer.\n\n Returns:\n A new image with the mouse pointer drawn.\n \"\"\"\n pointer_size = 20 # Length of the pointer\n overlay = image.convert(\"RGBA\").copy()\n draw = ImageDraw.Draw(overlay)\n\n # Define pointer shape (a simple arrow)\n pointer_shape = [\n (x, y),\n (x + pointer_size, y + pointer_size // 2),\n (x + pointer_size // 2, y + pointer_size // 2),\n (x + pointer_size // 2, y + pointer_size),\n ]\n\n draw.polygon(pointer_shape, fill=(0, 0, 0, 128)) # 50% transparent black\n\n return Image.alpha_composite(image.convert(\"RGBA\"), overlay)\n\n\ndef draw_arrowhead(draw, start, end, arrow_length=15, arrow_angle=30):\n from math import atan2, cos, radians, sin\n\n angle = atan2(end[1] - start[1], end[0] - start[0])\n left = (\n end[0] - arrow_length * cos(angle - radians(arrow_angle)),\n end[1] - arrow_length * sin(angle - radians(arrow_angle)),\n )\n right = (\n end[0] - arrow_length * cos(angle + radians(arrow_angle)),\n end[1] - arrow_length * sin(angle + radians(arrow_angle)),\n )\n draw.line([end, left], fill=\"red\", width=4)\n draw.line([end, right], fill=\"red\", width=4)\n\n\ndef draw_click_indicator(image: Image.Image, x: int, y: int) -> Image.Image:\n \"\"\"\n Draws a click indicator (+ shape with disconnected lines) at (x, y) on the image.\n Returns a new image with the click indicator drawn.\n\n Args:\n image: The image to draw the click indicator on.\n x: The x coordinate for the click indicator.\n y: The y coordinate for the click indicator.\n\n Returns:\n A new image with the click indicator drawn.\n \"\"\"\n line_length = 10 # Length of each line segment\n gap = 4 # Gap from center point\n line_width = 2 # Thickness of lines\n\n overlay = image.convert(\"RGBA\").copy()\n draw = ImageDraw.Draw(overlay)\n\n # Draw 4 lines forming a + shape with gaps in the center\n # Each line has a white outline and black center for visibility on any background\n\n # Top line\n draw.line(\n [(x, y - gap - line_length), (x, y - gap)], fill=(255, 255, 255, 200), width=line_width + 2\n ) # White outline\n draw.line(\n [(x, y - gap - line_length), (x, y - gap)], fill=(0, 0, 0, 255), width=line_width\n ) # Black center\n\n # Bottom line\n draw.line(\n [(x, y + gap), (x, y + gap + line_length)], fill=(255, 255, 255, 200), width=line_width + 2\n ) # White outline\n draw.line(\n [(x, y + gap), (x, y + gap + line_length)], fill=(0, 0, 0, 255), width=line_width\n ) # Black center\n\n # Left line\n draw.line(\n [(x - gap - line_length, y), (x - gap, y)], fill=(255, 255, 255, 200), width=line_width + 2\n ) # White outline\n draw.line(\n [(x - gap - line_length, y), (x - gap, y)], fill=(0, 0, 0, 255), width=line_width\n ) # Black 
center\n\n # Right line\n draw.line(\n [(x + gap, y), (x + gap + line_length, y)], fill=(255, 255, 255, 200), width=line_width + 2\n ) # White outline\n draw.line(\n [(x + gap, y), (x + gap + line_length, y)], fill=(0, 0, 0, 255), width=line_width\n ) # Black center\n\n return Image.alpha_composite(image.convert(\"RGBA\"), overlay)\n\n\ndef zoom_webpage(page: Page, zoom_factor: float = 1.5):\n \"\"\"\n Zooms the webpage to the specified zoom factor.\n\n NOTE: Click actions with bid doesn't work properly when zoomed in.\n\n Args:\n page: The Playwright Page object.\n zoom_factor: The zoom factor to apply (default is 1.5).\n\n Returns:\n Page: The modified Playwright Page object.\n\n Raises:\n ValueError: If zoom_factor is less than or equal to 0.\n \"\"\"\n\n if zoom_factor <= 0:\n raise ValueError(\"Zoom factor must be greater than 0.\")\n\n page.evaluate(f\"document.documentElement.style.zoom='{zoom_factor*100}%'\")\n return page\n\n\ndef overlay_action(obs, action):\n \"\"\"Overlays actions on screenshot in-place\"\"\"\n act_img = copy.deepcopy(obs[\"screenshot\"])\n act_img = Image.fromarray(act_img)\n\n new_obs_properties = copy.deepcopy(obs[\"extra_element_properties\"])\n import os\n\n if os.getenv(\"AGENTLAB_USE_RETINA\"):\n # HACK: divide everything by 2 in the obs\n # TODO: make this more robust by changing login in annotate_action directly (or maybe in the obs section?)\n for key, value in new_obs_properties.items():\n try:\n new_obs_properties[key][\"bbox\"] = [elem / 2 for elem in value[\"bbox\"]]\n except:\n pass\n\n overlay_utils.annotate_action(act_img, action, properties=new_obs_properties)\n return img_to_base_64(act_img)","source_hash":"4462d17e470b9949b50ce3d5d90d0e1bb0197e37747f67e2d3c56f4f25a87451","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.agent_utils.draw_mouse_pointer","uri":"program://AgentLab/function/src.agentlab.agents.agent_utils.draw_mouse_pointer#L10-L37","kind":"function","name":"draw_mouse_pointer","path":"src/agentlab/agents/agent_utils.py","language":"python","start_line":10,"end_line":37,"context_start_line":1,"context_end_line":57,"code":"import copy\n\nfrom PIL import Image, ImageDraw\nfrom playwright.sync_api import Page\n\nfrom agentlab.analyze import overlay_utils\nfrom agentlab.llm.llm_utils import img_to_base_64\n\n\ndef draw_mouse_pointer(image: Image.Image, x: int, y: int) -> Image.Image:\n \"\"\"\n Draws a semi-transparent mouse pointer at (x, y) on the image.\n Returns a new image with the pointer drawn.\n\n Args:\n image: The image to draw the mouse pointer on.\n x: The x coordinate for the mouse pointer.\n y: The y coordinate for the mouse pointer.\n\n Returns:\n A new image with the mouse pointer drawn.\n \"\"\"\n pointer_size = 20 # Length of the pointer\n overlay = image.convert(\"RGBA\").copy()\n draw = ImageDraw.Draw(overlay)\n\n # Define pointer shape (a simple arrow)\n pointer_shape = [\n (x, y),\n (x + pointer_size, y + pointer_size // 2),\n (x + pointer_size // 2, y + pointer_size // 2),\n (x + pointer_size // 2, y + pointer_size),\n ]\n\n draw.polygon(pointer_shape, fill=(0, 0, 0, 128)) # 50% transparent black\n\n return Image.alpha_composite(image.convert(\"RGBA\"), overlay)\n\n\ndef draw_arrowhead(draw, start, end, arrow_length=15, arrow_angle=30):\n from math import atan2, cos, radians, sin\n\n angle = atan2(end[1] - start[1], end[0] - start[0])\n left = (\n end[0] - arrow_length * cos(angle - radians(arrow_angle)),\n end[1] - arrow_length * sin(angle - radians(arrow_angle)),\n )\n right = (\n 
end[0] - arrow_length * cos(angle + radians(arrow_angle)),\n end[1] - arrow_length * sin(angle + radians(arrow_angle)),\n )\n draw.line([end, left], fill=\"red\", width=4)\n draw.line([end, right], fill=\"red\", width=4)\n\n\ndef draw_click_indicator(image: Image.Image, x: int, y: int) -> Image.Image:\n \"\"\"","source_hash":"4462d17e470b9949b50ce3d5d90d0e1bb0197e37747f67e2d3c56f4f25a87451","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.agent_utils.draw_arrowhead","uri":"program://AgentLab/function/src.agentlab.agents.agent_utils.draw_arrowhead#L40-L53","kind":"function","name":"draw_arrowhead","path":"src/agentlab/agents/agent_utils.py","language":"python","start_line":40,"end_line":53,"context_start_line":20,"context_end_line":73,"code":" Returns:\n A new image with the mouse pointer drawn.\n \"\"\"\n pointer_size = 20 # Length of the pointer\n overlay = image.convert(\"RGBA\").copy()\n draw = ImageDraw.Draw(overlay)\n\n # Define pointer shape (a simple arrow)\n pointer_shape = [\n (x, y),\n (x + pointer_size, y + pointer_size // 2),\n (x + pointer_size // 2, y + pointer_size // 2),\n (x + pointer_size // 2, y + pointer_size),\n ]\n\n draw.polygon(pointer_shape, fill=(0, 0, 0, 128)) # 50% transparent black\n\n return Image.alpha_composite(image.convert(\"RGBA\"), overlay)\n\n\ndef draw_arrowhead(draw, start, end, arrow_length=15, arrow_angle=30):\n from math import atan2, cos, radians, sin\n\n angle = atan2(end[1] - start[1], end[0] - start[0])\n left = (\n end[0] - arrow_length * cos(angle - radians(arrow_angle)),\n end[1] - arrow_length * sin(angle - radians(arrow_angle)),\n )\n right = (\n end[0] - arrow_length * cos(angle + radians(arrow_angle)),\n end[1] - arrow_length * sin(angle + radians(arrow_angle)),\n )\n draw.line([end, left], fill=\"red\", width=4)\n draw.line([end, right], fill=\"red\", width=4)\n\n\ndef draw_click_indicator(image: Image.Image, x: int, y: int) -> Image.Image:\n \"\"\"\n Draws a click indicator (+ shape with disconnected lines) at (x, y) on the image.\n Returns a new image with the click indicator drawn.\n\n Args:\n image: The image to draw the click indicator on.\n x: The x coordinate for the click indicator.\n y: The y coordinate for the click indicator.\n\n Returns:\n A new image with the click indicator drawn.\n \"\"\"\n line_length = 10 # Length of each line segment\n gap = 4 # Gap from center point\n line_width = 2 # Thickness of lines\n\n overlay = image.convert(\"RGBA\").copy()","source_hash":"4462d17e470b9949b50ce3d5d90d0e1bb0197e37747f67e2d3c56f4f25a87451","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.agent_utils.draw_click_indicator","uri":"program://AgentLab/function/src.agentlab.agents.agent_utils.draw_click_indicator#L56-L111","kind":"function","name":"draw_click_indicator","path":"src/agentlab/agents/agent_utils.py","language":"python","start_line":56,"end_line":111,"context_start_line":36,"context_end_line":131,"code":"\n return Image.alpha_composite(image.convert(\"RGBA\"), overlay)\n\n\ndef draw_arrowhead(draw, start, end, arrow_length=15, arrow_angle=30):\n from math import atan2, cos, radians, sin\n\n angle = atan2(end[1] - start[1], end[0] - start[0])\n left = (\n end[0] - arrow_length * cos(angle - radians(arrow_angle)),\n end[1] - arrow_length * sin(angle - radians(arrow_angle)),\n )\n right = (\n end[0] - arrow_length * cos(angle + radians(arrow_angle)),\n end[1] - arrow_length * sin(angle + radians(arrow_angle)),\n )\n draw.line([end, left], fill=\"red\", width=4)\n 
draw.line([end, right], fill=\"red\", width=4)\n\n\ndef draw_click_indicator(image: Image.Image, x: int, y: int) -> Image.Image:\n \"\"\"\n Draws a click indicator (+ shape with disconnected lines) at (x, y) on the image.\n Returns a new image with the click indicator drawn.\n\n Args:\n image: The image to draw the click indicator on.\n x: The x coordinate for the click indicator.\n y: The y coordinate for the click indicator.\n\n Returns:\n A new image with the click indicator drawn.\n \"\"\"\n line_length = 10 # Length of each line segment\n gap = 4 # Gap from center point\n line_width = 2 # Thickness of lines\n\n overlay = image.convert(\"RGBA\").copy()\n draw = ImageDraw.Draw(overlay)\n\n # Draw 4 lines forming a + shape with gaps in the center\n # Each line has a white outline and black center for visibility on any background\n\n # Top line\n draw.line(\n [(x, y - gap - line_length), (x, y - gap)], fill=(255, 255, 255, 200), width=line_width + 2\n ) # White outline\n draw.line(\n [(x, y - gap - line_length), (x, y - gap)], fill=(0, 0, 0, 255), width=line_width\n ) # Black center\n\n # Bottom line\n draw.line(\n [(x, y + gap), (x, y + gap + line_length)], fill=(255, 255, 255, 200), width=line_width + 2\n ) # White outline\n draw.line(\n [(x, y + gap), (x, y + gap + line_length)], fill=(0, 0, 0, 255), width=line_width\n ) # Black center\n\n # Left line\n draw.line(\n [(x - gap - line_length, y), (x - gap, y)], fill=(255, 255, 255, 200), width=line_width + 2\n ) # White outline\n draw.line(\n [(x - gap - line_length, y), (x - gap, y)], fill=(0, 0, 0, 255), width=line_width\n ) # Black center\n\n # Right line\n draw.line(\n [(x + gap, y), (x + gap + line_length, y)], fill=(255, 255, 255, 200), width=line_width + 2\n ) # White outline\n draw.line(\n [(x + gap, y), (x + gap + line_length, y)], fill=(0, 0, 0, 255), width=line_width\n ) # Black center\n\n return Image.alpha_composite(image.convert(\"RGBA\"), overlay)\n\n\ndef zoom_webpage(page: Page, zoom_factor: float = 1.5):\n \"\"\"\n Zooms the webpage to the specified zoom factor.\n\n NOTE: Click actions with bid doesn't work properly when zoomed in.\n\n Args:\n page: The Playwright Page object.\n zoom_factor: The zoom factor to apply (default is 1.5).\n\n Returns:\n Page: The modified Playwright Page object.\n\n Raises:\n ValueError: If zoom_factor is less than or equal to 0.\n \"\"\"\n\n if zoom_factor <= 0:","source_hash":"4462d17e470b9949b50ce3d5d90d0e1bb0197e37747f67e2d3c56f4f25a87451","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.agent_utils.zoom_webpage","uri":"program://AgentLab/function/src.agentlab.agents.agent_utils.zoom_webpage#L114-L135","kind":"function","name":"zoom_webpage","path":"src/agentlab/agents/agent_utils.py","language":"python","start_line":114,"end_line":135,"context_start_line":94,"context_end_line":155,"code":"\n # Left line\n draw.line(\n [(x - gap - line_length, y), (x - gap, y)], fill=(255, 255, 255, 200), width=line_width + 2\n ) # White outline\n draw.line(\n [(x - gap - line_length, y), (x - gap, y)], fill=(0, 0, 0, 255), width=line_width\n ) # Black center\n\n # Right line\n draw.line(\n [(x + gap, y), (x + gap + line_length, y)], fill=(255, 255, 255, 200), width=line_width + 2\n ) # White outline\n draw.line(\n [(x + gap, y), (x + gap + line_length, y)], fill=(0, 0, 0, 255), width=line_width\n ) # Black center\n\n return Image.alpha_composite(image.convert(\"RGBA\"), overlay)\n\n\ndef zoom_webpage(page: Page, zoom_factor: float = 1.5):\n \"\"\"\n Zooms the webpage to the 
specified zoom factor.\n\n NOTE: Click actions with bid doesn't work properly when zoomed in.\n\n Args:\n page: The Playwright Page object.\n zoom_factor: The zoom factor to apply (default is 1.5).\n\n Returns:\n Page: The modified Playwright Page object.\n\n Raises:\n ValueError: If zoom_factor is less than or equal to 0.\n \"\"\"\n\n if zoom_factor <= 0:\n raise ValueError(\"Zoom factor must be greater than 0.\")\n\n page.evaluate(f\"document.documentElement.style.zoom='{zoom_factor*100}%'\")\n return page\n\n\ndef overlay_action(obs, action):\n \"\"\"Overlays actions on screenshot in-place\"\"\"\n act_img = copy.deepcopy(obs[\"screenshot\"])\n act_img = Image.fromarray(act_img)\n\n new_obs_properties = copy.deepcopy(obs[\"extra_element_properties\"])\n import os\n\n if os.getenv(\"AGENTLAB_USE_RETINA\"):\n # HACK: divide everything by 2 in the obs\n # TODO: make this more robust by changing login in annotate_action directly (or maybe in the obs section?)\n for key, value in new_obs_properties.items():\n try:\n new_obs_properties[key][\"bbox\"] = [elem / 2 for elem in value[\"bbox\"]]\n except:\n pass\n\n overlay_utils.annotate_action(act_img, action, properties=new_obs_properties)","source_hash":"4462d17e470b9949b50ce3d5d90d0e1bb0197e37747f67e2d3c56f4f25a87451","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.agent_utils.overlay_action","uri":"program://AgentLab/function/src.agentlab.agents.agent_utils.overlay_action#L138-L156","kind":"function","name":"overlay_action","path":"src/agentlab/agents/agent_utils.py","language":"python","start_line":138,"end_line":156,"context_start_line":118,"context_end_line":156,"code":" NOTE: Click actions with bid doesn't work properly when zoomed in.\n\n Args:\n page: The Playwright Page object.\n zoom_factor: The zoom factor to apply (default is 1.5).\n\n Returns:\n Page: The modified Playwright Page object.\n\n Raises:\n ValueError: If zoom_factor is less than or equal to 0.\n \"\"\"\n\n if zoom_factor <= 0:\n raise ValueError(\"Zoom factor must be greater than 0.\")\n\n page.evaluate(f\"document.documentElement.style.zoom='{zoom_factor*100}%'\")\n return page\n\n\ndef overlay_action(obs, action):\n \"\"\"Overlays actions on screenshot in-place\"\"\"\n act_img = copy.deepcopy(obs[\"screenshot\"])\n act_img = Image.fromarray(act_img)\n\n new_obs_properties = copy.deepcopy(obs[\"extra_element_properties\"])\n import os\n\n if os.getenv(\"AGENTLAB_USE_RETINA\"):\n # HACK: divide everything by 2 in the obs\n # TODO: make this more robust by changing login in annotate_action directly (or maybe in the obs section?)\n for key, value in new_obs_properties.items():\n try:\n new_obs_properties[key][\"bbox\"] = [elem / 2 for elem in value[\"bbox\"]]\n except:\n pass\n\n overlay_utils.annotate_action(act_img, action, properties=new_obs_properties)\n return img_to_base_64(act_img)","source_hash":"4462d17e470b9949b50ce3d5d90d0e1bb0197e37747f67e2d3c56f4f25a87451","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.agent_args","uri":"program://AgentLab/module/src.agentlab.agents.agent_args#L1-L47","kind":"module","name":"src.agentlab.agents.agent_args","path":"src/agentlab/agents/agent_args.py","language":"python","start_line":1,"end_line":47,"context_start_line":1,"context_end_line":47,"code":"import bgym\nfrom bgym import AbstractAgentArgs, Benchmark\n\n\nclass AgentArgs(AbstractAgentArgs):\n \"\"\"Base class for agent arguments for instantiating an agent.\n\n Define agent arguments as dataclass variables of 
this class. For example:\n\n class MyAgentArgs(AgentArgs):\n my_arg: str = \"default_value\"\n my_other_arg: int = 42\n\n Note: for working properly with AgentXRay, the arguments need to be serializable and hasable.\n \"\"\"\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n \"\"\"Optional method to set benchmark specific flags.\n\n This allows the agent to have minor adjustments based on the benchmark.\n E.g. using a benchmark specific action space. Or letting the agent see\n HTML on MiniWoB since AXTree is not enough. Users should avoid making\n extensive benchmark specific prompt engineering.\n\n Args:\n benchmark: str\n Name of the benchmark.\n demo_mode: bool\n If True, the agent should adapt to demo mode. E.g. it can set\n the demo_mode flag in the browsergym action space.\n \"\"\"\n pass\n\n def set_reproducibility_mode(self):\n \"\"\"Optional method to set the agent in a reproducibility mode.\n\n This should adjust the agent configuration to make it as deterministic\n as possible e.g. setting the temperature of the model to 0.\n\n This is only called when reproducibility is requested.\n\n Raises:\n NotImplementedError: If the agent does not support reproducibility.\n \"\"\"\n raise NotImplementedError(\n f\"set_reproducibility_mode is not implemented for agent_args {self.__class__.__name__}\"\n )","source_hash":"d03b7139142f9e096b89377ebb32196c2727f77c251de82f9ae26f189add791a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.agent_args.AgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.agent_args.AgentArgs#L5-L47","kind":"class","name":"AgentArgs","path":"src/agentlab/agents/agent_args.py","language":"python","start_line":5,"end_line":47,"context_start_line":1,"context_end_line":47,"code":"import bgym\nfrom bgym import AbstractAgentArgs, Benchmark\n\n\nclass AgentArgs(AbstractAgentArgs):\n \"\"\"Base class for agent arguments for instantiating an agent.\n\n Define agent arguments as dataclass variables of this class. For example:\n\n class MyAgentArgs(AgentArgs):\n my_arg: str = \"default_value\"\n my_other_arg: int = 42\n\n Note: for working properly with AgentXRay, the arguments need to be serializable and hasable.\n \"\"\"\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n \"\"\"Optional method to set benchmark specific flags.\n\n This allows the agent to have minor adjustments based on the benchmark.\n E.g. using a benchmark specific action space. Or letting the agent see\n HTML on MiniWoB since AXTree is not enough. Users should avoid making\n extensive benchmark specific prompt engineering.\n\n Args:\n benchmark: str\n Name of the benchmark.\n demo_mode: bool\n If True, the agent should adapt to demo mode. E.g. it can set\n the demo_mode flag in the browsergym action space.\n \"\"\"\n pass\n\n def set_reproducibility_mode(self):\n \"\"\"Optional method to set the agent in a reproducibility mode.\n\n This should adjust the agent configuration to make it as deterministic\n as possible e.g. 
setting the temperature of the model to 0.\n\n This is only called when reproducibility is requested.\n\n Raises:\n NotImplementedError: If the agent does not support reproducibility.\n \"\"\"\n raise NotImplementedError(\n f\"set_reproducibility_mode is not implemented for agent_args {self.__class__.__name__}\"\n )","source_hash":"d03b7139142f9e096b89377ebb32196c2727f77c251de82f9ae26f189add791a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.agent_args.set_benchmark","uri":"program://AgentLab/function/src.agentlab.agents.agent_args.set_benchmark#L17-L32","kind":"function","name":"set_benchmark","path":"src/agentlab/agents/agent_args.py","language":"python","start_line":17,"end_line":32,"context_start_line":1,"context_end_line":47,"code":"import bgym\nfrom bgym import AbstractAgentArgs, Benchmark\n\n\nclass AgentArgs(AbstractAgentArgs):\n \"\"\"Base class for agent arguments for instantiating an agent.\n\n Define agent arguments as dataclass variables of this class. For example:\n\n class MyAgentArgs(AgentArgs):\n my_arg: str = \"default_value\"\n my_other_arg: int = 42\n\n Note: for working properly with AgentXRay, the arguments need to be serializable and hasable.\n \"\"\"\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n \"\"\"Optional method to set benchmark specific flags.\n\n This allows the agent to have minor adjustments based on the benchmark.\n E.g. using a benchmark specific action space. Or letting the agent see\n HTML on MiniWoB since AXTree is not enough. Users should avoid making\n extensive benchmark specific prompt engineering.\n\n Args:\n benchmark: str\n Name of the benchmark.\n demo_mode: bool\n If True, the agent should adapt to demo mode. E.g. it can set\n the demo_mode flag in the browsergym action space.\n \"\"\"\n pass\n\n def set_reproducibility_mode(self):\n \"\"\"Optional method to set the agent in a reproducibility mode.\n\n This should adjust the agent configuration to make it as deterministic\n as possible e.g. setting the temperature of the model to 0.\n\n This is only called when reproducibility is requested.\n\n Raises:\n NotImplementedError: If the agent does not support reproducibility.\n \"\"\"\n raise NotImplementedError(\n f\"set_reproducibility_mode is not implemented for agent_args {self.__class__.__name__}\"\n )","source_hash":"d03b7139142f9e096b89377ebb32196c2727f77c251de82f9ae26f189add791a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.agent_args.set_reproducibility_mode","uri":"program://AgentLab/function/src.agentlab.agents.agent_args.set_reproducibility_mode#L34-L47","kind":"function","name":"set_reproducibility_mode","path":"src/agentlab/agents/agent_args.py","language":"python","start_line":34,"end_line":47,"context_start_line":14,"context_end_line":47,"code":" Note: for working properly with AgentXRay, the arguments need to be serializable and hasable.\n \"\"\"\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n \"\"\"Optional method to set benchmark specific flags.\n\n This allows the agent to have minor adjustments based on the benchmark.\n E.g. using a benchmark specific action space. Or letting the agent see\n HTML on MiniWoB since AXTree is not enough. Users should avoid making\n extensive benchmark specific prompt engineering.\n\n Args:\n benchmark: str\n Name of the benchmark.\n demo_mode: bool\n If True, the agent should adapt to demo mode. E.g. 
it can set\n the demo_mode flag in the browsergym action space.\n \"\"\"\n pass\n\n def set_reproducibility_mode(self):\n \"\"\"Optional method to set the agent in a reproducibility mode.\n\n This should adjust the agent configuration to make it as deterministic\n as possible e.g. setting the temperature of the model to 0.\n\n This is only called when reproducibility is requested.\n\n Raises:\n NotImplementedError: If the agent does not support reproducibility.\n \"\"\"\n raise NotImplementedError(\n f\"set_reproducibility_mode is not implemented for agent_args {self.__class__.__name__}\"\n )","source_hash":"d03b7139142f9e096b89377ebb32196c2727f77c251de82f9ae26f189add791a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting","uri":"program://AgentLab/module/src.agentlab.agents.dynamic_prompting#L1-L876","kind":"module","name":"src.agentlab.agents.dynamic_prompting","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":1,"end_line":876,"context_start_line":1,"context_end_line":876,"code":"import abc\nimport logging\nimport platform\nimport time\nfrom copy import copy, deepcopy\nfrom dataclasses import asdict, dataclass\nfrom textwrap import dedent\nfrom typing import Literal\nfrom warnings import warn\n\nimport bgym\nfrom bgym import HighLevelActionSetArgs\nfrom browsergym.core.action.base import AbstractActionSet\nfrom browsergym.utils.obs import flatten_axtree_to_str, flatten_dom_to_str, overlay_som, prune_html\n\nfrom agentlab.llm.llm_utils import (\n BaseMessage,\n ParseError,\n count_tokens,\n extract_code_blocks,\n image_to_jpg_base64_url,\n parse_html_tags_raise,\n)\n\n\nclass Flags:\n \"\"\"Base class for flags. Mostly for backward compatibility.\"\"\"\n\n def copy(self):\n return deepcopy(self)\n\n def asdict(self):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n return asdict(self)\n\n @classmethod\n def from_dict(self, flags_dict):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n if isinstance(flags_dict, ObsFlags):\n return flags_dict\n\n if not isinstance(flags_dict, dict):\n raise ValueError(f\"Unregcognized type for flags_dict of type {type(flags_dict)}.\")\n return ObsFlags(**flags_dict)\n\n\n@dataclass\nclass ObsFlags(Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n\n Attributes:\n use_html (bool): Use the HTML in the prompt.\n use_ax_tree (bool): Use the accessibility tree in the prompt.\n use_focused_element (bool): Provide the ID of the focused element.\n use_error_logs (bool): Expose the previous error in the prompt.\n use_history (bool): Enable history of previous steps in the prompt.\n use_past_error_logs (bool): If use_history is True, expose all previous errors in the history.\n use_action_history (bool): If use_history is True, include the actions in the history.\n use_think_history (bool): If use_history is True, include all previous chains of thoughts in the history.\n use_diff (bool): Add a diff of the current and previous HTML to the prompt.\n html_type (str): Type of HTML to use in the prompt, may depend on preprocessing of observation.\n use_screenshot (bool): Add a screenshot of the page to the prompt, following OpenAI's API. 
This will be automatically disabled if the model does not have vision capabilities.\n use_som (bool): Add a set of marks to the screenshot.\n extract_visible_tag (bool): Add a \"visible\" tag to visible elements in the AXTree.\n extract_clickable_tag (bool): Add a \"clickable\" tag to clickable elements in the AXTree.\n extract_coords (Literal['False', 'center', 'box']): Add the coordinates of the elements.\n filter_visible_elements_only (bool): Only show visible elements in the AXTree.\n \"\"\"\n\n use_html: bool = True\n use_ax_tree: bool = False\n use_tabs: bool = False\n use_focused_element: bool = False\n use_error_logs: bool = False\n use_history: bool = False\n use_past_error_logs: bool = False\n use_action_history: bool = False\n use_think_history: bool = False\n use_diff: bool = False #\n html_type: str = \"pruned_html\"\n use_screenshot: bool = True\n use_som: bool = False\n extract_visible_tag: bool = False\n extract_clickable_tag: bool = False\n extract_coords: Literal[\"False\", \"center\", \"box\"] = \"False\"\n filter_visible_elements_only: bool = False\n # low sets the token count of each image to 65 (85?)\n # high sets the token count of each image to 2*65 (2*85?) times the amount of 512x512px patches\n # auto chooses between low and high based on image size (openai default)\n openai_vision_detail: Literal[\"low\", \"high\", \"auto\"] = \"auto\"\n filter_with_bid_only: bool = False\n filter_som_only: bool = False\n\n\n@dataclass\nclass ActionFlags(Flags):\n action_set: HighLevelActionSetArgs = None # should be set by the set_benchmark method\n long_description: bool = True\n individual_examples: bool = False\n\n # for backward compatibility\n multi_actions: bool = None\n is_strict: bool = None\n\n\nclass PromptElement:\n \"\"\"Base class for all prompt elements. Prompt elements can be hidden.\"\"\"\n\n _prompt = \"\"\n _abstract_ex = \"\"\n _concrete_ex = \"\"\n\n def __init__(self, visible: bool = True) -> None:\n \"\"\"Prompt element that can be hidden.\n\n Args:\n visible : bool, optional\n Whether the prompt element should be visible, by default True. Can\n be a callable that returns a bool. This is useful when a specific\n flag changes during a shrink iteration.\n \"\"\"\n self._visible = visible\n\n @property\n def prompt(self) -> str | BaseMessage:\n \"\"\"Avoid overriding this method. Override _prompt instead.\"\"\"\n if self.is_visible:\n return self._prompt\n else:\n return \"\"\n\n @property\n def abstract_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.\n Provide an abstract example of the answer here. See Memory for an\n example.\n\n Avoid overriding this method. Override _abstract_ex instead\n\n Returns:\n str: The abstract example\n \"\"\"\n if self.is_visible:\n return self._abstract_ex\n else:\n return \"\"\n\n @property\n def concrete_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.\n Provide a concrete example of the answer here. See Memory for an\n example.\n\n Avoid overriding this method. 
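Because ObsFlags is a dataclass and Flags provides the asdict/from_dict helpers, a flag configuration can round-trip through a plain dict, which is what the JSON-serializability requirement relies on. A small sketch (the chosen flag values are arbitrary):

```python
from agentlab.agents.dynamic_prompting import ObsFlags

flags = ObsFlags(use_ax_tree=True, use_error_logs=True, use_screenshot=False)

flags_dict = flags.asdict()  # dataclass -> plain JSON-serializable dict

# from_dict accepts an ObsFlags instance (returned as-is) or a dict.
restored = ObsFlags.from_dict(flags_dict)
assert restored == flags  # dataclass equality compares field values
```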
Override _concrete_ex instead\n\n Returns:\n str: The concrete example\n \"\"\"\n if self.is_visible:\n return self._concrete_ex\n else:\n return \"\"\n\n @property\n def is_visible(self):\n \"\"\"Handle the case where visible is a callable.\"\"\"\n visible = self._visible\n if callable(visible):\n visible = visible()\n return visible\n\n def _parse_answer(self, text_answer):\n \"\"\"Override to actually extract elements from the answer.\"\"\"\n return {}\n\n def parse_answer(self, text_answer) -> dict:\n if self.is_visible:\n return self._parse_answer(text_answer)\n else:\n return {}\n\n\nclass Shrinkable(PromptElement, abc.ABC):\n @abc.abstractmethod\n def shrink(self) -> None:\n \"\"\"Implement shrinking of this prompt element.\n\n You need to recursively call all shrinkable elements that are part of\n this prompt. You can also implement a shriking startegy for this prompt.\n Shrinking is can be called multiple times to progressively shrink the\n prompt until it fits max_tokens. Default max shrink iterations is 20.\n \"\"\"\n pass\n\n\nclass Trunkater(Shrinkable):\n \"\"\"Shrinkable element that truncates the prompt element from the bottom\n after a certain number of iterations.\"\"\"\n\n def __init__(self, visible, shrink_speed=0.3, start_trunkate_iteration=10):\n super().__init__(visible=visible)\n self.shrink_speed = shrink_speed\n self.start_trunkate_iteration = start_trunkate_iteration\n self.shrink_calls = 0\n self.deleted_lines = 0\n\n def shrink(self) -> None:\n if self.is_visible and self.shrink_calls >= self.start_trunkate_iteration:\n # remove the fraction of _prompt\n lines = self._prompt.splitlines()\n new_line_count = int(len(lines) * (1 - self.shrink_speed))\n self.deleted_lines += len(lines) - new_line_count\n self._prompt = \"\\n\".join(lines[:new_line_count])\n self._prompt += f\"\\n... Deleted {self.deleted_lines} lines to reduce prompt size.\"\n\n self.shrink_calls += 1\n\n\ndef fit_tokens(\n shrinkable: Shrinkable,\n max_prompt_tokens=None,\n max_iterations=20,\n model_name=\"openai/gpt-4\",\n additional_prompts=[\"\"],\n):\n \"\"\"Shrink a prompt element until it fits `max_prompt_tokens`.\n\n Args:\n shrinkable (Shrinkable): The prompt element to shrink.\n max_prompt_tokens (int): The maximum number of tokens allowed.\n max_iterations (int, optional): The maximum number of shrink iterations, by default 20.\n model_name (str, optional): The name of the model used when tokenizing.\n additional_prompts (str or List[str], optional): Additional prompts to account for when shrinking, by default [\"\"].\n\n Returns:\n str: the prompt after shrinking.\n\n Raises:\n ValueError: Unrecognized type for prompt\n \"\"\"\n\n if max_prompt_tokens is None:\n return shrinkable.prompt\n\n if isinstance(additional_prompts, str):\n additional_prompts = [additional_prompts]\n\n for prompt in additional_prompts:\n max_prompt_tokens -= count_tokens(prompt, model=model_name) + 1 # +1 because why not ?\n\n for _ in range(max_iterations):\n prompt = shrinkable.prompt\n if isinstance(prompt, str):\n prompt_str = prompt\n elif isinstance(prompt, list):\n # warn deprecated\n warn(\n \"Using list of prompts is deprecated. 
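The PromptElement contract is small: override _prompt (plus _abstract_ex and _concrete_ex when the element requests an answer from the LLM) and _parse_answer to extract it; visibility gating is handled by the base class. A sketch of a custom element under those rules (ThinkBlock and its regex parsing are illustrative; the repo's own elements use parse_html_tags_raise from agentlab.llm.llm_utils):

```python
import re

from agentlab.agents.dynamic_prompting import PromptElement


class ThinkBlock(PromptElement):
    # Hypothetical element asking the LLM for a <think> section.
    _prompt = "\nWrap your reasoning in <think>...</think> before the action.\n"
    _abstract_ex = "\n<think>\nYour step-by-step reasoning.\n</think>\n"
    _concrete_ex = "\n<think>\nThe search box has bid 'a12'; fill it first.\n</think>\n"

    def _parse_answer(self, text_answer):
        match = re.search(r"<think>(.*?)</think>", text_answer, re.DOTALL)
        return {"think": match.group(1).strip()} if match else {}


# Hidden elements contribute nothing and parse to an empty dict:
block = ThinkBlock(visible=False)
assert block.prompt == ""
assert block.parse_answer("<think>x</think>") == {}
```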
Use a Discussion object instead.\",\n DeprecationWarning,\n )\n prompt_str = \"\\n\".join([p[\"text\"] for p in prompt if p[\"type\"] == \"text\"])\n elif isinstance(prompt, BaseMessage):\n prompt_str = prompt.__str__(warn_if_image=False)\n else:\n raise ValueError(f\"Unrecognized type for prompt: {type(prompt)}\")\n n_token = count_tokens(prompt_str, model=model_name)\n if n_token <= max_prompt_tokens:\n return prompt\n shrinkable.shrink()\n\n logging.info(\n dedent(\n f\"\"\"\\\n After {max_iterations} shrink iterations, the prompt is still\n {count_tokens(prompt_str)} tokens (greater than {max_prompt_tokens}). Returning the prompt as is.\"\"\"\n )\n )\n return prompt\n\n\nclass HTML(Trunkater):\n def __init__(self, html, visible_elements_only: bool, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible, start_trunkate_iteration=5)\n if visible_elements_only:\n visible_elements_note = \"\"\"\\\nNote: only elements that are visible in the viewport are presented. You might need to scroll the page, or open tabs or menus to see more.\n\n\"\"\"\n else:\n visible_elements_note = \"\"\n self._prompt = f\"\\n{prefix}HTML:\\n{visible_elements_note}{html}\\n\"\n\n\nclass AXTree(Trunkater):\n def __init__(\n self,\n ax_tree,\n visible_elements_only: bool,\n visible: bool = True,\n coord_type=None,\n visible_tag=True,\n prefix=\"\",\n ) -> None:\n super().__init__(visible=visible, start_trunkate_iteration=10)\n bid_info = \"\"\"\\\nNote: [bid] is the unique alpha-numeric identifier at the beginning of lines for each element in the AXTree. Always use bid to refer to elements in your actions.\n\n\"\"\"\n if coord_type == \"center\":\n coord_note = \"\"\"\\\nNote: center coordinates are provided in parenthesis and are relative to the top left corner of the page.\n\n\"\"\"\n elif coord_type == \"box\":\n coord_note = \"\"\"\\\nNote: bounding box of each object are provided in parenthesis and are relative to the top left corner of the page.\n\n\"\"\"\n else:\n coord_note = \"\"\n if visible_elements_only:\n visible_elements_note = \"\"\"\\\nNote: only elements that are visible in the viewport are presented. You might need to scroll the page, or open tabs or menus to see more.\n\n\"\"\"\n else:\n visible_elements_note = \"\"\n\n if visible_tag:\n vsible_tag_note = \"\"\"\\\nNote: You can only interact with visible elements. 
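fit_tokens is the driver for this shrink loop: measure with count_tokens, call shrink(), repeat until the budget fits or max_iterations runs out, then log and return the oversized prompt as-is. A usage sketch with an HTML element, assuming count_tokens can tokenize for the default "openai/gpt-4" model name:

```python
from agentlab.agents.dynamic_prompting import HTML, fit_tokens

# A toy page large enough to need truncation.
big_html = "\n".join(f"<p bid='{i}'>row {i}</p>" for i in range(2000))
element = HTML(big_html, visible_elements_only=False)

# HTML starts truncating after 5 shrink calls (start_trunkate_iteration=5),
# then drops ~30% of its remaining lines on each further call.
prompt = fit_tokens(element, max_prompt_tokens=2000, model_name="openai/gpt-4")
print(prompt.splitlines()[-1])  # "... Deleted N lines to reduce prompt size."
```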
If the \"visible\" tag is not\npresent, the element is not visible on the page.\n\n\"\"\"\n else:\n vsible_tag_note = \"\"\n self._prompt = f\"\\n{prefix}AXTree:\\n{bid_info}{coord_note}{visible_elements_note}{vsible_tag_note}{ax_tree}\\n\"\n\n\nclass Error(PromptElement):\n def __init__(self, error: str, visible: bool = True, prefix=\"\", limit_logs=True) -> None:\n logs_separator = \"Call log:\"\n if limit_logs and logs_separator in error:\n error, logs = error.split(logs_separator)\n logs = \"\\n\".join(logs.split(\"\\n\")[:10])\n error = error + f\"\\n{logs_separator}\\n{logs}\"\n\n super().__init__(visible=visible)\n self._prompt = f\"\\n{prefix}Error from previous action:\\n{error}\\n\"\n\n\nclass FocusedElement(PromptElement):\n def __init__(self, bid, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\n{prefix}Focused element:\n\"\"\"\n if bid:\n self._prompt += f\"\"\"\\\nbid={repr(bid)}\n\"\"\"\n else:\n self._prompt += f\"\"\"\\\nNone\n\"\"\"\n\n\nclass Tabs(PromptElement):\n def __init__(self, obs, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible)\n self.obs = obs\n self.prefix = prefix\n\n @property\n def _prompt(self) -> str:\n # by implementing this as a property, it's only coputed if visible\n prompt_pieces = [f\"\\n{self.prefix}Currently open tabs:\"]\n for page_index, (page_url, page_title) in enumerate(\n zip(self.obs[\"open_pages_urls\"], self.obs[\"open_pages_titles\"])\n ):\n active_or_not = \" (active tab)\" if page_index == self.obs[\"active_page_index\"] else \"\"\n prompt_piece = f\"\"\"\\\nTab {page_index}{active_or_not}:\n Title: {page_title}\n URL: {page_url}\n\"\"\"\n prompt_pieces.append(prompt_piece)\n return \"\\n\".join(prompt_pieces)\n\n\nclass Observation(Shrinkable):\n \"\"\"Observation of the current step.\n\n Contains the html, the accessibility tree and the error logs.\n \"\"\"\n\n def __init__(self, obs, flags: ObsFlags) -> None:\n super().__init__()\n self.flags = flags\n self.obs = obs\n\n self.tabs = Tabs(\n obs,\n visible=lambda: flags.use_tabs,\n prefix=\"## \",\n )\n\n self.html = HTML(\n obs[flags.html_type],\n visible_elements_only=flags.filter_visible_elements_only,\n visible=lambda: flags.use_html,\n prefix=\"## \",\n )\n self.ax_tree = AXTree(\n obs[\"axtree_txt\"],\n visible_elements_only=flags.filter_visible_elements_only,\n visible=lambda: flags.use_ax_tree,\n coord_type=flags.extract_coords,\n visible_tag=flags.extract_visible_tag,\n prefix=\"## \",\n )\n self.error = Error(\n obs[\"last_action_error\"],\n visible=lambda: flags.use_error_logs and obs[\"last_action_error\"],\n prefix=\"## \",\n )\n self.focused_element = FocusedElement(\n obs[\"focused_element_bid\"],\n visible=flags.use_focused_element,\n prefix=\"## \",\n )\n\n def shrink(self):\n self.ax_tree.shrink()\n self.html.shrink()\n\n @property\n def _prompt(self) -> str:\n return f\"\"\"\n# Observation of current step:\n{self.tabs.prompt}{self.html.prompt}{self.ax_tree.prompt}{self.focused_element.prompt}{self.error.prompt}\n\n\"\"\"\n\n def add_screenshot(self, prompt: BaseMessage) -> BaseMessage:\n if self.flags.use_screenshot:\n if self.flags.use_som:\n screenshot = self.obs[\"screenshot_som\"]\n prompt.add_text(\n \"\\n## Screenshot:\\nHere is a screenshot of the page, it is annotated with bounding boxes and corresponding bids:\"\n )\n else:\n screenshot = self.obs[\"screenshot\"]\n prompt.add_text(\"\\n## Screenshot:\\nHere is a screenshot of the page:\")\n img_url = 
image_to_jpg_base64_url(screenshot)\n prompt.add_image(img_url, detail=self.flags.openai_vision_detail)\n return prompt\n\n\nclass MacNote(PromptElement):\n def __init__(self) -> None:\n super().__init__(visible=platform.system() == \"Darwin\")\n self._prompt = (\n \"\\nNote: you are on mac so you should use Meta instead of Control for Control+C etc.\\n\"\n )\n\n\nclass BeCautious(PromptElement):\n def __init__(self, visible: bool = True) -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\\\n\\nBe very cautious. Avoid submitting anything before verifying the effect of your\nactions. Take the time to explore the effect of safe actions first. For example\nyou can fill a few elements of a form, but don't click submit before verifying\nthat everything was filled correctly.\\n\"\"\"\n\n\nclass GoalInstructions(PromptElement):\n def __init__(self, goal_object, visible: bool = True, extra_instructions=None) -> None:\n super().__init__(visible)\n self._prompt = [\n dict(\n type=\"text\",\n text=f\"\"\"\\\n# Instructions\nReview the current state of the page and all other information to find the best\npossible next action to accomplish your goal. Your answer will be interpreted\nand executed by a program, make sure to follow the formatting instructions.\n\n## Goal:\n\"\"\",\n )\n ]\n\n self._prompt += goal_object\n\n if extra_instructions:\n self._prompt += [\n dict(\n type=\"text\",\n text=f\"\"\"\n\n## Extra instructions:\n\n{extra_instructions}\n\"\"\",\n )\n ]\n\n\nclass ChatInstructions(PromptElement):\n def __init__(self, chat_messages, visible: bool = True, extra_instructions=None) -> None:\n super().__init__(visible)\n self._prompt = f\"\"\"\\\n# Instructions\n\nYou are a UI Assistant, your goal is to help the user perform tasks using a web browser. You can\ncommunicate with the user via a chat, in which the user gives you instructions and in which you\ncan send back messages. You have access to a web browser that both you and the user can see,\nand with which only you can interact via specific commands.\n\nReview the instructions from the user, the current state of the page and all other information\nto find the best possible next action to accomplish your goal. Your answer will be interpreted\nand executed by a program, make sure to follow the formatting instructions.\n\n## Chat messages:\n\n\"\"\"\n self._prompt += \"\\n\".join(\n [\n f\"\"\"\\\n - [{msg['role']}] UTC Time: {time.asctime(time.gmtime(msg['timestamp']))} - Local Time: {time.asctime(time.localtime(msg['timestamp']))} - {msg['message']}\"\"\"\n for msg in chat_messages\n ]\n )\n\n if extra_instructions:\n self._prompt += f\"\"\"\n\n## Extra instructions:\n\n{extra_instructions}\n\"\"\"\n\n\nclass Hints(PromptElement):\n \"\"\"Not super useful and stale.\"\"\"\n\n # NOTE: are these hints still relevant?\n _prompt = \"\"\"\\\nNote:\n* Some tasks may be game like and may require to interact with the mouse position\nin x, y coordinates.\n* Some text field might have auto completion. 
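GoalInstructions keeps _prompt as a list of {"type": "text", ...} content blocks so that goal_object, which may already mix text and image blocks, can simply be concatenated in. A text-only sketch (the goal and extra instructions are made up):

```python
from agentlab.agents.dynamic_prompting import GoalInstructions

goal_object = [{"type": "text", "text": "Add a blue mug to the cart."}]

instructions = GoalInstructions(
    goal_object,
    extra_instructions="Do not complete the payment step.",
)

# prompt is a list of content blocks: header, goal, then extra instructions.
for block in instructions.prompt:
    print(block["type"], "->", block["text"][:40].strip())
```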
To see it, you have to type a few\ncharacters and wait until next step.\n* If you have to cut and paste, don't forget to select the text first.\n* Coordinates inside an SVG are relative to its top left corner.\n* Make sure to use bid to identify elements when using commands.\n* Interacting with comboboxes, dropdowns and auto-complete fields can be tricky,\nsometimes you need to use select_option, while other times you need to use fill\nor click and wait for the reaction of the page.\n\"\"\"\n\n\nclass SystemPrompt(PromptElement):\n _prompt = \"\"\"\\\nYou are an agent trying to solve a web task based on the content of the page and\nuser instructions. You can interact with the page and explore, and send messages to the user. Each time you\nsubmit an action it will be sent to the browser and you will receive a new page.\"\"\"\n\n\nclass ActionPrompt(PromptElement):\n\n _concrete_ex = \"\"\"\n\nclick('a324')\n\n\"\"\"\n\n def __init__(self, action_set: AbstractActionSet, action_flags: ActionFlags) -> None:\n# ... truncated ...","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":true} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.Flags","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.Flags#L26-L44","kind":"class","name":"Flags","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":26,"end_line":44,"context_start_line":6,"context_end_line":64,"code":"from dataclasses import asdict, dataclass\nfrom textwrap import dedent\nfrom typing import Literal\nfrom warnings import warn\n\nimport bgym\nfrom bgym import HighLevelActionSetArgs\nfrom browsergym.core.action.base import AbstractActionSet\nfrom browsergym.utils.obs import flatten_axtree_to_str, flatten_dom_to_str, overlay_som, prune_html\n\nfrom agentlab.llm.llm_utils import (\n BaseMessage,\n ParseError,\n count_tokens,\n extract_code_blocks,\n image_to_jpg_base64_url,\n parse_html_tags_raise,\n)\n\n\nclass Flags:\n \"\"\"Base class for flags. 
Mostly for backward compatibility.\"\"\"\n\n def copy(self):\n return deepcopy(self)\n\n def asdict(self):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n return asdict(self)\n\n @classmethod\n def from_dict(self, flags_dict):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n if isinstance(flags_dict, ObsFlags):\n return flags_dict\n\n if not isinstance(flags_dict, dict):\n raise ValueError(f\"Unregcognized type for flags_dict of type {type(flags_dict)}.\")\n return ObsFlags(**flags_dict)\n\n\n@dataclass\nclass ObsFlags(Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n\n Attributes:\n use_html (bool): Use the HTML in the prompt.\n use_ax_tree (bool): Use the accessibility tree in the prompt.\n use_focused_element (bool): Provide the ID of the focused element.\n use_error_logs (bool): Expose the previous error in the prompt.\n use_history (bool): Enable history of previous steps in the prompt.\n use_past_error_logs (bool): If use_history is True, expose all previous errors in the history.\n use_action_history (bool): If use_history is True, include the actions in the history.\n use_think_history (bool): If use_history is True, include all previous chains of thoughts in the history.\n use_diff (bool): Add a diff of the current and previous HTML to the prompt.\n html_type (str): Type of HTML to use in the prompt, may depend on preprocessing of observation.\n use_screenshot (bool): Add a screenshot of the page to the prompt, following OpenAI's API. This will be automatically disabled if the model does not have vision capabilities.\n use_som (bool): Add a set of marks to the screenshot.","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.ObsFlags","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.ObsFlags#L48-L93","kind":"class","name":"ObsFlags","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":48,"end_line":93,"context_start_line":28,"context_end_line":113,"code":"\n def copy(self):\n return deepcopy(self)\n\n def asdict(self):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n return asdict(self)\n\n @classmethod\n def from_dict(self, flags_dict):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n if isinstance(flags_dict, ObsFlags):\n return flags_dict\n\n if not isinstance(flags_dict, dict):\n raise ValueError(f\"Unregcognized type for flags_dict of type {type(flags_dict)}.\")\n return ObsFlags(**flags_dict)\n\n\n@dataclass\nclass ObsFlags(Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n\n Attributes:\n use_html (bool): Use the HTML in the prompt.\n use_ax_tree (bool): Use the accessibility tree in the prompt.\n use_focused_element (bool): Provide the ID of the focused element.\n use_error_logs (bool): Expose the previous error in the prompt.\n use_history (bool): Enable history of previous steps in the prompt.\n use_past_error_logs (bool): If use_history is True, expose all previous errors in the history.\n use_action_history (bool): If use_history is True, include the actions in the history.\n use_think_history (bool): If use_history is True, include all previous chains of thoughts in the history.\n use_diff (bool): Add a diff of the current and previous HTML to the prompt.\n html_type (str): Type of HTML to use in the prompt, may depend on preprocessing of observation.\n 
use_screenshot (bool): Add a screenshot of the page to the prompt, following OpenAI's API. This will be automatically disabled if the model does not have vision capabilities.\n use_som (bool): Add a set of marks to the screenshot.\n extract_visible_tag (bool): Add a \"visible\" tag to visible elements in the AXTree.\n extract_clickable_tag (bool): Add a \"clickable\" tag to clickable elements in the AXTree.\n extract_coords (Literal['False', 'center', 'box']): Add the coordinates of the elements.\n filter_visible_elements_only (bool): Only show visible elements in the AXTree.\n \"\"\"\n\n use_html: bool = True\n use_ax_tree: bool = False\n use_tabs: bool = False\n use_focused_element: bool = False\n use_error_logs: bool = False\n use_history: bool = False\n use_past_error_logs: bool = False\n use_action_history: bool = False\n use_think_history: bool = False\n use_diff: bool = False #\n html_type: str = \"pruned_html\"\n use_screenshot: bool = True\n use_som: bool = False\n extract_visible_tag: bool = False\n extract_clickable_tag: bool = False\n extract_coords: Literal[\"False\", \"center\", \"box\"] = \"False\"\n filter_visible_elements_only: bool = False\n # low sets the token count of each image to 65 (85?)\n # high sets the token count of each image to 2*65 (2*85?) times the amount of 512x512px patches\n # auto chooses between low and high based on image size (openai default)\n openai_vision_detail: Literal[\"low\", \"high\", \"auto\"] = \"auto\"\n filter_with_bid_only: bool = False\n filter_som_only: bool = False\n\n\n@dataclass\nclass ActionFlags(Flags):\n action_set: HighLevelActionSetArgs = None # should be set by the set_benchmark method\n long_description: bool = True\n individual_examples: bool = False\n\n # for backward compatibility\n multi_actions: bool = None\n is_strict: bool = None\n\n\nclass PromptElement:\n \"\"\"Base class for all prompt elements. Prompt elements can be hidden.\"\"\"\n\n _prompt = \"\"\n _abstract_ex = \"\"\n _concrete_ex = \"\"\n","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.ActionFlags","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.ActionFlags#L97-L104","kind":"class","name":"ActionFlags","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":97,"end_line":104,"context_start_line":77,"context_end_line":124,"code":" use_past_error_logs: bool = False\n use_action_history: bool = False\n use_think_history: bool = False\n use_diff: bool = False #\n html_type: str = \"pruned_html\"\n use_screenshot: bool = True\n use_som: bool = False\n extract_visible_tag: bool = False\n extract_clickable_tag: bool = False\n extract_coords: Literal[\"False\", \"center\", \"box\"] = \"False\"\n filter_visible_elements_only: bool = False\n # low sets the token count of each image to 65 (85?)\n # high sets the token count of each image to 2*65 (2*85?) 
times the amount of 512x512px patches\n # auto chooses between low and high based on image size (openai default)\n openai_vision_detail: Literal[\"low\", \"high\", \"auto\"] = \"auto\"\n filter_with_bid_only: bool = False\n filter_som_only: bool = False\n\n\n@dataclass\nclass ActionFlags(Flags):\n action_set: HighLevelActionSetArgs = None # should be set by the set_benchmark method\n long_description: bool = True\n individual_examples: bool = False\n\n # for backward compatibility\n multi_actions: bool = None\n is_strict: bool = None\n\n\nclass PromptElement:\n \"\"\"Base class for all prompt elements. Prompt elements can be hidden.\"\"\"\n\n _prompt = \"\"\n _abstract_ex = \"\"\n _concrete_ex = \"\"\n\n def __init__(self, visible: bool = True) -> None:\n \"\"\"Prompt element that can be hidden.\n\n Args:\n visible : bool, optional\n Whether the prompt element should be visible, by default True. Can\n be a callable that returns a bool. This is useful when a specific\n flag changes during a shrink iteration.\n \"\"\"\n self._visible = visible\n","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.PromptElement","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.PromptElement#L107-L181","kind":"class","name":"PromptElement","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":107,"end_line":181,"context_start_line":87,"context_end_line":201,"code":" filter_visible_elements_only: bool = False\n # low sets the token count of each image to 65 (85?)\n # high sets the token count of each image to 2*65 (2*85?) times the amount of 512x512px patches\n # auto chooses between low and high based on image size (openai default)\n openai_vision_detail: Literal[\"low\", \"high\", \"auto\"] = \"auto\"\n filter_with_bid_only: bool = False\n filter_som_only: bool = False\n\n\n@dataclass\nclass ActionFlags(Flags):\n action_set: HighLevelActionSetArgs = None # should be set by the set_benchmark method\n long_description: bool = True\n individual_examples: bool = False\n\n # for backward compatibility\n multi_actions: bool = None\n is_strict: bool = None\n\n\nclass PromptElement:\n \"\"\"Base class for all prompt elements. Prompt elements can be hidden.\"\"\"\n\n _prompt = \"\"\n _abstract_ex = \"\"\n _concrete_ex = \"\"\n\n def __init__(self, visible: bool = True) -> None:\n \"\"\"Prompt element that can be hidden.\n\n Args:\n visible : bool, optional\n Whether the prompt element should be visible, by default True. Can\n be a callable that returns a bool. This is useful when a specific\n flag changes during a shrink iteration.\n \"\"\"\n self._visible = visible\n\n @property\n def prompt(self) -> str | BaseMessage:\n \"\"\"Avoid overriding this method. Override _prompt instead.\"\"\"\n if self.is_visible:\n return self._prompt\n else:\n return \"\"\n\n @property\n def abstract_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.\n Provide an abstract example of the answer here. See Memory for an\n example.\n\n Avoid overriding this method. Override _abstract_ex instead\n\n Returns:\n str: The abstract example\n \"\"\"\n if self.is_visible:\n return self._abstract_ex\n else:\n return \"\"\n\n @property\n def concrete_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.\n Provide a concrete example of the answer here. 
See Memory for an\n example.\n\n Avoid overriding this method. Override _concrete_ex instead\n\n Returns:\n str: The concrete example\n \"\"\"\n if self.is_visible:\n return self._concrete_ex\n else:\n return \"\"\n\n @property\n def is_visible(self):\n \"\"\"Handle the case where visible is a callable.\"\"\"\n visible = self._visible\n if callable(visible):\n visible = visible()\n return visible\n\n def _parse_answer(self, text_answer):\n \"\"\"Override to actually extract elements from the answer.\"\"\"\n return {}\n\n def parse_answer(self, text_answer) -> dict:\n if self.is_visible:\n return self._parse_answer(text_answer)\n else:\n return {}\n\n\nclass Shrinkable(PromptElement, abc.ABC):\n @abc.abstractmethod\n def shrink(self) -> None:\n \"\"\"Implement shrinking of this prompt element.\n\n You need to recursively call all shrinkable elements that are part of\n this prompt. You can also implement a shriking startegy for this prompt.\n Shrinking is can be called multiple times to progressively shrink the\n prompt until it fits max_tokens. Default max shrink iterations is 20.\n \"\"\"\n pass\n\n\nclass Trunkater(Shrinkable):\n \"\"\"Shrinkable element that truncates the prompt element from the bottom\n after a certain number of iterations.\"\"\"\n\n def __init__(self, visible, shrink_speed=0.3, start_trunkate_iteration=10):","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.Shrinkable","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.Shrinkable#L184-L194","kind":"class","name":"Shrinkable","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":184,"end_line":194,"context_start_line":164,"context_end_line":214,"code":"\n @property\n def is_visible(self):\n \"\"\"Handle the case where visible is a callable.\"\"\"\n visible = self._visible\n if callable(visible):\n visible = visible()\n return visible\n\n def _parse_answer(self, text_answer):\n \"\"\"Override to actually extract elements from the answer.\"\"\"\n return {}\n\n def parse_answer(self, text_answer) -> dict:\n if self.is_visible:\n return self._parse_answer(text_answer)\n else:\n return {}\n\n\nclass Shrinkable(PromptElement, abc.ABC):\n @abc.abstractmethod\n def shrink(self) -> None:\n \"\"\"Implement shrinking of this prompt element.\n\n You need to recursively call all shrinkable elements that are part of\n this prompt. You can also implement a shriking startegy for this prompt.\n Shrinking is can be called multiple times to progressively shrink the\n prompt until it fits max_tokens. 
Default max shrink iterations is 20.\n \"\"\"\n pass\n\n\nclass Trunkater(Shrinkable):\n \"\"\"Shrinkable element that truncates the prompt element from the bottom\n after a certain number of iterations.\"\"\"\n\n def __init__(self, visible, shrink_speed=0.3, start_trunkate_iteration=10):\n super().__init__(visible=visible)\n self.shrink_speed = shrink_speed\n self.start_trunkate_iteration = start_trunkate_iteration\n self.shrink_calls = 0\n self.deleted_lines = 0\n\n def shrink(self) -> None:\n if self.is_visible and self.shrink_calls >= self.start_trunkate_iteration:\n # remove the fraction of _prompt\n lines = self._prompt.splitlines()\n new_line_count = int(len(lines) * (1 - self.shrink_speed))\n self.deleted_lines += len(lines) - new_line_count\n self._prompt = \"\\n\".join(lines[:new_line_count])","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.Trunkater","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.Trunkater#L197-L217","kind":"class","name":"Trunkater","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":197,"end_line":217,"context_start_line":177,"context_end_line":237,"code":" def parse_answer(self, text_answer) -> dict:\n if self.is_visible:\n return self._parse_answer(text_answer)\n else:\n return {}\n\n\nclass Shrinkable(PromptElement, abc.ABC):\n @abc.abstractmethod\n def shrink(self) -> None:\n \"\"\"Implement shrinking of this prompt element.\n\n You need to recursively call all shrinkable elements that are part of\n this prompt. You can also implement a shrinking strategy for this prompt.\n Shrinking can be called multiple times to progressively shrink the\n prompt until it fits max_tokens. Default max shrink iterations is 20.\n \"\"\"\n pass\n\n\nclass Trunkater(Shrinkable):\n \"\"\"Shrinkable element that truncates the prompt element from the bottom\n after a certain number of iterations.\"\"\"\n\n def __init__(self, visible, shrink_speed=0.3, start_trunkate_iteration=10):\n super().__init__(visible=visible)\n self.shrink_speed = shrink_speed\n self.start_trunkate_iteration = start_trunkate_iteration\n self.shrink_calls = 0\n self.deleted_lines = 0\n\n def shrink(self) -> None:\n if self.is_visible and self.shrink_calls >= self.start_trunkate_iteration:\n # remove the fraction of _prompt\n lines = self._prompt.splitlines()\n new_line_count = int(len(lines) * (1 - self.shrink_speed))\n self.deleted_lines += len(lines) - new_line_count\n self._prompt = \"\\n\".join(lines[:new_line_count])\n self._prompt += f\"\\n... 
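The truncation arithmetic is easy to check by hand: each effective shrink() keeps int((1 - shrink_speed) * n) lines and replaces the tail with a running deletion note, which itself counts as a line on the next pass. A sketch that pokes the private _prompt directly, with start_trunkate_iteration=0 so truncation starts immediately:

```python
from agentlab.agents.dynamic_prompting import Trunkater

t = Trunkater(visible=True, shrink_speed=0.3, start_trunkate_iteration=0)
t._prompt = "\n".join(f"line {i}" for i in range(100))

t.shrink()  # keeps int(100 * 0.7) = 70 lines, deleted_lines == 30
t.shrink()  # note line included: keeps int(71 * 0.7) = 49, deletes 22 more

assert t.deleted_lines == 52
print(t._prompt.splitlines()[-1])  # ... Deleted 52 lines to reduce prompt size.
```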
Deleted {self.deleted_lines} lines to reduce prompt size.\"\n\n self.shrink_calls += 1\n\n\ndef fit_tokens(\n shrinkable: Shrinkable,\n max_prompt_tokens=None,\n max_iterations=20,\n model_name=\"openai/gpt-4\",\n additional_prompts=[\"\"],\n):\n \"\"\"Shrink a prompt element until it fits `max_prompt_tokens`.\n\n Args:\n shrinkable (Shrinkable): The prompt element to shrink.\n max_prompt_tokens (int): The maximum number of tokens allowed.\n max_iterations (int, optional): The maximum number of shrink iterations, by default 20.\n model_name (str, optional): The name of the model used when tokenizing.\n additional_prompts (str or List[str], optional): Additional prompts to account for when shrinking, by default [\"\"].\n\n Returns:\n str: the prompt after shrinking.","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.fit_tokens","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.fit_tokens#L220-L279","kind":"function","name":"fit_tokens","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":220,"end_line":279,"context_start_line":200,"context_end_line":299,"code":"\n def __init__(self, visible, shrink_speed=0.3, start_trunkate_iteration=10):\n super().__init__(visible=visible)\n self.shrink_speed = shrink_speed\n self.start_trunkate_iteration = start_trunkate_iteration\n self.shrink_calls = 0\n self.deleted_lines = 0\n\n def shrink(self) -> None:\n if self.is_visible and self.shrink_calls >= self.start_trunkate_iteration:\n # remove the fraction of _prompt\n lines = self._prompt.splitlines()\n new_line_count = int(len(lines) * (1 - self.shrink_speed))\n self.deleted_lines += len(lines) - new_line_count\n self._prompt = \"\\n\".join(lines[:new_line_count])\n self._prompt += f\"\\n... Deleted {self.deleted_lines} lines to reduce prompt size.\"\n\n self.shrink_calls += 1\n\n\ndef fit_tokens(\n shrinkable: Shrinkable,\n max_prompt_tokens=None,\n max_iterations=20,\n model_name=\"openai/gpt-4\",\n additional_prompts=[\"\"],\n):\n \"\"\"Shrink a prompt element until it fits `max_prompt_tokens`.\n\n Args:\n shrinkable (Shrinkable): The prompt element to shrink.\n max_prompt_tokens (int): The maximum number of tokens allowed.\n max_iterations (int, optional): The maximum number of shrink iterations, by default 20.\n model_name (str, optional): The name of the model used when tokenizing.\n additional_prompts (str or List[str], optional): Additional prompts to account for when shrinking, by default [\"\"].\n\n Returns:\n str: the prompt after shrinking.\n\n Raises:\n ValueError: Unrecognized type for prompt\n \"\"\"\n\n if max_prompt_tokens is None:\n return shrinkable.prompt\n\n if isinstance(additional_prompts, str):\n additional_prompts = [additional_prompts]\n\n for prompt in additional_prompts:\n max_prompt_tokens -= count_tokens(prompt, model=model_name) + 1 # +1 because why not ?\n\n for _ in range(max_iterations):\n prompt = shrinkable.prompt\n if isinstance(prompt, str):\n prompt_str = prompt\n elif isinstance(prompt, list):\n # warn deprecated\n warn(\n \"Using list of prompts is deprecated. 
Use a Discussion object instead.\",\n DeprecationWarning,\n )\n prompt_str = \"\\n\".join([p[\"text\"] for p in prompt if p[\"type\"] == \"text\"])\n elif isinstance(prompt, BaseMessage):\n prompt_str = prompt.__str__(warn_if_image=False)\n else:\n raise ValueError(f\"Unrecognized type for prompt: {type(prompt)}\")\n n_token = count_tokens(prompt_str, model=model_name)\n if n_token <= max_prompt_tokens:\n return prompt\n shrinkable.shrink()\n\n logging.info(\n dedent(\n f\"\"\"\\\n After {max_iterations} shrink iterations, the prompt is still\n {count_tokens(prompt_str)} tokens (greater than {max_prompt_tokens}). Returning the prompt as is.\"\"\"\n )\n )\n return prompt\n\n\nclass HTML(Trunkater):\n def __init__(self, html, visible_elements_only: bool, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible, start_trunkate_iteration=5)\n if visible_elements_only:\n visible_elements_note = \"\"\"\\\nNote: only elements that are visible in the viewport are presented. You might need to scroll the page, or open tabs or menus to see more.\n\n\"\"\"\n else:\n visible_elements_note = \"\"\n self._prompt = f\"\\n{prefix}HTML:\\n{visible_elements_note}{html}\\n\"\n\n\nclass AXTree(Trunkater):\n def __init__(\n self,\n ax_tree,\n visible_elements_only: bool,","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.HTML","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.HTML#L282-L292","kind":"class","name":"HTML","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":282,"end_line":292,"context_start_line":262,"context_end_line":312,"code":" prompt_str = \"\\n\".join([p[\"text\"] for p in prompt if p[\"type\"] == \"text\"])\n elif isinstance(prompt, BaseMessage):\n prompt_str = prompt.__str__(warn_if_image=False)\n else:\n raise ValueError(f\"Unrecognized type for prompt: {type(prompt)}\")\n n_token = count_tokens(prompt_str, model=model_name)\n if n_token <= max_prompt_tokens:\n return prompt\n shrinkable.shrink()\n\n logging.info(\n dedent(\n f\"\"\"\\\n After {max_iterations} shrink iterations, the prompt is still\n {count_tokens(prompt_str)} tokens (greater than {max_prompt_tokens}). Returning the prompt as is.\"\"\"\n )\n )\n return prompt\n\n\nclass HTML(Trunkater):\n def __init__(self, html, visible_elements_only: bool, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible, start_trunkate_iteration=5)\n if visible_elements_only:\n visible_elements_note = \"\"\"\\\nNote: only elements that are visible in the viewport are presented. You might need to scroll the page, or open tabs or menus to see more.\n\n\"\"\"\n else:\n visible_elements_note = \"\"\n self._prompt = f\"\\n{prefix}HTML:\\n{visible_elements_note}{html}\\n\"\n\n\nclass AXTree(Trunkater):\n def __init__(\n self,\n ax_tree,\n visible_elements_only: bool,\n visible: bool = True,\n coord_type=None,\n visible_tag=True,\n prefix=\"\",\n ) -> None:\n super().__init__(visible=visible, start_trunkate_iteration=10)\n bid_info = \"\"\"\\\nNote: [bid] is the unique alpha-numeric identifier at the beginning of lines for each element in the AXTree. 
Always use bid to refer to elements in your actions.\n\n\"\"\"\n if coord_type == \"center\":\n coord_note = \"\"\"\\\nNote: center coordinates are provided in parenthesis and are relative to the top left corner of the page.","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.AXTree","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.AXTree#L295-L338","kind":"class","name":"AXTree","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":295,"end_line":338,"context_start_line":275,"context_end_line":358,"code":" After {max_iterations} shrink iterations, the prompt is still\n {count_tokens(prompt_str)} tokens (greater than {max_prompt_tokens}). Returning the prompt as is.\"\"\"\n )\n )\n return prompt\n\n\nclass HTML(Trunkater):\n def __init__(self, html, visible_elements_only: bool, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible, start_trunkate_iteration=5)\n if visible_elements_only:\n visible_elements_note = \"\"\"\\\nNote: only elements that are visible in the viewport are presented. You might need to scroll the page, or open tabs or menus to see more.\n\n\"\"\"\n else:\n visible_elements_note = \"\"\n self._prompt = f\"\\n{prefix}HTML:\\n{visible_elements_note}{html}\\n\"\n\n\nclass AXTree(Trunkater):\n def __init__(\n self,\n ax_tree,\n visible_elements_only: bool,\n visible: bool = True,\n coord_type=None,\n visible_tag=True,\n prefix=\"\",\n ) -> None:\n super().__init__(visible=visible, start_trunkate_iteration=10)\n bid_info = \"\"\"\\\nNote: [bid] is the unique alpha-numeric identifier at the beginning of lines for each element in the AXTree. Always use bid to refer to elements in your actions.\n\n\"\"\"\n if coord_type == \"center\":\n coord_note = \"\"\"\\\nNote: center coordinates are provided in parenthesis and are relative to the top left corner of the page.\n\n\"\"\"\n elif coord_type == \"box\":\n coord_note = \"\"\"\\\nNote: bounding box of each object are provided in parenthesis and are relative to the top left corner of the page.\n\n\"\"\"\n else:\n coord_note = \"\"\n if visible_elements_only:\n visible_elements_note = \"\"\"\\\nNote: only elements that are visible in the viewport are presented. You might need to scroll the page, or open tabs or menus to see more.\n\n\"\"\"\n else:\n visible_elements_note = \"\"\n\n if visible_tag:\n vsible_tag_note = \"\"\"\\\nNote: You can only interact with visible elements. 
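AXTree stacks up to four explanatory notes ahead of the tree text, chosen by its arguments. A sketch showing how coord_type and visible_tag change the preamble (the one-line tree is a toy):

```python
from agentlab.agents.dynamic_prompting import AXTree

ax = AXTree(
    "[a12] button 'Submit' (120, 340)",
    visible_elements_only=True,  # adds the viewport-visibility note
    coord_type="center",         # adds the center-coordinates note
    visible_tag=False,           # omits the "visible" tag note
    prefix="## ",
)
print(ax.prompt)  # "## AXTree:" + bid note + coord note + visibility note + tree
```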
If the \"visible\" tag is not\npresent, the element is not visible on the page.\n\n\"\"\"\n else:\n vsible_tag_note = \"\"\n self._prompt = f\"\\n{prefix}AXTree:\\n{bid_info}{coord_note}{visible_elements_note}{vsible_tag_note}{ax_tree}\\n\"\n\n\nclass Error(PromptElement):\n def __init__(self, error: str, visible: bool = True, prefix=\"\", limit_logs=True) -> None:\n logs_separator = \"Call log:\"\n if limit_logs and logs_separator in error:\n error, logs = error.split(logs_separator)\n logs = \"\\n\".join(logs.split(\"\\n\")[:10])\n error = error + f\"\\n{logs_separator}\\n{logs}\"\n\n super().__init__(visible=visible)\n self._prompt = f\"\\n{prefix}Error from previous action:\\n{error}\\n\"\n\n\nclass FocusedElement(PromptElement):\n def __init__(self, bid, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\n{prefix}Focused element:\n\"\"\"","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.Error","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.Error#L341-L350","kind":"class","name":"Error","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":341,"end_line":350,"context_start_line":321,"context_end_line":370,"code":" coord_note = \"\"\n if visible_elements_only:\n visible_elements_note = \"\"\"\\\nNote: only elements that are visible in the viewport are presented. You might need to scroll the page, or open tabs or menus to see more.\n\n\"\"\"\n else:\n visible_elements_note = \"\"\n\n if visible_tag:\n vsible_tag_note = \"\"\"\\\nNote: You can only interact with visible elements. If the \"visible\" tag is not\npresent, the element is not visible on the page.\n\n\"\"\"\n else:\n vsible_tag_note = \"\"\n self._prompt = f\"\\n{prefix}AXTree:\\n{bid_info}{coord_note}{visible_elements_note}{vsible_tag_note}{ax_tree}\\n\"\n\n\nclass Error(PromptElement):\n def __init__(self, error: str, visible: bool = True, prefix=\"\", limit_logs=True) -> None:\n logs_separator = \"Call log:\"\n if limit_logs and logs_separator in error:\n error, logs = error.split(logs_separator)\n logs = \"\\n\".join(logs.split(\"\\n\")[:10])\n error = error + f\"\\n{logs_separator}\\n{logs}\"\n\n super().__init__(visible=visible)\n self._prompt = f\"\\n{prefix}Error from previous action:\\n{error}\\n\"\n\n\nclass FocusedElement(PromptElement):\n def __init__(self, bid, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\n{prefix}Focused element:\n\"\"\"\n if bid:\n self._prompt += f\"\"\"\\\nbid={repr(bid)}\n\"\"\"\n else:\n self._prompt += f\"\"\"\\\nNone\n\"\"\"\n\n\nclass Tabs(PromptElement):\n def __init__(self, obs, visible: bool = True, prefix=\"\") -> None:","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.FocusedElement","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.FocusedElement#L353-L366","kind":"class","name":"FocusedElement","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":353,"end_line":366,"context_start_line":333,"context_end_line":386,"code":"present, the element is not visible on the page.\n\n\"\"\"\n else:\n vsible_tag_note = \"\"\n self._prompt = 
f\"\\n{prefix}AXTree:\\n{bid_info}{coord_note}{visible_elements_note}{vsible_tag_note}{ax_tree}\\n\"\n\n\nclass Error(PromptElement):\n def __init__(self, error: str, visible: bool = True, prefix=\"\", limit_logs=True) -> None:\n logs_separator = \"Call log:\"\n if limit_logs and logs_separator in error:\n error, logs = error.split(logs_separator)\n logs = \"\\n\".join(logs.split(\"\\n\")[:10])\n error = error + f\"\\n{logs_separator}\\n{logs}\"\n\n super().__init__(visible=visible)\n self._prompt = f\"\\n{prefix}Error from previous action:\\n{error}\\n\"\n\n\nclass FocusedElement(PromptElement):\n def __init__(self, bid, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\n{prefix}Focused element:\n\"\"\"\n if bid:\n self._prompt += f\"\"\"\\\nbid={repr(bid)}\n\"\"\"\n else:\n self._prompt += f\"\"\"\\\nNone\n\"\"\"\n\n\nclass Tabs(PromptElement):\n def __init__(self, obs, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible)\n self.obs = obs\n self.prefix = prefix\n\n @property\n def _prompt(self) -> str:\n # by implementing this as a property, it's only coputed if visible\n prompt_pieces = [f\"\\n{self.prefix}Currently open tabs:\"]\n for page_index, (page_url, page_title) in enumerate(\n zip(self.obs[\"open_pages_urls\"], self.obs[\"open_pages_titles\"])\n ):\n active_or_not = \" (active tab)\" if page_index == self.obs[\"active_page_index\"] else \"\"\n prompt_piece = f\"\"\"\\\nTab {page_index}{active_or_not}:\n Title: {page_title}\n URL: {page_url}","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.Tabs","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.Tabs#L369-L389","kind":"class","name":"Tabs","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":369,"end_line":389,"context_start_line":349,"context_end_line":409,"code":" super().__init__(visible=visible)\n self._prompt = f\"\\n{prefix}Error from previous action:\\n{error}\\n\"\n\n\nclass FocusedElement(PromptElement):\n def __init__(self, bid, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\n{prefix}Focused element:\n\"\"\"\n if bid:\n self._prompt += f\"\"\"\\\nbid={repr(bid)}\n\"\"\"\n else:\n self._prompt += f\"\"\"\\\nNone\n\"\"\"\n\n\nclass Tabs(PromptElement):\n def __init__(self, obs, visible: bool = True, prefix=\"\") -> None:\n super().__init__(visible=visible)\n self.obs = obs\n self.prefix = prefix\n\n @property\n def _prompt(self) -> str:\n # by implementing this as a property, it's only coputed if visible\n prompt_pieces = [f\"\\n{self.prefix}Currently open tabs:\"]\n for page_index, (page_url, page_title) in enumerate(\n zip(self.obs[\"open_pages_urls\"], self.obs[\"open_pages_titles\"])\n ):\n active_or_not = \" (active tab)\" if page_index == self.obs[\"active_page_index\"] else \"\"\n prompt_piece = f\"\"\"\\\nTab {page_index}{active_or_not}:\n Title: {page_title}\n URL: {page_url}\n\"\"\"\n prompt_pieces.append(prompt_piece)\n return \"\\n\".join(prompt_pieces)\n\n\nclass Observation(Shrinkable):\n \"\"\"Observation of the current step.\n\n Contains the html, the accessibility tree and the error logs.\n \"\"\"\n\n def __init__(self, obs, flags: ObsFlags) -> None:\n super().__init__()\n self.flags = flags\n self.obs = obs\n\n self.tabs = Tabs(\n obs,\n visible=lambda: flags.use_tabs,\n prefix=\"## \",\n 
)\n\n self.html = HTML(","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.Observation","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.Observation#L392-L458","kind":"class","name":"Observation","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":392,"end_line":458,"context_start_line":372,"context_end_line":478,"code":" self.obs = obs\n self.prefix = prefix\n\n @property\n def _prompt(self) -> str:\n # by implementing this as a property, it's only coputed if visible\n prompt_pieces = [f\"\\n{self.prefix}Currently open tabs:\"]\n for page_index, (page_url, page_title) in enumerate(\n zip(self.obs[\"open_pages_urls\"], self.obs[\"open_pages_titles\"])\n ):\n active_or_not = \" (active tab)\" if page_index == self.obs[\"active_page_index\"] else \"\"\n prompt_piece = f\"\"\"\\\nTab {page_index}{active_or_not}:\n Title: {page_title}\n URL: {page_url}\n\"\"\"\n prompt_pieces.append(prompt_piece)\n return \"\\n\".join(prompt_pieces)\n\n\nclass Observation(Shrinkable):\n \"\"\"Observation of the current step.\n\n Contains the html, the accessibility tree and the error logs.\n \"\"\"\n\n def __init__(self, obs, flags: ObsFlags) -> None:\n super().__init__()\n self.flags = flags\n self.obs = obs\n\n self.tabs = Tabs(\n obs,\n visible=lambda: flags.use_tabs,\n prefix=\"## \",\n )\n\n self.html = HTML(\n obs[flags.html_type],\n visible_elements_only=flags.filter_visible_elements_only,\n visible=lambda: flags.use_html,\n prefix=\"## \",\n )\n self.ax_tree = AXTree(\n obs[\"axtree_txt\"],\n visible_elements_only=flags.filter_visible_elements_only,\n visible=lambda: flags.use_ax_tree,\n coord_type=flags.extract_coords,\n visible_tag=flags.extract_visible_tag,\n prefix=\"## \",\n )\n self.error = Error(\n obs[\"last_action_error\"],\n visible=lambda: flags.use_error_logs and obs[\"last_action_error\"],\n prefix=\"## \",\n )\n self.focused_element = FocusedElement(\n obs[\"focused_element_bid\"],\n visible=flags.use_focused_element,\n prefix=\"## \",\n )\n\n def shrink(self):\n self.ax_tree.shrink()\n self.html.shrink()\n\n @property\n def _prompt(self) -> str:\n return f\"\"\"\n# Observation of current step:\n{self.tabs.prompt}{self.html.prompt}{self.ax_tree.prompt}{self.focused_element.prompt}{self.error.prompt}\n\n\"\"\"\n\n def add_screenshot(self, prompt: BaseMessage) -> BaseMessage:\n if self.flags.use_screenshot:\n if self.flags.use_som:\n screenshot = self.obs[\"screenshot_som\"]\n prompt.add_text(\n \"\\n## Screenshot:\\nHere is a screenshot of the page, it is annotated with bounding boxes and corresponding bids:\"\n )\n else:\n screenshot = self.obs[\"screenshot\"]\n prompt.add_text(\"\\n## Screenshot:\\nHere is a screenshot of the page:\")\n img_url = image_to_jpg_base64_url(screenshot)\n prompt.add_image(img_url, detail=self.flags.openai_vision_detail)\n return prompt\n\n\nclass MacNote(PromptElement):\n def __init__(self) -> None:\n super().__init__(visible=platform.system() == \"Darwin\")\n self._prompt = (\n \"\\nNote: you are on mac so you should use Meta instead of Control for Control+C etc.\\n\"\n )\n\n\nclass BeCautious(PromptElement):\n def __init__(self, visible: bool = True) -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\\\n\\nBe very cautious. Avoid submitting anything before verifying the effect of your\nactions. Take the time to explore the effect of safe actions first. 
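Observation is mostly wiring: each sub-element is keyed to one obs field and toggled by a flag, and hidden elements render as empty strings. A sketch of the minimal obs dict the constructor reads (values are toys; the screenshot keys are only needed when add_screenshot is called):

```python
from agentlab.agents.dynamic_prompting import Observation, ObsFlags

flags = ObsFlags(use_html=False, use_ax_tree=True, use_error_logs=True)

obs = {
    "pruned_html": "<div>unused here, use_html=False</div>",  # key = flags.html_type
    "axtree_txt": "[a12] button 'Submit'",
    "last_action_error": "",  # falsy, so the Error element stays hidden
    "focused_element_bid": "a12",
    "open_pages_urls": ["https://example.com"],
    "open_pages_titles": ["Home"],
    "active_page_index": 0,
}

print(Observation(obs, flags).prompt)  # header plus the AXTree section only
```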
For example\nyou can fill a few elements of a form, but don't click submit before verifying\nthat everything was filled correctly.\\n\"\"\"\n\n","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.MacNote","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.MacNote#L461-L466","kind":"class","name":"MacNote","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":461,"end_line":466,"context_start_line":441,"context_end_line":486,"code":"# Observation of current step:\n{self.tabs.prompt}{self.html.prompt}{self.ax_tree.prompt}{self.focused_element.prompt}{self.error.prompt}\n\n\"\"\"\n\n def add_screenshot(self, prompt: BaseMessage) -> BaseMessage:\n if self.flags.use_screenshot:\n if self.flags.use_som:\n screenshot = self.obs[\"screenshot_som\"]\n prompt.add_text(\n \"\\n## Screenshot:\\nHere is a screenshot of the page, it is annotated with bounding boxes and corresponding bids:\"\n )\n else:\n screenshot = self.obs[\"screenshot\"]\n prompt.add_text(\"\\n## Screenshot:\\nHere is a screenshot of the page:\")\n img_url = image_to_jpg_base64_url(screenshot)\n prompt.add_image(img_url, detail=self.flags.openai_vision_detail)\n return prompt\n\n\nclass MacNote(PromptElement):\n def __init__(self) -> None:\n super().__init__(visible=platform.system() == \"Darwin\")\n self._prompt = (\n \"\\nNote: you are on mac so you should use Meta instead of Control for Control+C etc.\\n\"\n )\n\n\nclass BeCautious(PromptElement):\n def __init__(self, visible: bool = True) -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\\\n\\nBe very cautious. Avoid submitting anything before verifying the effect of your\nactions. Take the time to explore the effect of safe actions first. 
For example\nyou can fill a few elements of a form, but don't click submit before verifying\nthat everything was filled correctly.\\n\"\"\"\n\n\nclass GoalInstructions(PromptElement):\n def __init__(self, goal_object, visible: bool = True, extra_instructions=None) -> None:\n super().__init__(visible)\n self._prompt = [\n dict(\n type=\"text\",\n text=f\"\"\"\\\n# Instructions","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.BeCautious","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.BeCautious#L469-L476","kind":"class","name":"BeCautious","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":469,"end_line":476,"context_start_line":449,"context_end_line":496,"code":" screenshot = self.obs[\"screenshot_som\"]\n prompt.add_text(\n \"\\n## Screenshot:\\nHere is a screenshot of the page, it is annotated with bounding boxes and corresponding bids:\"\n )\n else:\n screenshot = self.obs[\"screenshot\"]\n prompt.add_text(\"\\n## Screenshot:\\nHere is a screenshot of the page:\")\n img_url = image_to_jpg_base64_url(screenshot)\n prompt.add_image(img_url, detail=self.flags.openai_vision_detail)\n return prompt\n\n\nclass MacNote(PromptElement):\n def __init__(self) -> None:\n super().__init__(visible=platform.system() == \"Darwin\")\n self._prompt = (\n \"\\nNote: you are on mac so you should use Meta instead of Control for Control+C etc.\\n\"\n )\n\n\nclass BeCautious(PromptElement):\n def __init__(self, visible: bool = True) -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\\\n\\nBe very cautious. Avoid submitting anything before verifying the effect of your\nactions. Take the time to explore the effect of safe actions first. For example\nyou can fill a few elements of a form, but don't click submit before verifying\nthat everything was filled correctly.\\n\"\"\"\n\n\nclass GoalInstructions(PromptElement):\n def __init__(self, goal_object, visible: bool = True, extra_instructions=None) -> None:\n super().__init__(visible)\n self._prompt = [\n dict(\n type=\"text\",\n text=f\"\"\"\\\n# Instructions\nReview the current state of the page and all other information to find the best\npossible next action to accomplish your goal. Your answer will be interpreted\nand executed by a program, make sure to follow the formatting instructions.\n\n## Goal:\n\"\"\",\n )\n ]\n\n self._prompt += goal_object","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.GoalInstructions","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.GoalInstructions#L479-L509","kind":"class","name":"GoalInstructions","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":479,"end_line":509,"context_start_line":459,"context_end_line":529,"code":"\n\nclass MacNote(PromptElement):\n def __init__(self) -> None:\n super().__init__(visible=platform.system() == \"Darwin\")\n self._prompt = (\n \"\\nNote: you are on mac so you should use Meta instead of Control for Control+C etc.\\n\"\n )\n\n\nclass BeCautious(PromptElement):\n def __init__(self, visible: bool = True) -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\\\n\\nBe very cautious. Avoid submitting anything before verifying the effect of your\nactions. 
Take the time to explore the effect of safe actions first. For example\nyou can fill a few elements of a form, but don't click submit before verifying\nthat everything was filled correctly.\\n\"\"\"\n\n\nclass GoalInstructions(PromptElement):\n def __init__(self, goal_object, visible: bool = True, extra_instructions=None) -> None:\n super().__init__(visible)\n self._prompt = [\n dict(\n type=\"text\",\n text=f\"\"\"\\\n# Instructions\nReview the current state of the page and all other information to find the best\npossible next action to accomplish your goal. Your answer will be interpreted\nand executed by a program, make sure to follow the formatting instructions.\n\n## Goal:\n\"\"\",\n )\n ]\n\n self._prompt += goal_object\n\n if extra_instructions:\n self._prompt += [\n dict(\n type=\"text\",\n text=f\"\"\"\n\n## Extra instructions:\n\n{extra_instructions}\n\"\"\",\n )\n ]\n\n\nclass ChatInstructions(PromptElement):\n def __init__(self, chat_messages, visible: bool = True, extra_instructions=None) -> None:\n super().__init__(visible)\n self._prompt = f\"\"\"\\\n# Instructions\n\nYou are a UI Assistant, your goal is to help the user perform tasks using a web browser. You can\ncommunicate with the user via a chat, in which the user gives you instructions and in which you\ncan send back messages. You have access to a web browser that both you and the user can see,\nand with which only you can interact via specific commands.\n\nReview the instructions from the user, the current state of the page and all other information\nto find the best possible next action to accomplish your goal. Your answer will be interpreted\nand executed by a program, make sure to follow the formatting instructions.\n\n## Chat messages:\n\n\"\"\"","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.ChatInstructions","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.ChatInstructions#L512-L544","kind":"class","name":"ChatInstructions","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":512,"end_line":544,"context_start_line":492,"context_end_line":564,"code":"\"\"\",\n )\n ]\n\n self._prompt += goal_object\n\n if extra_instructions:\n self._prompt += [\n dict(\n type=\"text\",\n text=f\"\"\"\n\n## Extra instructions:\n\n{extra_instructions}\n\"\"\",\n )\n ]\n\n\nclass ChatInstructions(PromptElement):\n def __init__(self, chat_messages, visible: bool = True, extra_instructions=None) -> None:\n super().__init__(visible)\n self._prompt = f\"\"\"\\\n# Instructions\n\nYou are a UI Assistant, your goal is to help the user perform tasks using a web browser. You can\ncommunicate with the user via a chat, in which the user gives you instructions and in which you\ncan send back messages. You have access to a web browser that both you and the user can see,\nand with which only you can interact via specific commands.\n\nReview the instructions from the user, the current state of the page and all other information\nto find the best possible next action to accomplish your goal. 
Your answer will be interpreted\nand executed by a program, make sure to follow the formatting instructions.\n\n## Chat messages:\n\n\"\"\"\n self._prompt += \"\\n\".join(\n [\n f\"\"\"\\\n - [{msg['role']}] UTC Time: {time.asctime(time.gmtime(msg['timestamp']))} - Local Time: {time.asctime(time.localtime(msg['timestamp']))} - {msg['message']}\"\"\"\n for msg in chat_messages\n ]\n )\n\n if extra_instructions:\n self._prompt += f\"\"\"\n\n## Extra instructions:\n\n{extra_instructions}\n\"\"\"\n\n\nclass Hints(PromptElement):\n \"\"\"Not super useful and stale.\"\"\"\n\n # NOTE: are these hints still relevant?\n _prompt = \"\"\"\\\nNote:\n* Some tasks may be game-like and may require interacting with the mouse position\nin x, y coordinates.\n* Some text fields might have auto-completion. To see it, you have to type a few\ncharacters and wait until the next step.\n* If you have to cut and paste, don't forget to select the text first.\n* Coordinates inside an SVG are relative to its top left corner.\n* Make sure to use bid to identify elements when using commands.\n* Interacting with comboboxes, dropdowns and auto-complete fields can be tricky;\nsometimes you need to use select_option, while other times you need to use fill\nor click and wait for the reaction of the page.\n\"\"\"\n","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.Hints","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.Hints#L547-L563","kind":"class","name":"Hints","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":547,"end_line":563,"context_start_line":527,"context_end_line":583,"code":"## Chat messages:\n\n\"\"\"\n self._prompt += \"\\n\".join(\n [\n f\"\"\"\\\n - [{msg['role']}] UTC Time: {time.asctime(time.gmtime(msg['timestamp']))} - Local Time: {time.asctime(time.localtime(msg['timestamp']))} - {msg['message']}\"\"\"\n for msg in chat_messages\n ]\n )\n\n if extra_instructions:\n self._prompt += f\"\"\"\n\n## Extra instructions:\n\n{extra_instructions}\n\"\"\"\n\n\nclass Hints(PromptElement):\n \"\"\"Not super useful and stale.\"\"\"\n\n # NOTE: are these hints still relevant?\n _prompt = \"\"\"\\\nNote:\n* Some tasks may be game-like and may require interacting with the mouse position\nin x, y coordinates.\n* Some text fields might have auto-completion. To see it, you have to type a few\ncharacters and wait until the next step.\n* If you have to cut and paste, don't forget to select the text first.\n* Coordinates inside an SVG are relative to its top left corner.\n* Make sure to use bid to identify elements when using commands.\n* Interacting with comboboxes, dropdowns and auto-complete fields can be tricky;\nsometimes you need to use select_option, while other times you need to use fill\nor click and wait for the reaction of the page.\n\"\"\"\n\n\nclass SystemPrompt(PromptElement):\n _prompt = \"\"\"\\\nYou are an agent trying to solve a web task based on the content of the page and\nuser instructions. You can interact with the page and explore, and send messages to the user. 
Each time you\nsubmit an action it will be sent to the browser and you will receive a new page.\"\"\"\n\n\nclass ActionPrompt(PromptElement):\n\n _concrete_ex = \"\"\"\n\nclick('a324')\n\n\"\"\"\n\n def __init__(self, action_set: AbstractActionSet, action_flags: ActionFlags) -> None:\n super().__init__()\n self.action_set = action_set","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.SystemPrompt","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.SystemPrompt#L566-L570","kind":"class","name":"SystemPrompt","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":566,"end_line":570,"context_start_line":546,"context_end_line":590,"code":"\nclass Hints(PromptElement):\n \"\"\"Not super useful and stale.\"\"\"\n\n # NOTE: are these hints still relevant?\n _prompt = \"\"\"\\\nNote:\n* Some tasks may be game-like and may require interacting with the mouse position\nin x, y coordinates.\n* Some text fields might have auto-completion. To see it, you have to type a few\ncharacters and wait until the next step.\n* If you have to cut and paste, don't forget to select the text first.\n* Coordinates inside an SVG are relative to its top left corner.\n* Make sure to use bid to identify elements when using commands.\n* Interacting with comboboxes, dropdowns and auto-complete fields can be tricky;\nsometimes you need to use select_option, while other times you need to use fill\nor click and wait for the reaction of the page.\n\"\"\"\n\n\nclass SystemPrompt(PromptElement):\n _prompt = \"\"\"\\\nYou are an agent trying to solve a web task based on the content of the page and\nuser instructions. You can interact with the page and explore, and send messages to the user. Each time you\nsubmit an action it will be sent to the browser and you will receive a new page.\"\"\"\n\n\nclass ActionPrompt(PromptElement):\n\n _concrete_ex = \"\"\"\n\nclick('a324')\n\n\"\"\"\n\n def __init__(self, action_set: AbstractActionSet, action_flags: ActionFlags) -> None:\n super().__init__()\n self.action_set = action_set\n self.action_flags = action_flags\n action_set_generic_info = \"\"\"\\\nNote: This action set allows you to interact with your environment. Most of them\nare Python functions executing Playwright code. The primary way of referring to\nelements in the page is through bids, which are specified in your observations.\n\n\"\"\"","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.ActionPrompt","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.ActionPrompt#L573-L640","kind":"class","name":"ActionPrompt","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":573,"end_line":640,"context_start_line":553,"context_end_line":660,"code":"* Some tasks may be game-like and may require interacting with the mouse position\nin x, y coordinates.\n* Some text fields might have auto-completion. 
To see it, you have to type a few\ncharacters and wait until the next step.\n* If you have to cut and paste, don't forget to select the text first.\n* Coordinates inside an SVG are relative to its top left corner.\n* Make sure to use bid to identify elements when using commands.\n* Interacting with comboboxes, dropdowns and auto-complete fields can be tricky;\nsometimes you need to use select_option, while other times you need to use fill\nor click and wait for the reaction of the page.\n\"\"\"\n\n\nclass SystemPrompt(PromptElement):\n _prompt = \"\"\"\\\nYou are an agent trying to solve a web task based on the content of the page and\nuser instructions. You can interact with the page and explore, and send messages to the user. Each time you\nsubmit an action it will be sent to the browser and you will receive a new page.\"\"\"\n\n\nclass ActionPrompt(PromptElement):\n\n _concrete_ex = \"\"\"\n\nclick('a324')\n\n\"\"\"\n\n def __init__(self, action_set: AbstractActionSet, action_flags: ActionFlags) -> None:\n super().__init__()\n self.action_set = action_set\n self.action_flags = action_flags\n action_set_generic_info = \"\"\"\\\nNote: This action set allows you to interact with your environment. Most of them\nare Python functions executing Playwright code. The primary way of referring to\nelements in the page is through bids, which are specified in your observations.\n\n\"\"\"\n action_description = action_set.describe(\n with_long_description=action_flags.long_description,\n with_examples=action_flags.individual_examples,\n )\n self._prompt = (\n f\"# Action space:\\n{action_set_generic_info}{action_description}{MacNote().prompt}\\n\"\n )\n self._abstract_ex = f\"\"\"\n\n{self.action_set.example_action(abstract=True)}\n\n\"\"\"\n\n # self._concrete_ex = f\"\"\"\n # \n # {self.action_set.example_action(abstract=False)}\n # \n # \"\"\"\n\n def _parse_answer(self, text_answer):\n try:\n ans_dict = parse_html_tags_raise(text_answer, keys=[\"action\"], merge_multiple=True)\n except ParseError as e:\n if self.action_flags.is_strict:\n raise e\n else:\n # try to extract code blocks\n blocks = extract_code_blocks(text_answer)\n if len(blocks) == 0:\n raise e\n else:\n code = \"\\n\".join([block for _, block in blocks])\n ans_dict = {\"action\": code, \"parse_error\": str(e)}\n\n try:\n if ans_dict[\"action\"] == \"None\":\n # Used by reproducibility agent for backward compatibility of\n # traces missing LLM's response in chat messages.\n ans_dict[\"action\"] = None\n else:\n # just check if action can be mapped to python code but keep action as is\n # the environment will be responsible for mapping it to python\n self.action_set.to_python_code(ans_dict[\"action\"])\n except Exception as e:\n raise ParseError(\n f\"Error while parsing action\\n: {e}\\n\"\n \"Make sure your answer is restricted to the allowed actions.\"\n )\n\n return ans_dict\n\n\n# def make_action_set(action_flags: ActionFlags) -> AbstractActionSet:\n\n# if action_flags.action_set == \"python\":\n# action_set = PythonActionSet(strict=action_flags.is_strict)\n# if action_flags.demo_mode != \"off\":\n# warn(\n# f'Action_set \"python\" is incompatible with demo_mode={repr(action_flags.demo_mode)}.'\n# )\n# return action_set\n\n# action_set = HighLevelActionSet(\n# subsets=list(set([\"chat\"] + [\"infeas\"] + action_flags.action_set.split(\"+\"))),\n# multiaction=action_flags.multi_actions,\n# strict=action_flags.is_strict,\n# demo_mode=action_flags.demo_mode,\n# )\n\n# return 
action_set","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.Think","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.Think#L663-L685","kind":"class","name":"Think","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":663,"end_line":685,"context_start_line":643,"context_end_line":705,"code":"# def make_action_set(action_flags: ActionFlags) -> AbstractActionSet:\n\n# if action_flags.action_set == \"python\":\n# action_set = PythonActionSet(strict=action_flags.is_strict)\n# if action_flags.demo_mode != \"off\":\n# warn(\n# f'Action_set \"python\" is incompatible with demo_mode={repr(action_flags.demo_mode)}.'\n# )\n# return action_set\n\n# action_set = HighLevelActionSet(\n# subsets=list(set([\"chat\"] + [\"infeas\"] + action_flags.action_set.split(\"+\"))),\n# multiaction=action_flags.multi_actions,\n# strict=action_flags.is_strict,\n# demo_mode=action_flags.demo_mode,\n# )\n\n# return action_set\n\n\nclass Think(PromptElement):\n _prompt = \"\"\n\n _abstract_ex = \"\"\"\n\nThink step by step. If you need to make calculations such as coordinates, write them here. Describe the effect\nthat your previous action had on the current content of the page.\n\n\"\"\"\n _concrete_ex = \"\"\"\n\nFrom previous action I tried to set the value of year to \"2022\",\nusing select_option, but it doesn't appear to be in the form. It may be a\ndynamic dropdown, I will try using click with the bid \"a324\" and look at the\nresponse from the page.\n\n\"\"\"\n\n def _parse_answer(self, text_answer):\n try:\n return parse_html_tags_raise(text_answer, keys=[\"think\"], merge_multiple=True)\n except ParseError as e:\n return {\"think\": text_answer, \"parse_error\": str(e)}\n\n\n# def diff(previous, new):\n# \"\"\"Return a string showing the difference between original and new.\n\n# If the difference is above diff_threshold, return the diff string.\"\"\"\n\n# if previous == new:\n# return \"Identical\", []\n\n# if len(previous) == 0 or previous is None:\n# return \"previous is empty\", []\n\n# diff_gen = difflib.ndiff(previous.splitlines(), new.splitlines())\n\n# diff_lines = []\n# plus_count = 0\n# minus_count = 0\n# for line in diff_gen:\n# if line.strip().startswith(\"+\"):","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.HistoryStep","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.HistoryStep#L745-L800","kind":"class","name":"HistoryStep","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":745,"end_line":800,"context_start_line":725,"context_end_line":820,"code":"# self.new = new\n# self.max_line_diff = max_line_diff\n# self.shrink_speed = shrink_speed\n# self.prefix = prefix\n\n# def shrink(self):\n# self.max_line_diff -= self.shrink_speed\n# self.max_line_diff = max(1, self.max_line_diff)\n\n# @property\n# def _prompt(self) -> str:\n# header, diff_lines = diff(self.previous, self.new)\n\n# diff_str = \"\\n\".join(diff_lines[: self.max_line_diff])\n# if len(diff_lines) > self.max_line_diff:\n# original_count = len(diff_lines)\n# diff_str = f\"{diff_str}\\nDiff truncated, {original_count - self.max_line_diff} changes now shown.\"\n# return f\"{self.prefix}{header}\\n{diff_str}\\n\"\n\n\nclass HistoryStep(Shrinkable):\n def __init__(\n self, previous_obs, 
current_obs, action, memory, thought, flags: ObsFlags, shrink_speed=1\n ) -> None:\n super().__init__()\n # self.html_diff = Diff(\n # previous_obs[flags.html_type],\n # current_obs[flags.html_type],\n # prefix=\"\\n### HTML diff:\\n\",\n # shrink_speed=shrink_speed,\n # visible=lambda: flags.use_html and flags.use_diff,\n # )\n # self.ax_tree_diff = Diff(\n # previous_obs[\"axtree_txt\"],\n # current_obs[\"axtree_txt\"],\n # prefix=f\"\\n### Accessibility tree diff:\\n\",\n # shrink_speed=shrink_speed,\n # visible=lambda: flags.use_ax_tree and flags.use_diff,\n # )\n self.error = Error(\n current_obs[\"last_action_error\"],\n visible=(\n lambda: flags.use_error_logs\n and current_obs[\"last_action_error\"]\n and flags.use_past_error_logs\n ),\n prefix=\"### \",\n )\n self.shrink_speed = shrink_speed\n self.action = action\n self.memory = memory\n self.thought = thought\n self.flags = flags\n\n def shrink(self):\n super().shrink()\n # self.html_diff.shrink()\n # self.ax_tree_diff.shrink()\n\n @property\n def _prompt(self) -> str:\n prompt = \"\"\n\n if self.flags.use_think_history:\n prompt += f\"\\n\\n{self.thought}\\n\\n\"\n\n if self.flags.use_action_history:\n prompt += f\"\\n\\n{self.action}\\n\\n\"\n\n # prompt += f\"{self.error.prompt}{self.html_diff.prompt}{self.ax_tree_diff.prompt}\"\n prompt += f\"{self.error.prompt}\"\n\n if self.memory is not None:\n prompt += f\"\\n\\n{self.memory}\\n\\n\"\n\n return prompt\n\n\nclass History(Shrinkable):\n def __init__(\n self, history_obs, actions, memories, thoughts, flags: ObsFlags, shrink_speed=1\n ) -> None:\n if memories is None:\n memories = [None] * len(actions)\n super().__init__(visible=lambda: flags.use_history)\n assert len(history_obs) == len(actions) + 1\n assert len(history_obs) == len(memories) + 1\n\n self.shrink_speed = shrink_speed\n self.history_steps: list[HistoryStep] = []\n\n for i in range(1, len(history_obs)):\n self.history_steps.append(\n HistoryStep(\n history_obs[i - 1],\n history_obs[i],","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.History","uri":"program://AgentLab/class/src.agentlab.agents.dynamic_prompting.History#L803-L841","kind":"class","name":"History","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":803,"end_line":841,"context_start_line":783,"context_end_line":861,"code":"\n @property\n def _prompt(self) -> str:\n prompt = \"\"\n\n if self.flags.use_think_history:\n prompt += f\"\\n\\n{self.thought}\\n\\n\"\n\n if self.flags.use_action_history:\n prompt += f\"\\n\\n{self.action}\\n\\n\"\n\n # prompt += f\"{self.error.prompt}{self.html_diff.prompt}{self.ax_tree_diff.prompt}\"\n prompt += f\"{self.error.prompt}\"\n\n if self.memory is not None:\n prompt += f\"\\n\\n{self.memory}\\n\\n\"\n\n return prompt\n\n\nclass History(Shrinkable):\n def __init__(\n self, history_obs, actions, memories, thoughts, flags: ObsFlags, shrink_speed=1\n ) -> None:\n if memories is None:\n memories = [None] * len(actions)\n super().__init__(visible=lambda: flags.use_history)\n assert len(history_obs) == len(actions) + 1\n assert len(history_obs) == len(memories) + 1\n\n self.shrink_speed = shrink_speed\n self.history_steps: list[HistoryStep] = []\n\n for i in range(1, len(history_obs)):\n self.history_steps.append(\n HistoryStep(\n history_obs[i - 1],\n history_obs[i],\n actions[i - 1],\n memories[i - 1],\n thoughts[i - 1],\n flags,\n )\n )\n\n def shrink(self):\n 
\"\"\"Shrink individual steps\"\"\"\n # TODO set the shrink speed of older steps to be higher\n super().shrink()\n for step in self.history_steps:\n step.shrink()\n\n @property\n def _prompt(self):\n prompts = [\"# History of interaction with the task:\\n\"]\n for i, step in enumerate(self.history_steps):\n prompts.append(f\"## step {i}\")\n prompts.append(step.prompt)\n return \"\\n\".join(prompts) + \"\\n\"\n\n\ndef make_obs_preprocessor(flags: ObsFlags):\n def obs_mapping(obs: dict):\n obs = copy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=flags.extract_visible_tag,\n with_clickable=flags.extract_clickable_tag,\n with_center_coords=flags.extract_coords == \"center\",\n with_bounding_box_coords=flags.extract_coords == \"box\",\n filter_visible_only=flags.filter_visible_elements_only,\n filter_with_bid_only=flags.filter_with_bid_only,\n filter_som_only=flags.filter_som_only,\n )\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=flags.extract_visible_tag,","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.make_obs_preprocessor","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.make_obs_preprocessor#L844-L876","kind":"function","name":"make_obs_preprocessor","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":844,"end_line":876,"context_start_line":824,"context_end_line":876,"code":" flags,\n )\n )\n\n def shrink(self):\n \"\"\"Shrink individual steps\"\"\"\n # TODO set the shrink speed of older steps to be higher\n super().shrink()\n for step in self.history_steps:\n step.shrink()\n\n @property\n def _prompt(self):\n prompts = [\"# History of interaction with the task:\\n\"]\n for i, step in enumerate(self.history_steps):\n prompts.append(f\"## step {i}\")\n prompts.append(step.prompt)\n return \"\\n\".join(prompts) + \"\\n\"\n\n\ndef make_obs_preprocessor(flags: ObsFlags):\n def obs_mapping(obs: dict):\n obs = copy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=flags.extract_visible_tag,\n with_clickable=flags.extract_clickable_tag,\n with_center_coords=flags.extract_coords == \"center\",\n with_bounding_box_coords=flags.extract_coords == \"box\",\n filter_visible_only=flags.filter_visible_elements_only,\n filter_with_bid_only=flags.filter_with_bid_only,\n filter_som_only=flags.filter_som_only,\n )\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=flags.extract_visible_tag,\n with_clickable=flags.extract_clickable_tag,\n with_center_coords=flags.extract_coords == \"center\",\n with_bounding_box_coords=flags.extract_coords == \"box\",\n filter_visible_only=flags.filter_visible_elements_only,\n filter_with_bid_only=flags.filter_with_bid_only,\n filter_som_only=flags.filter_som_only,\n )\n obs[\"pruned_html\"] = prune_html(obs[\"dom_txt\"])\n obs[\"screenshot_som\"] = overlay_som(\n obs[\"screenshot\"], extra_properties=obs[\"extra_element_properties\"]\n )\n\n return obs\n\n return obs_mapping","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.copy","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.copy#L29-L30","kind":"function","name":"copy","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":29,"end_line":30,"context_start_line":9,"context_end_line":50,"code":"from warnings import warn\n\nimport bgym\nfrom bgym import HighLevelActionSetArgs\nfrom browsergym.core.action.base import AbstractActionSet\nfrom browsergym.utils.obs import flatten_axtree_to_str, flatten_dom_to_str, overlay_som, prune_html\n\nfrom agentlab.llm.llm_utils import (\n BaseMessage,\n ParseError,\n count_tokens,\n extract_code_blocks,\n image_to_jpg_base64_url,\n parse_html_tags_raise,\n)\n\n\nclass Flags:\n \"\"\"Base class for flags. Mostly for backward compatibility.\"\"\"\n\n def copy(self):\n return deepcopy(self)\n\n def asdict(self):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n return asdict(self)\n\n @classmethod\n def from_dict(self, flags_dict):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n if isinstance(flags_dict, ObsFlags):\n return flags_dict\n\n if not isinstance(flags_dict, dict):\n raise ValueError(f\"Unregcognized type for flags_dict of type {type(flags_dict)}.\")\n return ObsFlags(**flags_dict)\n\n\n@dataclass\nclass ObsFlags(Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.asdict","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.asdict#L32-L34","kind":"function","name":"asdict","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":32,"end_line":34,"context_start_line":12,"context_end_line":54,"code":"from bgym import HighLevelActionSetArgs\nfrom browsergym.core.action.base import AbstractActionSet\nfrom browsergym.utils.obs import flatten_axtree_to_str, flatten_dom_to_str, overlay_som, prune_html\n\nfrom agentlab.llm.llm_utils import (\n BaseMessage,\n ParseError,\n count_tokens,\n extract_code_blocks,\n image_to_jpg_base64_url,\n parse_html_tags_raise,\n)\n\n\nclass Flags:\n \"\"\"Base class for flags. 
Mostly for backward compatibility.\"\"\"\n\n def copy(self):\n return deepcopy(self)\n\n def asdict(self):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n return asdict(self)\n\n @classmethod\n def from_dict(self, flags_dict):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n if isinstance(flags_dict, ObsFlags):\n return flags_dict\n\n if not isinstance(flags_dict, dict):\n raise ValueError(f\"Unregcognized type for flags_dict of type {type(flags_dict)}.\")\n return ObsFlags(**flags_dict)\n\n\n@dataclass\nclass ObsFlags(Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n\n Attributes:\n use_html (bool): Use the HTML in the prompt.\n use_ax_tree (bool): Use the accessibility tree in the prompt.","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.from_dict","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.from_dict#L37-L44","kind":"function","name":"from_dict","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":37,"end_line":44,"context_start_line":17,"context_end_line":64,"code":" BaseMessage,\n ParseError,\n count_tokens,\n extract_code_blocks,\n image_to_jpg_base64_url,\n parse_html_tags_raise,\n)\n\n\nclass Flags:\n \"\"\"Base class for flags. Mostly for backward compatibility.\"\"\"\n\n def copy(self):\n return deepcopy(self)\n\n def asdict(self):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n return asdict(self)\n\n @classmethod\n def from_dict(self, flags_dict):\n \"\"\"Helper for JSON serializable requirement.\"\"\"\n if isinstance(flags_dict, ObsFlags):\n return flags_dict\n\n if not isinstance(flags_dict, dict):\n raise ValueError(f\"Unregcognized type for flags_dict of type {type(flags_dict)}.\")\n return ObsFlags(**flags_dict)\n\n\n@dataclass\nclass ObsFlags(Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n\n Attributes:\n use_html (bool): Use the HTML in the prompt.\n use_ax_tree (bool): Use the accessibility tree in the prompt.\n use_focused_element (bool): Provide the ID of the focused element.\n use_error_logs (bool): Expose the previous error in the prompt.\n use_history (bool): Enable history of previous steps in the prompt.\n use_past_error_logs (bool): If use_history is True, expose all previous errors in the history.\n use_action_history (bool): If use_history is True, include the actions in the history.\n use_think_history (bool): If use_history is True, include all previous chains of thoughts in the history.\n use_diff (bool): Add a diff of the current and previous HTML to the prompt.\n html_type (str): Type of HTML to use in the prompt, may depend on preprocessing of observation.\n use_screenshot (bool): Add a screenshot of the page to the prompt, following OpenAI's API. 
This will be automatically disabled if the model does not have vision capabilities.\n use_som (bool): Add a set of marks to the screenshot.","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.__init__","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.__init__#L804-L826","kind":"function","name":"__init__","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":804,"end_line":826,"context_start_line":784,"context_end_line":846,"code":" @property\n def _prompt(self) -> str:\n prompt = \"\"\n\n if self.flags.use_think_history:\n prompt += f\"\\n\\n{self.thought}\\n\\n\"\n\n if self.flags.use_action_history:\n prompt += f\"\\n\\n{self.action}\\n\\n\"\n\n # prompt += f\"{self.error.prompt}{self.html_diff.prompt}{self.ax_tree_diff.prompt}\"\n prompt += f\"{self.error.prompt}\"\n\n if self.memory is not None:\n prompt += f\"\\n\\n{self.memory}\\n\\n\"\n\n return prompt\n\n\nclass History(Shrinkable):\n def __init__(\n self, history_obs, actions, memories, thoughts, flags: ObsFlags, shrink_speed=1\n ) -> None:\n if memories is None:\n memories = [None] * len(actions)\n super().__init__(visible=lambda: flags.use_history)\n assert len(history_obs) == len(actions) + 1\n assert len(history_obs) == len(memories) + 1\n\n self.shrink_speed = shrink_speed\n self.history_steps: list[HistoryStep] = []\n\n for i in range(1, len(history_obs)):\n self.history_steps.append(\n HistoryStep(\n history_obs[i - 1],\n history_obs[i],\n actions[i - 1],\n memories[i - 1],\n thoughts[i - 1],\n flags,\n )\n )\n\n def shrink(self):\n \"\"\"Shrink individual steps\"\"\"\n # TODO set the shrink speed of older steps to be higher\n super().shrink()\n for step in self.history_steps:\n step.shrink()\n\n @property\n def _prompt(self):\n prompts = [\"# History of interaction with the task:\\n\"]\n for i, step in enumerate(self.history_steps):\n prompts.append(f\"## step {i}\")\n prompts.append(step.prompt)\n return \"\\n\".join(prompts) + \"\\n\"\n\n\ndef make_obs_preprocessor(flags: ObsFlags):\n def obs_mapping(obs: dict):\n obs = copy(obs)","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.prompt","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.prompt#L126-L131","kind":"function","name":"prompt","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":126,"end_line":131,"context_start_line":106,"context_end_line":151,"code":"\nclass PromptElement:\n \"\"\"Base class for all prompt elements. Prompt elements can be hidden.\"\"\"\n\n _prompt = \"\"\n _abstract_ex = \"\"\n _concrete_ex = \"\"\n\n def __init__(self, visible: bool = True) -> None:\n \"\"\"Prompt element that can be hidden.\n\n Args:\n visible : bool, optional\n Whether the prompt element should be visible, by default True. Can\n be a callable that returns a bool. This is useful when a specific\n flag changes during a shrink iteration.\n \"\"\"\n self._visible = visible\n\n @property\n def prompt(self) -> str | BaseMessage:\n \"\"\"Avoid overriding this method. Override _prompt instead.\"\"\"\n if self.is_visible:\n return self._prompt\n else:\n return \"\"\n\n @property\n def abstract_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.\n Provide an abstract example of the answer here. 
See Memory for an\n example.\n\n Avoid overriding this method. Override _abstract_ex instead\n\n Returns:\n str: The abstract example\n \"\"\"\n if self.is_visible:\n return self._abstract_ex\n else:\n return \"\"\n\n @property\n def concrete_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.abstract_ex","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.abstract_ex#L134-L147","kind":"function","name":"abstract_ex","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":134,"end_line":147,"context_start_line":114,"context_end_line":167,"code":" def __init__(self, visible: bool = True) -> None:\n \"\"\"Prompt element that can be hidden.\n\n Args:\n visible : bool, optional\n Whether the prompt element should be visible, by default True. Can\n be a callable that returns a bool. This is useful when a specific\n flag changes during a shrink iteration.\n \"\"\"\n self._visible = visible\n\n @property\n def prompt(self) -> str | BaseMessage:\n \"\"\"Avoid overriding this method. Override _prompt instead.\"\"\"\n if self.is_visible:\n return self._prompt\n else:\n return \"\"\n\n @property\n def abstract_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.\n Provide an abstract example of the answer here. See Memory for an\n example.\n\n Avoid overriding this method. Override _abstract_ex instead\n\n Returns:\n str: The abstract example\n \"\"\"\n if self.is_visible:\n return self._abstract_ex\n else:\n return \"\"\n\n @property\n def concrete_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.\n Provide a concrete example of the answer here. See Memory for an\n example.\n\n Avoid overriding this method. Override _concrete_ex instead\n\n Returns:\n str: The concrete example\n \"\"\"\n if self.is_visible:\n return self._concrete_ex\n else:\n return \"\"\n\n @property\n def is_visible(self):\n \"\"\"Handle the case where visible is a callable.\"\"\"","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.concrete_ex","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.concrete_ex#L150-L163","kind":"function","name":"concrete_ex","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":150,"end_line":163,"context_start_line":130,"context_end_line":183,"code":" else:\n return \"\"\n\n @property\n def abstract_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.\n Provide an abstract example of the answer here. See Memory for an\n example.\n\n Avoid overriding this method. Override _abstract_ex instead\n\n Returns:\n str: The abstract example\n \"\"\"\n if self.is_visible:\n return self._abstract_ex\n else:\n return \"\"\n\n @property\n def concrete_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.\n Provide a concrete example of the answer here. See Memory for an\n example.\n\n Avoid overriding this method. 
Override _concrete_ex instead\n\n Returns:\n str: The concrete example\n \"\"\"\n if self.is_visible:\n return self._concrete_ex\n else:\n return \"\"\n\n @property\n def is_visible(self):\n \"\"\"Handle the case where visible is a callable.\"\"\"\n visible = self._visible\n if callable(visible):\n visible = visible()\n return visible\n\n def _parse_answer(self, text_answer):\n \"\"\"Override to actually extract elements from the answer.\"\"\"\n return {}\n\n def parse_answer(self, text_answer) -> dict:\n if self.is_visible:\n return self._parse_answer(text_answer)\n else:\n return {}\n\n","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.is_visible","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.is_visible#L166-L171","kind":"function","name":"is_visible","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":166,"end_line":171,"context_start_line":146,"context_end_line":191,"code":" else:\n return \"\"\n\n @property\n def concrete_ex(self):\n \"\"\"Useful when this prompt element is requesting an answer from the llm.\n Provide a concrete example of the answer here. See Memory for an\n example.\n\n Avoid overriding this method. Override _concrete_ex instead\n\n Returns:\n str: The concrete example\n \"\"\"\n if self.is_visible:\n return self._concrete_ex\n else:\n return \"\"\n\n @property\n def is_visible(self):\n \"\"\"Handle the case where visible is a callable.\"\"\"\n visible = self._visible\n if callable(visible):\n visible = visible()\n return visible\n\n def _parse_answer(self, text_answer):\n \"\"\"Override to actually extract elements from the answer.\"\"\"\n return {}\n\n def parse_answer(self, text_answer) -> dict:\n if self.is_visible:\n return self._parse_answer(text_answer)\n else:\n return {}\n\n\nclass Shrinkable(PromptElement, abc.ABC):\n @abc.abstractmethod\n def shrink(self) -> None:\n \"\"\"Implement shrinking of this prompt element.\n\n You need to recursively call all shrinkable elements that are part of\n this prompt. You can also implement a shrinking strategy for this prompt.\n Shrinking can be called multiple times to progressively shrink the","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting._parse_answer","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting._parse_answer#L681-L685","kind":"function","name":"_parse_answer","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":681,"end_line":685,"context_start_line":661,"context_end_line":705,"code":"\n\nclass Think(PromptElement):\n _prompt = \"\"\n\n _abstract_ex = \"\"\"\n\nThink step by step. If you need to make calculations such as coordinates, write them here. Describe the effect\nthat your previous action had on the current content of the page.\n\n\"\"\"\n _concrete_ex = \"\"\"\n\nFrom the previous action I tried to set the value of year to \"2022\",\nusing select_option, but it doesn't appear to be in the form. 
It may be a\ndynamic dropdown, I will try using click with the bid \"a324\" and look at the\nresponse from the page.\n\n\"\"\"\n\n def _parse_answer(self, text_answer):\n try:\n return parse_html_tags_raise(text_answer, keys=[\"think\"], merge_multiple=True)\n except ParseError as e:\n return {\"think\": text_answer, \"parse_error\": str(e)}\n\n\n# def diff(previous, new):\n# \"\"\"Return a string showing the difference between original and new.\n\n# If the difference is above diff_threshold, return the diff string.\"\"\"\n\n# if previous == new:\n# return \"Identical\", []\n\n# if len(previous) == 0 or previous is None:\n# return \"previous is empty\", []\n\n# diff_gen = difflib.ndiff(previous.splitlines(), new.splitlines())\n\n# diff_lines = []\n# plus_count = 0\n# minus_count = 0\n# for line in diff_gen:\n# if line.strip().startswith(\"+\"):","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.parse_answer","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.parse_answer#L177-L181","kind":"function","name":"parse_answer","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":177,"end_line":181,"context_start_line":157,"context_end_line":201,"code":" Returns:\n str: The concrete example\n \"\"\"\n if self.is_visible:\n return self._concrete_ex\n else:\n return \"\"\n\n @property\n def is_visible(self):\n \"\"\"Handle the case where visible is a callable.\"\"\"\n visible = self._visible\n if callable(visible):\n visible = visible()\n return visible\n\n def _parse_answer(self, text_answer):\n \"\"\"Override to actually extract elements from the answer.\"\"\"\n return {}\n\n def parse_answer(self, text_answer) -> dict:\n if self.is_visible:\n return self._parse_answer(text_answer)\n else:\n return {}\n\n\nclass Shrinkable(PromptElement, abc.ABC):\n @abc.abstractmethod\n def shrink(self) -> None:\n \"\"\"Implement shrinking of this prompt element.\n\n You need to recursively call all shrinkable elements that are part of\n this prompt. You can also implement a shrinking strategy for this prompt.\n Shrinking can be called multiple times to progressively shrink the\n prompt until it fits max_tokens. 
Default max shrink iterations is 20.\n \"\"\"\n pass\n\n\nclass Trunkater(Shrinkable):\n \"\"\"Shrinkable element that truncates the prompt element from the bottom\n after a certain number of iterations.\"\"\"\n\n def __init__(self, visible, shrink_speed=0.3, start_trunkate_iteration=10):","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.shrink","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.shrink#L828-L833","kind":"function","name":"shrink","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":828,"end_line":833,"context_start_line":808,"context_end_line":853,"code":" memories = [None] * len(actions)\n super().__init__(visible=lambda: flags.use_history)\n assert len(history_obs) == len(actions) + 1\n assert len(history_obs) == len(memories) + 1\n\n self.shrink_speed = shrink_speed\n self.history_steps: list[HistoryStep] = []\n\n for i in range(1, len(history_obs)):\n self.history_steps.append(\n HistoryStep(\n history_obs[i - 1],\n history_obs[i],\n actions[i - 1],\n memories[i - 1],\n thoughts[i - 1],\n flags,\n )\n )\n\n def shrink(self):\n \"\"\"Shrink individual steps\"\"\"\n # TODO set the shrink speed of older steps to be higher\n super().shrink()\n for step in self.history_steps:\n step.shrink()\n\n @property\n def _prompt(self):\n prompts = [\"# History of interaction with the task:\\n\"]\n for i, step in enumerate(self.history_steps):\n prompts.append(f\"## step {i}\")\n prompts.append(step.prompt)\n return \"\\n\".join(prompts) + \"\\n\"\n\n\ndef make_obs_preprocessor(flags: ObsFlags):\n def obs_mapping(obs: dict):\n obs = copy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=flags.extract_visible_tag,\n with_clickable=flags.extract_clickable_tag,\n with_center_coords=flags.extract_coords == \"center\",\n with_bounding_box_coords=flags.extract_coords == \"box\",","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting._prompt","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting._prompt#L836-L841","kind":"function","name":"_prompt","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":836,"end_line":841,"context_start_line":816,"context_end_line":861,"code":" for i in range(1, len(history_obs)):\n self.history_steps.append(\n HistoryStep(\n history_obs[i - 1],\n history_obs[i],\n actions[i - 1],\n memories[i - 1],\n thoughts[i - 1],\n flags,\n )\n )\n\n def shrink(self):\n \"\"\"Shrink individual steps\"\"\"\n # TODO set the shrink speed of older steps to be higher\n super().shrink()\n for step in self.history_steps:\n step.shrink()\n\n @property\n def _prompt(self):\n prompts = [\"# History of interaction with the task:\\n\"]\n for i, step in enumerate(self.history_steps):\n prompts.append(f\"## step {i}\")\n prompts.append(step.prompt)\n return \"\\n\".join(prompts) + \"\\n\"\n\n\ndef make_obs_preprocessor(flags: ObsFlags):\n def obs_mapping(obs: dict):\n obs = copy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=flags.extract_visible_tag,\n with_clickable=flags.extract_clickable_tag,\n with_center_coords=flags.extract_coords == \"center\",\n 
with_bounding_box_coords=flags.extract_coords == \"box\",\n filter_visible_only=flags.filter_visible_elements_only,\n filter_with_bid_only=flags.filter_with_bid_only,\n filter_som_only=flags.filter_som_only,\n )\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=flags.extract_visible_tag,","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.add_screenshot","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.add_screenshot#L446-L458","kind":"function","name":"add_screenshot","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":446,"end_line":458,"context_start_line":426,"context_end_line":478,"code":" prefix=\"## \",\n )\n self.focused_element = FocusedElement(\n obs[\"focused_element_bid\"],\n visible=flags.use_focused_element,\n prefix=\"## \",\n )\n\n def shrink(self):\n self.ax_tree.shrink()\n self.html.shrink()\n\n @property\n def _prompt(self) -> str:\n return f\"\"\"\n# Observation of current step:\n{self.tabs.prompt}{self.html.prompt}{self.ax_tree.prompt}{self.focused_element.prompt}{self.error.prompt}\n\n\"\"\"\n\n def add_screenshot(self, prompt: BaseMessage) -> BaseMessage:\n if self.flags.use_screenshot:\n if self.flags.use_som:\n screenshot = self.obs[\"screenshot_som\"]\n prompt.add_text(\n \"\\n## Screenshot:\\nHere is a screenshot of the page, it is annotated with bounding boxes and corresponding bids:\"\n )\n else:\n screenshot = self.obs[\"screenshot\"]\n prompt.add_text(\"\\n## Screenshot:\\nHere is a screenshot of the page:\")\n img_url = image_to_jpg_base64_url(screenshot)\n prompt.add_image(img_url, detail=self.flags.openai_vision_detail)\n return prompt\n\n\nclass MacNote(PromptElement):\n def __init__(self) -> None:\n super().__init__(visible=platform.system() == \"Darwin\")\n self._prompt = (\n \"\\nNote: you are on mac so you should use Meta instead of Control for Control+C etc.\\n\"\n )\n\n\nclass BeCautious(PromptElement):\n def __init__(self, visible: bool = True) -> None:\n super().__init__(visible=visible)\n self._prompt = f\"\"\"\\\n\\nBe very cautious. Avoid submitting anything before verifying the effect of your\nactions. Take the time to explore the effect of safe actions first. 
For example\nyou can fill a few elements of a form, but don't click submit before verifying\nthat everything was filled correctly.\\n\"\"\"\n\n","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.dynamic_prompting.obs_mapping","uri":"program://AgentLab/function/src.agentlab.agents.dynamic_prompting.obs_mapping#L845-L874","kind":"function","name":"obs_mapping","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":845,"end_line":874,"context_start_line":825,"context_end_line":876,"code":" )\n )\n\n def shrink(self):\n \"\"\"Shrink individual steps\"\"\"\n # TODO set the shrink speed of older steps to be higher\n super().shrink()\n for step in self.history_steps:\n step.shrink()\n\n @property\n def _prompt(self):\n prompts = [\"# History of interaction with the task:\\n\"]\n for i, step in enumerate(self.history_steps):\n prompts.append(f\"## step {i}\")\n prompts.append(step.prompt)\n return \"\\n\".join(prompts) + \"\\n\"\n\n\ndef make_obs_preprocessor(flags: ObsFlags):\n def obs_mapping(obs: dict):\n obs = copy(obs)\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=flags.extract_visible_tag,\n with_clickable=flags.extract_clickable_tag,\n with_center_coords=flags.extract_coords == \"center\",\n with_bounding_box_coords=flags.extract_coords == \"box\",\n filter_visible_only=flags.filter_visible_elements_only,\n filter_with_bid_only=flags.filter_with_bid_only,\n filter_som_only=flags.filter_som_only,\n )\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n with_visible=flags.extract_visible_tag,\n with_clickable=flags.extract_clickable_tag,\n with_center_coords=flags.extract_coords == \"center\",\n with_bounding_box_coords=flags.extract_coords == \"box\",\n filter_visible_only=flags.filter_visible_elements_only,\n filter_with_bid_only=flags.filter_with_bid_only,\n filter_som_only=flags.filter_som_only,\n )\n obs[\"pruned_html\"] = prune_html(obs[\"dom_txt\"])\n obs[\"screenshot_som\"] = overlay_som(\n obs[\"screenshot\"], extra_properties=obs[\"extra_element_properties\"]\n )\n\n return obs\n\n return obs_mapping","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.agent_configs","uri":"program://AgentLab/module/src.agentlab.agents.generic_agent.agent_configs#L1-L444","kind":"module","name":"src.agentlab.agents.generic_agent.agent_configs","path":"src/agentlab/agents/generic_agent/agent_configs.py","language":"python","start_line":1,"end_line":444,"context_start_line":1,"context_end_line":444,"code":"\"\"\"\nBasic flags and agent configurations for generic agents.\n\"\"\"\n\nimport bgym\nfrom bgym import HighLevelActionSetArgs\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.experiments import args\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\nfrom .generic_agent import GenericAgentArgs\nfrom .generic_agent_prompt import GenericPromptFlags\nfrom .tmlr_config import BASE_FLAGS\n\nFLAGS_CUSTOM = GenericPromptFlags(\n obs=dp.ObsFlags(\n use_html=False,\n use_ax_tree=True,\n use_focused_element=True,\n use_error_logs=True,\n use_history=True,\n use_past_error_logs=False,\n use_action_history=True,\n use_think_history=False,\n use_diff=False,\n 
html_type=\"pruned_html\",\n use_screenshot=False,\n use_som=False,\n extract_visible_tag=True,\n extract_clickable_tag=False,\n extract_coords=\"False\",\n filter_visible_elements_only=False,\n ),\n action=dp.ActionFlags(\n action_set=HighLevelActionSetArgs(\n subsets=[\"bid\"],\n multiaction=False,\n ),\n long_description=False,\n individual_examples=True,\n ),\n use_plan=False,\n use_criticise=False,\n use_thinking=True,\n use_memory=False,\n use_concrete_example=True,\n use_abstract_example=True,\n use_hints=True,\n enable_chat=False,\n max_prompt_tokens=40_000,\n be_cautious=True,\n extra_instructions=None,\n)\n\n\nAGENT_CUSTOM = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/meta-llama/llama-3.1-8b-instruct\"],\n flags=FLAGS_CUSTOM,\n)\n\n\n# GPT-3.5 default config\nFLAGS_GPT_3_5 = GenericPromptFlags(\n obs=dp.ObsFlags(\n use_html=False, # too big for most benchmark except miniwob\n use_ax_tree=True, # very useful\n use_focused_element=True, # detrimental on minowob according to ablation study\n use_error_logs=True,\n use_history=True,\n use_past_error_logs=False, # very detrimental on L1 and miniwob\n use_action_history=True, # helpful on miniwob\n use_think_history=False, # detrimental on L1 and miniwob\n use_diff=False,\n html_type=\"pruned_html\",\n use_screenshot=False,\n use_som=False,\n extract_visible_tag=True, # doesn't change much\n extract_clickable_tag=False, # doesn't change much\n extract_coords=\"False\",\n filter_visible_elements_only=False,\n ),\n action=dp.ActionFlags(\n action_set=HighLevelActionSetArgs(\n subsets=[\"bid\"],\n multiaction=False,\n ),\n long_description=False,\n individual_examples=True,\n ),\n use_plan=False, # usually detrimental\n use_criticise=False, # usually detrimental\n use_thinking=True, # very useful\n use_memory=False,\n use_concrete_example=True, # useful\n use_abstract_example=True, # useful\n use_hints=True, # useful\n enable_chat=False,\n max_prompt_tokens=40_000,\n be_cautious=True,\n extra_instructions=None,\n)\n\n\nAGENT_3_5 = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-3.5-turbo-1106\"],\n flags=FLAGS_GPT_3_5,\n)\n\n# llama3-70b default config\nFLAGS_LLAMA3_70B = GenericPromptFlags(\n obs=dp.ObsFlags(\n use_html=False,\n use_ax_tree=True,\n use_focused_element=True,\n use_error_logs=False,\n use_history=True,\n use_past_error_logs=False,\n use_action_history=True,\n use_think_history=True,\n use_diff=False,\n html_type=\"pruned_html\",\n use_screenshot=False,\n use_som=False,\n extract_visible_tag=True,\n extract_clickable_tag=False,\n extract_coords=\"False\",\n filter_visible_elements_only=False,\n ),\n action=dp.ActionFlags(\n action_set=HighLevelActionSetArgs(\n subsets=[\"bid\"],\n multiaction=False,\n ),\n long_description=False,\n individual_examples=True,\n ),\n use_plan=False,\n use_criticise=False,\n use_thinking=True,\n use_memory=False,\n use_concrete_example=True,\n use_abstract_example=True,\n use_hints=True,\n enable_chat=False,\n max_prompt_tokens=40_000,\n be_cautious=True,\n extra_instructions=None,\n add_missparsed_messages=True,\n)\n\nAGENT_LLAMA3_70B = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/meta-llama/llama-3-70b-instruct\"],\n flags=FLAGS_LLAMA3_70B,\n)\nAGENT_LLAMA31_70B = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/meta-llama/llama-3.1-70b-instruct\"],\n flags=FLAGS_LLAMA3_70B,\n)\n\nFLAGS_8B = GenericPromptFlags(\n obs=dp.ObsFlags(\n use_html=False,\n use_ax_tree=True,\n use_focused_element=True,\n 
use_error_logs=False,\n use_history=True,\n use_past_error_logs=False,\n use_action_history=True,\n use_think_history=False,\n use_diff=False,\n html_type=\"pruned_html\",\n use_screenshot=False,\n use_som=False,\n extract_visible_tag=False,\n extract_clickable_tag=False,\n extract_coords=\"False\",\n filter_visible_elements_only=False,\n ),\n action=dp.ActionFlags(\n action_set=HighLevelActionSetArgs(\n subsets=[\"bid\"],\n multiaction=True,\n ),\n long_description=False,\n individual_examples=True,\n ),\n use_plan=False,\n use_criticise=False,\n use_thinking=True,\n use_memory=False,\n use_concrete_example=True,\n use_abstract_example=True,\n use_hints=True,\n enable_chat=False,\n max_prompt_tokens=40_000,\n be_cautious=True,\n extra_instructions=None,\n add_missparsed_messages=True,\n)\n\n\nAGENT_8B = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"meta-llama/Meta-Llama-3-8B-Instruct\"],\n flags=FLAGS_8B,\n)\n\n\nAGENT_LLAMA31_8B = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/meta-llama/llama-3.1-8b-instruct\"],\n flags=FLAGS_8B,\n)\n\n\n# GPT-4o default config\nFLAGS_GPT_4o = GenericPromptFlags(\n obs=dp.ObsFlags(\n use_html=False,\n use_ax_tree=True,\n use_focused_element=True,\n use_error_logs=True,\n use_history=True,\n use_past_error_logs=False,\n use_action_history=True,\n use_think_history=False,\n use_diff=False,\n html_type=\"pruned_html\",\n use_screenshot=False,\n use_som=False,\n extract_visible_tag=True,\n extract_clickable_tag=True,\n extract_coords=\"False\",\n filter_visible_elements_only=False,\n ),\n action=dp.ActionFlags(\n action_set=HighLevelActionSetArgs(\n subsets=[\"bid\"],\n multiaction=False,\n ),\n long_description=False,\n individual_examples=False,\n ),\n use_plan=False,\n use_criticise=False,\n use_thinking=True,\n use_memory=False,\n use_concrete_example=True,\n use_abstract_example=True,\n use_hints=True,\n enable_chat=False,\n max_prompt_tokens=40_000,\n be_cautious=True,\n extra_instructions=None,\n)\n\nAGENT_4o = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-2024-05-13\"],\n flags=FLAGS_GPT_4o,\n)\n\nAGENT_4o_MINI = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"],\n flags=FLAGS_GPT_4o,\n)\n\nAGENT_AZURE_4o_MINI = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4o-mini-2024-07-18\"],\n flags=FLAGS_GPT_4o,\n)\nAGENT_AZURE_4o = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4o-2024-08-06\"],\n flags=FLAGS_GPT_4o,\n)\nAGENT_AZURE_41 = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4.1-2025-04-14\"],\n flags=FLAGS_GPT_4o,\n)\nAGENT_AZURE_41_MINI = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4.1-mini-2025-04-14\"],\n flags=FLAGS_GPT_4o,\n)\nAGENT_AZURE_41_NANO = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4.1-nano-2025-04-14\"],\n flags=FLAGS_GPT_4o,\n)\n\nAGENT_AZURE_5 = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-5-2025-08-07\"],\n flags=FLAGS_GPT_4o,\n)\n\nAGENT_AZURE_5_MINI = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-5-mini-2025-08-07\"],\n flags=FLAGS_GPT_4o,\n)\n\nAGENT_AZURE_5_NANO = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-5-nano-2025-08-07\"],\n flags=FLAGS_GPT_4o,\n)\n\nAGENT_CLAUDE_SONNET_35 = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/anthropic/claude-3.5-sonnet:beta\"],\n flags=FLAGS_GPT_4o,\n)\nAGENT_37_SONNET = 
GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/anthropic/claude-3.7-sonnet\"],\n flags=FLAGS_GPT_4o,\n)\n# AGENT_o3_MINI = GenericAgentArgs(\n# chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/o3-mini-2025-01-31\"],\n# flags=FLAGS_GPT_4o,\n# )\nAGENT_o3_MINI = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/openai/o3-mini\"],\n flags=FLAGS_GPT_4o,\n)\n\nAGENT_o1_MINI = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/openai/o1-mini-2024-09-12\"],\n flags=FLAGS_GPT_4o,\n)\n# GPT-4o vision default config\nFLAGS_GPT_4o_VISION = FLAGS_GPT_4o.copy()\nFLAGS_GPT_4o_VISION.obs.use_screenshot = True\nFLAGS_GPT_4o_VISION.obs.use_som = True\n\nAGENT_4o_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-2024-05-13\"],\n flags=FLAGS_GPT_4o_VISION,\n)\n\nAGENT_4o_MINI_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"],\n flags=FLAGS_GPT_4o_VISION,\n)\n\nAGENT_AZURE_4o_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4o-2024-08-06\"],\n flags=FLAGS_GPT_4o_VISION,\n)\n\nAGENT_AZURE_4o_MINI_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4o-mini-2024-07-18\"],\n flags=FLAGS_GPT_4o_VISION,\n)\n\nAGENT_AZURE_41_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4.1-2025-04-14\"],\n flags=FLAGS_GPT_4o_VISION,\n)\n\nAGENT_AZURE_41_MINI_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4.1-mini-2025-04-14\"],\n flags=FLAGS_GPT_4o_VISION,\n)\nAGENT_AZURE_41_NANO_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4.1-nano-2025-04-14\"],\n flags=FLAGS_GPT_4o_VISION,\n)\n\nAGENT_AZURE_5_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-5-2025-08-07\"],\n flags=FLAGS_GPT_4o_VISION,\n)\n\nAGENT_AZURE_5_MINI_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-5-mini-2025-08-07\"],\n flags=FLAGS_GPT_4o_VISION,\n)\n\nAGENT_AZURE_5_NANO_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-5-nano-2025-08-07\"],\n flags=FLAGS_GPT_4o_VISION,\n)\n\nAGENT_CLAUDE_SONNET_35_VISION = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/anthropic/claude-3.5-sonnet:beta\"],\n flags=FLAGS_GPT_4o_VISION,\n)\nAGENT_LLAMA4_17B_INSTRUCT = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/meta-llama/llama-4-maverick\"],\n flags=BASE_FLAGS,\n)\nGPT5_MINI_FLAGS = BASE_FLAGS.copy()\nGPT5_MINI_FLAGS.action = dp.ActionFlags( # action should not be str to work with agentlab-assistant\n action_set=HighLevelActionSetArgs(\n subsets=[\"bid\"],\n multiaction=False,\n )\n)\n\nAGENT_GPT5_MINI = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-5-mini-2025-08-07\"],\n flags=GPT5_MINI_FLAGS,\n)\n\nDEFAULT_RS_FLAGS = GenericPromptFlags(\n flag_group=\"default_rs\",\n obs=dp.ObsFlags(\n use_html=True,\n use_ax_tree=args.Choice([True, False]),\n use_focused_element=False,\n use_error_logs=True,\n use_history=True,\n use_past_error_logs=args.Choice([True, False], p=[0.7, 0.3]),\n use_action_history=True,\n use_think_history=args.Choice([True, False], p=[0.7, 0.3]),\n use_diff=args.Choice([True, False], p=[0.3, 0.7]),\n html_type=\"pruned_html\",\n use_screenshot=False,\n use_som=False,\n extract_visible_tag=args.Choice([True, False]),\n extract_clickable_tag=False,\n extract_coords=args.Choice([\"center\", \"box\"]),\n 
filter_visible_elements_only=args.Choice([True, False], p=[0.3, 0.7]),\n ),\n action=dp.ActionFlags(\n action_set=HighLevelActionSetArgs(\n subsets=args.Choice([[\"bid\"], [\"bid\", \"coord\"]]),\n multiaction=args.Choice([True, False], p=[0.7, 0.3]),\n ),\n long_description=False,\n individual_examples=False,\n ),\n # drop_ax_tree_first=True, # this flag is no longer active, according to browsergym doc\n use_plan=args.Choice([True, False]),\n use_criticise=args.Choice([True, False], p=[0.7, 0.3]),\n use_thinking=args.Choice([True, False], p=[0.7, 0.3]),\n use_memory=args.Choice([True, False], p=[0.7, 0.3]),\n use_concrete_example=True,\n use_abstract_example=True,\n use_hints=args.Choice([True, False], p=[0.7, 0.3]),\n be_cautious=args.Choice([True, False]),\n enable_chat=False,\n max_prompt_tokens=40_000,\n extra_instructions=None,\n)\n\n\nRANDOM_SEARCH_AGENT = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-2024-05-13\"],\n flags=DEFAULT_RS_FLAGS,\n)","source_hash":"6020025b89c9e1cc61602abf8aa62eb226f93863096f465504f3e2c8b4320153","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.tmlr_config","uri":"program://AgentLab/module/src.agentlab.agents.generic_agent.tmlr_config#L1-L78","kind":"module","name":"src.agentlab.agents.generic_agent.tmlr_config","path":"src/agentlab/agents/generic_agent/tmlr_config.py","language":"python","start_line":1,"end_line":78,"context_start_line":1,"context_end_line":78,"code":"\"\"\"\nSpecific configurations for our 2024 TMLR submission.\n\"\"\"\n\nfrom copy import deepcopy\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.experiments import args\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\nfrom .generic_agent import GenericAgentArgs\nfrom .generic_agent_prompt import GenericPromptFlags\n\nBASE_FLAGS = GenericPromptFlags(\n obs=dp.ObsFlags(\n use_html=False,\n use_ax_tree=True,\n use_focused_element=True,\n use_error_logs=True,\n use_history=True,\n use_past_error_logs=False,\n use_action_history=True,\n use_think_history=True, # gpt-4o config except for this line\n use_diff=False,\n html_type=\"pruned_html\",\n use_screenshot=False,\n use_som=False,\n extract_visible_tag=True,\n extract_clickable_tag=True,\n extract_coords=\"False\",\n filter_visible_elements_only=False,\n ),\n action=dp.ActionFlags(\n multi_actions=False,\n action_set=\"bid\",\n long_description=False,\n individual_examples=False,\n ),\n use_plan=False,\n use_criticise=False,\n use_thinking=True,\n use_memory=False,\n use_concrete_example=True,\n use_abstract_example=True,\n use_hints=True,\n enable_chat=False,\n max_prompt_tokens=40_000,\n be_cautious=True,\n extra_instructions=None,\n)\n\n\ndef get_base_agent(llm_config: str):\n return GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=BASE_FLAGS,\n )\n\n\ndef get_vision_agent(llm_config: str):\n flags = deepcopy(BASE_FLAGS)\n flags.obs.use_screenshot = True\n agent_args = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=flags,\n )\n agent_args.agent_name = f\"{agent_args.agent_name}_vision\"\n return agent_args\n\n\ndef get_som_agent(llm_config: str):\n flags = deepcopy(BASE_FLAGS)\n flags.obs.use_screenshot = True\n flags.obs.use_som = True\n return GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=flags,\n )","source_hash":"06211236905e1cd069c121dedb6f097aa004ae9a3a2d70db8443e52c710b4e5a","truncated":false} 
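The tmlr_config module above exposes three factory helpers (get_base_agent, get_vision_agent, get_som_agent) that all derive from BASE_FLAGS. A minimal usage sketch, assuming the "openai/gpt-4o-2024-05-13" key exists in CHAT_MODEL_ARGS_DICT as it does in the configs above:

```python
# Minimal sketch (not part of the repo): build the three TMLR agent variants
# for one LLM key and launch them with Study. The LLM key and benchmark name
# are assumptions taken from configs elsewhere in this repo.
from agentlab.agents.generic_agent.tmlr_config import (
    get_base_agent,
    get_som_agent,
    get_vision_agent,
)
from agentlab.experiments.study import Study

llm = "openai/gpt-4o-2024-05-13"
agents = [get_base_agent(llm), get_vision_agent(llm), get_som_agent(llm)]

if __name__ == "__main__":  # the main guard is required by some parallel backends
    study = Study(agents, "miniwob_tiny_test")
    study.run(n_jobs=1, parallel_backend="sequential")
```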
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.tmlr_config.get_base_agent","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.tmlr_config.get_base_agent#L53-L57","kind":"function","name":"get_base_agent","path":"src/agentlab/agents/generic_agent/tmlr_config.py","language":"python","start_line":53,"end_line":57,"context_start_line":33,"context_end_line":77,"code":" action=dp.ActionFlags(\n multi_actions=False,\n action_set=\"bid\",\n long_description=False,\n individual_examples=False,\n ),\n use_plan=False,\n use_criticise=False,\n use_thinking=True,\n use_memory=False,\n use_concrete_example=True,\n use_abstract_example=True,\n use_hints=True,\n enable_chat=False,\n max_prompt_tokens=40_000,\n be_cautious=True,\n extra_instructions=None,\n)\n\n\ndef get_base_agent(llm_config: str):\n return GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=BASE_FLAGS,\n )\n\n\ndef get_vision_agent(llm_config: str):\n flags = deepcopy(BASE_FLAGS)\n flags.obs.use_screenshot = True\n agent_args = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=flags,\n )\n agent_args.agent_name = f\"{agent_args.agent_name}_vision\"\n return agent_args\n\n\ndef get_som_agent(llm_config: str):\n flags = deepcopy(BASE_FLAGS)\n flags.obs.use_screenshot = True\n flags.obs.use_som = True\n return GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=flags,","source_hash":"06211236905e1cd069c121dedb6f097aa004ae9a3a2d70db8443e52c710b4e5a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.tmlr_config.get_vision_agent","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.tmlr_config.get_vision_agent#L60-L68","kind":"function","name":"get_vision_agent","path":"src/agentlab/agents/generic_agent/tmlr_config.py","language":"python","start_line":60,"end_line":68,"context_start_line":40,"context_end_line":78,"code":" use_criticise=False,\n use_thinking=True,\n use_memory=False,\n use_concrete_example=True,\n use_abstract_example=True,\n use_hints=True,\n enable_chat=False,\n max_prompt_tokens=40_000,\n be_cautious=True,\n extra_instructions=None,\n)\n\n\ndef get_base_agent(llm_config: str):\n return GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=BASE_FLAGS,\n )\n\n\ndef get_vision_agent(llm_config: str):\n flags = deepcopy(BASE_FLAGS)\n flags.obs.use_screenshot = True\n agent_args = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=flags,\n )\n agent_args.agent_name = f\"{agent_args.agent_name}_vision\"\n return agent_args\n\n\ndef get_som_agent(llm_config: str):\n flags = deepcopy(BASE_FLAGS)\n flags.obs.use_screenshot = True\n flags.obs.use_som = True\n return GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=flags,\n )","source_hash":"06211236905e1cd069c121dedb6f097aa004ae9a3a2d70db8443e52c710b4e5a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.tmlr_config.get_som_agent","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.tmlr_config.get_som_agent#L71-L78","kind":"function","name":"get_som_agent","path":"src/agentlab/agents/generic_agent/tmlr_config.py","language":"python","start_line":71,"end_line":78,"context_start_line":51,"context_end_line":78,"code":"\n\ndef get_base_agent(llm_config: str):\n return GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=BASE_FLAGS,\n )\n\n\ndef 
get_vision_agent(llm_config: str):\n flags = deepcopy(BASE_FLAGS)\n flags.obs.use_screenshot = True\n agent_args = GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=flags,\n )\n agent_args.agent_name = f\"{agent_args.agent_name}_vision\"\n return agent_args\n\n\ndef get_som_agent(llm_config: str):\n flags = deepcopy(BASE_FLAGS)\n flags.obs.use_screenshot = True\n flags.obs.use_som = True\n return GenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=flags,\n )","source_hash":"06211236905e1cd069c121dedb6f097aa004ae9a3a2d70db8443e52c710b4e5a","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt","uri":"program://AgentLab/module/src.agentlab.agents.generic_agent.generic_agent_prompt#L1-L261","kind":"module","name":"src.agentlab.agents.generic_agent.generic_agent_prompt","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":1,"end_line":261,"context_start_line":1,"context_end_line":261,"code":"\"\"\"\nPrompt builder for GenericAgent\n\nIt is based on the dynamic_prompting module from the agentlab package.\n\"\"\"\n\nimport logging\nfrom dataclasses import dataclass\n\nfrom browsergym.core import action\nfrom browsergym.core.action.base import AbstractActionSet\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.llm.llm_utils import HumanMessage, parse_html_tags_raise\n\n\n@dataclass\nclass GenericPromptFlags(dp.Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n\n Attributes:\n use_plan (bool): Ask the LLM to provide a plan.\n use_criticise (bool): Ask the LLM to first draft and criticise the action before producing it.\n use_thinking (bool): Enable a chain of thoughts.\n use_concrete_example (bool): Use a concrete example of the answer in the prompt for a generic task.\n use_abstract_example (bool): Use an abstract example of the answer in the prompt.\n use_hints (bool): Add some human-engineered hints to the prompt.\n enable_chat (bool): Enable chat mode, where the agent can interact with the user.\n max_prompt_tokens (int): Maximum number of tokens allowed in the prompt.\n be_cautious (bool): Instruct the agent to be cautious about its actions.\n extra_instructions (Optional[str]): Extra instructions to provide to the agent.\n add_missparsed_messages (bool): When retrying, add the missparsed messages to the prompt.\n flag_group (Optional[str]): Group of flags used.\n \"\"\"\n\n obs: dp.ObsFlags\n action: dp.ActionFlags\n use_plan: bool = False #\n use_criticise: bool = False #\n use_thinking: bool = False\n use_memory: bool = False #\n use_concrete_example: bool = True\n use_abstract_example: bool = False\n use_hints: bool = False\n enable_chat: bool = False\n max_prompt_tokens: int = None\n be_cautious: bool = True\n extra_instructions: str | None = None\n add_missparsed_messages: bool = True\n max_trunc_itr: int = 20\n flag_group: str = None\n\n\nclass MainPrompt(dp.Shrinkable):\n def __init__(\n self,\n action_set: AbstractActionSet,\n obs_history: list[dict],\n actions: list[str],\n memories: list[str],\n thoughts: list[str],\n previous_plan: str,\n step: int,\n flags: GenericPromptFlags,\n ) -> None:\n super().__init__()\n self.flags = flags\n self.history = dp.History(obs_history, actions, memories, thoughts, flags.obs)\n if self.flags.enable_chat:\n self.instructions = dp.ChatInstructions(\n obs_history[-1][\"chat_messages\"], 
extra_instructions=flags.extra_instructions\n )\n else:\n if sum([msg[\"role\"] == \"user\" for msg in obs_history[-1].get(\"chat_messages\", [])]) > 1:\n logging.warning(\n \"Agent is in goal mode, but multiple user messages are present in the chat. Consider switching to `enable_chat=True`.\"\n )\n self.instructions = dp.GoalInstructions(\n obs_history[-1][\"goal_object\"], extra_instructions=flags.extra_instructions\n )\n\n self.obs = dp.Observation(\n obs_history[-1],\n self.flags.obs,\n )\n\n self.action_prompt = dp.ActionPrompt(action_set, action_flags=flags.action)\n\n def time_for_caution():\n # no need for caution if we're in single action mode\n return flags.be_cautious and (\n flags.action.action_set.multiaction or flags.action.action_set == \"python\"\n )\n\n self.be_cautious = dp.BeCautious(visible=time_for_caution)\n self.think = dp.Think(visible=lambda: flags.use_thinking)\n self.hints = dp.Hints(visible=lambda: flags.use_hints)\n self.plan = Plan(previous_plan, step, lambda: flags.use_plan) # TODO add previous plan\n self.criticise = Criticise(visible=lambda: flags.use_criticise)\n self.memory = Memory(visible=lambda: flags.use_memory)\n\n @property\n def _prompt(self) -> HumanMessage:\n prompt = HumanMessage(self.instructions.prompt)\n prompt.add_text(\n f\"\"\"\\\n{self.obs.prompt}\\\n{self.history.prompt}\\\n{self.action_prompt.prompt}\\\n{self.hints.prompt}\\\n{self.be_cautious.prompt}\\\n{self.think.prompt}\\\n{self.plan.prompt}\\\n{self.memory.prompt}\\\n{self.criticise.prompt}\\\n\"\"\"\n )\n\n if self.flags.use_abstract_example:\n prompt.add_text(\n f\"\"\"\n# Abstract Example\n\nHere is an abstract version of the answer with description of the content of\neach tag. Make sure you follow this structure, but replace the content with your\nanswer:\n{self.think.abstract_ex}\\\n{self.plan.abstract_ex}\\\n{self.memory.abstract_ex}\\\n{self.criticise.abstract_ex}\\\n{self.action_prompt.abstract_ex}\\\n\"\"\"\n )\n\n if self.flags.use_concrete_example:\n prompt.add_text(\n f\"\"\"\n# Concrete Example\n\nHere is a concrete example of how to format your answer.\nMake sure to follow the template with proper tags:\n{self.think.concrete_ex}\\\n{self.plan.concrete_ex}\\\n{self.memory.concrete_ex}\\\n{self.criticise.concrete_ex}\\\n{self.action_prompt.concrete_ex}\\\n\"\"\"\n )\n return self.obs.add_screenshot(prompt)\n\n def shrink(self):\n self.history.shrink()\n self.obs.shrink()\n\n def _parse_answer(self, text_answer):\n ans_dict = {}\n ans_dict.update(self.think.parse_answer(text_answer))\n ans_dict.update(self.plan.parse_answer(text_answer))\n ans_dict.update(self.memory.parse_answer(text_answer))\n ans_dict.update(self.criticise.parse_answer(text_answer))\n ans_dict.update(self.action_prompt.parse_answer(text_answer))\n return ans_dict\n\n\nclass Memory(dp.PromptElement):\n _prompt = \"\" # provided in the abstract and concrete examples\n\n _abstract_ex = \"\"\"\n\nWrite down anything you need to remember for next steps. You will be presented\nwith the list of previous memories and past actions. Some tasks require to\nremember hints from previous steps in order to solve it.\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\nI clicked on bid \"32\" to activate tab 2. 
The accessibility tree should mention\nfocusable for elements of the form at next step.\n\n\"\"\"\n\n def _parse_answer(self, text_answer):\n return parse_html_tags_raise(text_answer, optional_keys=[\"memory\"], merge_multiple=True)\n\n\nclass Plan(dp.PromptElement):\n def __init__(self, previous_plan, plan_step, visible: bool = True) -> None:\n super().__init__(visible=visible)\n self.previous_plan = previous_plan\n self._prompt = f\"\"\"\n# Plan:\n\nYou just executed step {plan_step} of the previously proposed plan:\\n{previous_plan}\\n\nAfter reviewing the effect of your previous actions, verify if your plan is still\nrelevant and update it if necessary.\n\"\"\"\n\n _abstract_ex = \"\"\"\n\nProvide a multi step plan that will guide you to accomplish the goal. There\nshould always be steps to verify if the previous action had an effect. The plan\ncan be revisited at each step. Specifically, if there was something unexpected.\nThe plan should be cautious and favor exploring before submitting.\n\n\nInteger specifying the step of current action\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\n1. fill form (failed)\n * type first name\n * type last name\n2. Try to activate the form\n * click on tab 2\n3. fill form again\n * type first name\n * type last name\n4. verify and submit\n * verify form is filled\n * submit if filled, if not, replan\n\n\n2\n\"\"\"\n\n def _parse_answer(self, text_answer):\n return parse_html_tags_raise(text_answer, optional_keys=[\"plan\", \"step\"])\n\n\nclass Criticise(dp.PromptElement):\n _prompt = \"\"\n\n _abstract_ex = \"\"\"\n\nWrite a first version of what you think is the right action.\n\n\n\nCriticise action_draft. What could be wrong with it? Enumerate reasons why it\ncould fail. Did your past actions have the expected effect? Make sure you're not\nrepeating the same mistakes.\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\nclick(\"32\")\n\n\n\nclick(\"32\") might not work because the element is not visible yet. 
I need to\nexplore the page to find a way to activate the form.\n\n\"\"\"\n\n def _parse_answer(self, text_answer):\n return parse_html_tags_raise(text_answer, optional_keys=[\"action_draft\", \"criticise\"])","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt.GenericPromptFlags","uri":"program://AgentLab/class/src.agentlab.agents.generic_agent.generic_agent_prompt.GenericPromptFlags#L18-L52","kind":"class","name":"GenericPromptFlags","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":18,"end_line":52,"context_start_line":1,"context_end_line":72,"code":"\"\"\"\nPrompt builder for GenericAgent\n\nIt is based on the dynamic_prompting module from the agentlab package.\n\"\"\"\n\nimport logging\nfrom dataclasses import dataclass\n\nfrom browsergym.core import action\nfrom browsergym.core.action.base import AbstractActionSet\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.llm.llm_utils import HumanMessage, parse_html_tags_raise\n\n\n@dataclass\nclass GenericPromptFlags(dp.Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n\n Attributes:\n use_plan (bool): Ask the LLM to provide a plan.\n use_criticise (bool): Ask the LLM to first draft and criticise the action before producing it.\n use_thinking (bool): Enable a chain of thoughts.\n use_concrete_example (bool): Use a concrete example of the answer in the prompt for a generic task.\n use_abstract_example (bool): Use an abstract example of the answer in the prompt.\n use_hints (bool): Add some human-engineered hints to the prompt.\n enable_chat (bool): Enable chat mode, where the agent can interact with the user.\n max_prompt_tokens (int): Maximum number of tokens allowed in the prompt.\n be_cautious (bool): Instruct the agent to be cautious about its actions.\n extra_instructions (Optional[str]): Extra instructions to provide to the agent.\n add_missparsed_messages (bool): When retrying, add the missparsed messages to the prompt.\n flag_group (Optional[str]): Group of flags used.\n \"\"\"\n\n obs: dp.ObsFlags\n action: dp.ActionFlags\n use_plan: bool = False #\n use_criticise: bool = False #\n use_thinking: bool = False\n use_memory: bool = False #\n use_concrete_example: bool = True\n use_abstract_example: bool = False\n use_hints: bool = False\n enable_chat: bool = False\n max_prompt_tokens: int = None\n be_cautious: bool = True\n extra_instructions: str | None = None\n add_missparsed_messages: bool = True\n max_trunc_itr: int = 20\n flag_group: str = None\n\n\nclass MainPrompt(dp.Shrinkable):\n def __init__(\n self,\n action_set: AbstractActionSet,\n obs_history: list[dict],\n actions: list[str],\n memories: list[str],\n thoughts: list[str],\n previous_plan: str,\n step: int,\n flags: GenericPromptFlags,\n ) -> None:\n super().__init__()\n self.flags = flags\n self.history = dp.History(obs_history, actions, memories, thoughts, flags.obs)\n if self.flags.enable_chat:\n self.instructions = dp.ChatInstructions(\n obs_history[-1][\"chat_messages\"], extra_instructions=flags.extra_instructions","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} 
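GenericPromptFlags (defined in the record above) only requires the obs and action fields; every other toggle has a default. The *_VISION configs and get_vision_agent both rely on the same copy-and-toggle pattern, sketched below under the assumption that dp.ObsFlags and dp.ActionFlags accept partial keyword arguments and default the remaining fields:

```python
# Minimal sketch, assuming the dataclass fields shown in the configs above
# can be passed as keywords and the rest keep their defaults.
from copy import deepcopy

from bgym import HighLevelActionSetArgs

from agentlab.agents import dynamic_prompting as dp
from agentlab.agents.generic_agent.generic_agent_prompt import GenericPromptFlags

flags = GenericPromptFlags(
    obs=dp.ObsFlags(use_html=False, use_ax_tree=True),
    action=dp.ActionFlags(
        action_set=HighLevelActionSetArgs(subsets=["bid"], multiaction=False),
    ),
    use_thinking=True,
)

# The *_VISION configs and get_vision_agent use this copy-and-toggle pattern:
vision_flags = deepcopy(flags)
vision_flags.obs.use_screenshot = True
vision_flags.obs.use_som = True  # set-of-marks overlay, as in FLAGS_GPT_4o_VISION
```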
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt.MainPrompt","uri":"program://AgentLab/class/src.agentlab.agents.generic_agent.generic_agent_prompt.MainPrompt#L55-L163","kind":"class","name":"MainPrompt","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":55,"end_line":163,"context_start_line":35,"context_end_line":183,"code":" \"\"\"\n\n obs: dp.ObsFlags\n action: dp.ActionFlags\n use_plan: bool = False #\n use_criticise: bool = False #\n use_thinking: bool = False\n use_memory: bool = False #\n use_concrete_example: bool = True\n use_abstract_example: bool = False\n use_hints: bool = False\n enable_chat: bool = False\n max_prompt_tokens: int = None\n be_cautious: bool = True\n extra_instructions: str | None = None\n add_missparsed_messages: bool = True\n max_trunc_itr: int = 20\n flag_group: str = None\n\n\nclass MainPrompt(dp.Shrinkable):\n def __init__(\n self,\n action_set: AbstractActionSet,\n obs_history: list[dict],\n actions: list[str],\n memories: list[str],\n thoughts: list[str],\n previous_plan: str,\n step: int,\n flags: GenericPromptFlags,\n ) -> None:\n super().__init__()\n self.flags = flags\n self.history = dp.History(obs_history, actions, memories, thoughts, flags.obs)\n if self.flags.enable_chat:\n self.instructions = dp.ChatInstructions(\n obs_history[-1][\"chat_messages\"], extra_instructions=flags.extra_instructions\n )\n else:\n if sum([msg[\"role\"] == \"user\" for msg in obs_history[-1].get(\"chat_messages\", [])]) > 1:\n logging.warning(\n \"Agent is in goal mode, but multiple user messages are present in the chat. Consider switching to `enable_chat=True`.\"\n )\n self.instructions = dp.GoalInstructions(\n obs_history[-1][\"goal_object\"], extra_instructions=flags.extra_instructions\n )\n\n self.obs = dp.Observation(\n obs_history[-1],\n self.flags.obs,\n )\n\n self.action_prompt = dp.ActionPrompt(action_set, action_flags=flags.action)\n\n def time_for_caution():\n # no need for caution if we're in single action mode\n return flags.be_cautious and (\n flags.action.action_set.multiaction or flags.action.action_set == \"python\"\n )\n\n self.be_cautious = dp.BeCautious(visible=time_for_caution)\n self.think = dp.Think(visible=lambda: flags.use_thinking)\n self.hints = dp.Hints(visible=lambda: flags.use_hints)\n self.plan = Plan(previous_plan, step, lambda: flags.use_plan) # TODO add previous plan\n self.criticise = Criticise(visible=lambda: flags.use_criticise)\n self.memory = Memory(visible=lambda: flags.use_memory)\n\n @property\n def _prompt(self) -> HumanMessage:\n prompt = HumanMessage(self.instructions.prompt)\n prompt.add_text(\n f\"\"\"\\\n{self.obs.prompt}\\\n{self.history.prompt}\\\n{self.action_prompt.prompt}\\\n{self.hints.prompt}\\\n{self.be_cautious.prompt}\\\n{self.think.prompt}\\\n{self.plan.prompt}\\\n{self.memory.prompt}\\\n{self.criticise.prompt}\\\n\"\"\"\n )\n\n if self.flags.use_abstract_example:\n prompt.add_text(\n f\"\"\"\n# Abstract Example\n\nHere is an abstract version of the answer with description of the content of\neach tag. 
Make sure you follow this structure, but replace the content with your\nanswer:\n{self.think.abstract_ex}\\\n{self.plan.abstract_ex}\\\n{self.memory.abstract_ex}\\\n{self.criticise.abstract_ex}\\\n{self.action_prompt.abstract_ex}\\\n\"\"\"\n )\n\n if self.flags.use_concrete_example:\n prompt.add_text(\n f\"\"\"\n# Concrete Example\n\nHere is a concrete example of how to format your answer.\nMake sure to follow the template with proper tags:\n{self.think.concrete_ex}\\\n{self.plan.concrete_ex}\\\n{self.memory.concrete_ex}\\\n{self.criticise.concrete_ex}\\\n{self.action_prompt.concrete_ex}\\\n\"\"\"\n )\n return self.obs.add_screenshot(prompt)\n\n def shrink(self):\n self.history.shrink()\n self.obs.shrink()\n\n def _parse_answer(self, text_answer):\n ans_dict = {}\n ans_dict.update(self.think.parse_answer(text_answer))\n ans_dict.update(self.plan.parse_answer(text_answer))\n ans_dict.update(self.memory.parse_answer(text_answer))\n ans_dict.update(self.criticise.parse_answer(text_answer))\n ans_dict.update(self.action_prompt.parse_answer(text_answer))\n return ans_dict\n\n\nclass Memory(dp.PromptElement):\n _prompt = \"\" # provided in the abstract and concrete examples\n\n _abstract_ex = \"\"\"\n\nWrite down anything you need to remember for next steps. You will be presented\nwith the list of previous memories and past actions. Some tasks require to\nremember hints from previous steps in order to solve it.\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\nI clicked on bid \"32\" to activate tab 2. The accessibility tree should mention\nfocusable for elements of the form at next step.\n\n\"\"\"\n","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt.Memory","uri":"program://AgentLab/class/src.agentlab.agents.generic_agent.generic_agent_prompt.Memory#L166-L185","kind":"class","name":"Memory","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":166,"end_line":185,"context_start_line":146,"context_end_line":205,"code":"{self.criticise.concrete_ex}\\\n{self.action_prompt.concrete_ex}\\\n\"\"\"\n )\n return self.obs.add_screenshot(prompt)\n\n def shrink(self):\n self.history.shrink()\n self.obs.shrink()\n\n def _parse_answer(self, text_answer):\n ans_dict = {}\n ans_dict.update(self.think.parse_answer(text_answer))\n ans_dict.update(self.plan.parse_answer(text_answer))\n ans_dict.update(self.memory.parse_answer(text_answer))\n ans_dict.update(self.criticise.parse_answer(text_answer))\n ans_dict.update(self.action_prompt.parse_answer(text_answer))\n return ans_dict\n\n\nclass Memory(dp.PromptElement):\n _prompt = \"\" # provided in the abstract and concrete examples\n\n _abstract_ex = \"\"\"\n\nWrite down anything you need to remember for next steps. You will be presented\nwith the list of previous memories and past actions. Some tasks require to\nremember hints from previous steps in order to solve it.\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\nI clicked on bid \"32\" to activate tab 2. 
The accessibility tree should mention\nfocusable for elements of the form at next step.\n\n\"\"\"\n\n def _parse_answer(self, text_answer):\n return parse_html_tags_raise(text_answer, optional_keys=[\"memory\"], merge_multiple=True)\n\n\nclass Plan(dp.PromptElement):\n def __init__(self, previous_plan, plan_step, visible: bool = True) -> None:\n super().__init__(visible=visible)\n self.previous_plan = previous_plan\n self._prompt = f\"\"\"\n# Plan:\n\nYou just executed step {plan_step} of the previously proposed plan:\\n{previous_plan}\\n\nAfter reviewing the effect of your previous actions, verify if your plan is still\nrelevant and update it if necessary.\n\"\"\"\n\n _abstract_ex = \"\"\"\n\nProvide a multi step plan that will guide you to accomplish the goal. There\nshould always be steps to verify if the previous action had an effect. The plan\ncan be revisited at each step. Specifically, if there was something unexpected.\nThe plan should be cautious and favor exploring before submitting.","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt.Plan","uri":"program://AgentLab/class/src.agentlab.agents.generic_agent.generic_agent_prompt.Plan#L188-L231","kind":"class","name":"Plan","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":188,"end_line":231,"context_start_line":168,"context_end_line":251,"code":"\n _abstract_ex = \"\"\"\n\nWrite down anything you need to remember for next steps. You will be presented\nwith the list of previous memories and past actions. Some tasks require to\nremember hints from previous steps in order to solve it.\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\nI clicked on bid \"32\" to activate tab 2. The accessibility tree should mention\nfocusable for elements of the form at next step.\n\n\"\"\"\n\n def _parse_answer(self, text_answer):\n return parse_html_tags_raise(text_answer, optional_keys=[\"memory\"], merge_multiple=True)\n\n\nclass Plan(dp.PromptElement):\n def __init__(self, previous_plan, plan_step, visible: bool = True) -> None:\n super().__init__(visible=visible)\n self.previous_plan = previous_plan\n self._prompt = f\"\"\"\n# Plan:\n\nYou just executed step {plan_step} of the previously proposed plan:\\n{previous_plan}\\n\nAfter reviewing the effect of your previous actions, verify if your plan is still\nrelevant and update it if necessary.\n\"\"\"\n\n _abstract_ex = \"\"\"\n\nProvide a multi step plan that will guide you to accomplish the goal. There\nshould always be steps to verify if the previous action had an effect. The plan\ncan be revisited at each step. Specifically, if there was something unexpected.\nThe plan should be cautious and favor exploring before submitting.\n\n\nInteger specifying the step of current action\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\n1. fill form (failed)\n * type first name\n * type last name\n2. Try to activate the form\n * click on tab 2\n3. fill form again\n * type first name\n * type last name\n4. verify and submit\n * verify form is filled\n * submit if filled, if not, replan\n\n\n2\n\"\"\"\n\n def _parse_answer(self, text_answer):\n return parse_html_tags_raise(text_answer, optional_keys=[\"plan\", \"step\"])\n\n\nclass Criticise(dp.PromptElement):\n _prompt = \"\"\n\n _abstract_ex = \"\"\"\n\nWrite a first version of what you think is the right action.\n\n\n\nCriticise action_draft. What could be wrong with it? 
Enumerate reasons why it\ncould fail. Did your past actions have the expected effect? Make sure you're not\nrepeating the same mistakes.\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\nclick(\"32\")","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt.Criticise","uri":"program://AgentLab/class/src.agentlab.agents.generic_agent.generic_agent_prompt.Criticise#L234-L261","kind":"class","name":"Criticise","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":234,"end_line":261,"context_start_line":214,"context_end_line":261,"code":"1. fill form (failed)\n * type first name\n * type last name\n2. Try to activate the form\n * click on tab 2\n3. fill form again\n * type first name\n * type last name\n4. verify and submit\n * verify form is filled\n * submit if filled, if not, replan\n\n\n2\n\"\"\"\n\n def _parse_answer(self, text_answer):\n return parse_html_tags_raise(text_answer, optional_keys=[\"plan\", \"step\"])\n\n\nclass Criticise(dp.PromptElement):\n _prompt = \"\"\n\n _abstract_ex = \"\"\"\n\nWrite a first version of what you think is the right action.\n\n\n\nCriticise action_draft. What could be wrong with it? Enumerate reasons why it\ncould fail. Did your past actions have the expected effect? Make sure you're not\nrepeating the same mistakes.\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\nclick(\"32\")\n\n\n\nclick(\"32\") might not work because the element is not visible yet. I need to\nexplore the page to find a way to activate the form.\n\n\"\"\"\n\n def _parse_answer(self, text_answer):\n return parse_html_tags_raise(text_answer, optional_keys=[\"action_draft\", \"criticise\"])","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt.__init__","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent_prompt.__init__#L189-L198","kind":"function","name":"__init__","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":189,"end_line":198,"context_start_line":169,"context_end_line":218,"code":" _abstract_ex = \"\"\"\n\nWrite down anything you need to remember for next steps. You will be presented\nwith the list of previous memories and past actions. Some tasks require to\nremember hints from previous steps in order to solve it.\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\nI clicked on bid \"32\" to activate tab 2. The accessibility tree should mention\nfocusable for elements of the form at next step.\n\n\"\"\"\n\n def _parse_answer(self, text_answer):\n return parse_html_tags_raise(text_answer, optional_keys=[\"memory\"], merge_multiple=True)\n\n\nclass Plan(dp.PromptElement):\n def __init__(self, previous_plan, plan_step, visible: bool = True) -> None:\n super().__init__(visible=visible)\n self.previous_plan = previous_plan\n self._prompt = f\"\"\"\n# Plan:\n\nYou just executed step {plan_step} of the previously proposed plan:\\n{previous_plan}\\n\nAfter reviewing the effect of your previous actions, verify if your plan is still\nrelevant and update it if necessary.\n\"\"\"\n\n _abstract_ex = \"\"\"\n\nProvide a multi step plan that will guide you to accomplish the goal. There\nshould always be steps to verify if the previous action had an effect. The plan\ncan be revisited at each step. 
Specifically, if there was something unexpected.\nThe plan should be cautious and favor exploring before submitting.\n\n\nInteger specifying the step of current action\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\n1. fill form (failed)\n * type first name\n * type last name\n2. Try to activate the form\n * click on tab 2","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt._prompt","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent_prompt._prompt#L104-L150","kind":"function","name":"_prompt","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":104,"end_line":150,"context_start_line":84,"context_end_line":170,"code":" obs_history[-1],\n self.flags.obs,\n )\n\n self.action_prompt = dp.ActionPrompt(action_set, action_flags=flags.action)\n\n def time_for_caution():\n # no need for caution if we're in single action mode\n return flags.be_cautious and (\n flags.action.action_set.multiaction or flags.action.action_set == \"python\"\n )\n\n self.be_cautious = dp.BeCautious(visible=time_for_caution)\n self.think = dp.Think(visible=lambda: flags.use_thinking)\n self.hints = dp.Hints(visible=lambda: flags.use_hints)\n self.plan = Plan(previous_plan, step, lambda: flags.use_plan) # TODO add previous plan\n self.criticise = Criticise(visible=lambda: flags.use_criticise)\n self.memory = Memory(visible=lambda: flags.use_memory)\n\n @property\n def _prompt(self) -> HumanMessage:\n prompt = HumanMessage(self.instructions.prompt)\n prompt.add_text(\n f\"\"\"\\\n{self.obs.prompt}\\\n{self.history.prompt}\\\n{self.action_prompt.prompt}\\\n{self.hints.prompt}\\\n{self.be_cautious.prompt}\\\n{self.think.prompt}\\\n{self.plan.prompt}\\\n{self.memory.prompt}\\\n{self.criticise.prompt}\\\n\"\"\"\n )\n\n if self.flags.use_abstract_example:\n prompt.add_text(\n f\"\"\"\n# Abstract Example\n\nHere is an abstract version of the answer with description of the content of\neach tag. 
Make sure you follow this structure, but replace the content with your\nanswer:\n{self.think.abstract_ex}\\\n{self.plan.abstract_ex}\\\n{self.memory.abstract_ex}\\\n{self.criticise.abstract_ex}\\\n{self.action_prompt.abstract_ex}\\\n\"\"\"\n )\n\n if self.flags.use_concrete_example:\n prompt.add_text(\n f\"\"\"\n# Concrete Example\n\nHere is a concrete example of how to format your answer.\nMake sure to follow the template with proper tags:\n{self.think.concrete_ex}\\\n{self.plan.concrete_ex}\\\n{self.memory.concrete_ex}\\\n{self.criticise.concrete_ex}\\\n{self.action_prompt.concrete_ex}\\\n\"\"\"\n )\n return self.obs.add_screenshot(prompt)\n\n def shrink(self):\n self.history.shrink()\n self.obs.shrink()\n\n def _parse_answer(self, text_answer):\n ans_dict = {}\n ans_dict.update(self.think.parse_answer(text_answer))\n ans_dict.update(self.plan.parse_answer(text_answer))\n ans_dict.update(self.memory.parse_answer(text_answer))\n ans_dict.update(self.criticise.parse_answer(text_answer))\n ans_dict.update(self.action_prompt.parse_answer(text_answer))\n return ans_dict\n\n\nclass Memory(dp.PromptElement):\n _prompt = \"\" # provided in the abstract and concrete examples\n\n _abstract_ex = \"\"\"\n","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt.shrink","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent_prompt.shrink#L152-L154","kind":"function","name":"shrink","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":152,"end_line":154,"context_start_line":132,"context_end_line":174,"code":"{self.action_prompt.abstract_ex}\\\n\"\"\"\n )\n\n if self.flags.use_concrete_example:\n prompt.add_text(\n f\"\"\"\n# Concrete Example\n\nHere is a concrete example of how to format your answer.\nMake sure to follow the template with proper tags:\n{self.think.concrete_ex}\\\n{self.plan.concrete_ex}\\\n{self.memory.concrete_ex}\\\n{self.criticise.concrete_ex}\\\n{self.action_prompt.concrete_ex}\\\n\"\"\"\n )\n return self.obs.add_screenshot(prompt)\n\n def shrink(self):\n self.history.shrink()\n self.obs.shrink()\n\n def _parse_answer(self, text_answer):\n ans_dict = {}\n ans_dict.update(self.think.parse_answer(text_answer))\n ans_dict.update(self.plan.parse_answer(text_answer))\n ans_dict.update(self.memory.parse_answer(text_answer))\n ans_dict.update(self.criticise.parse_answer(text_answer))\n ans_dict.update(self.action_prompt.parse_answer(text_answer))\n return ans_dict\n\n\nclass Memory(dp.PromptElement):\n _prompt = \"\" # provided in the abstract and concrete examples\n\n _abstract_ex = \"\"\"\n\nWrite down anything you need to remember for next steps. You will be presented\nwith the list of previous memories and past actions. Some tasks require to\nremember hints from previous steps in order to solve it.\n","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt._parse_answer","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent_prompt._parse_answer#L260-L261","kind":"function","name":"_parse_answer","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":260,"end_line":261,"context_start_line":240,"context_end_line":261,"code":"
    \n\n\nCriticise action_draft. What could be wrong with it? Enumerate reasons why it\ncould fail. Did your past actions have the expected effect? Make sure you're not\nrepeating the same mistakes.\n\n\"\"\"\n\n _concrete_ex = \"\"\"\n\nclick(\"32\")\n\n\n\nclick(\"32\") might not work because the element is not visible yet. I need to\nexplore the page to find a way to activate the form.\n\n\"\"\"\n\n def _parse_answer(self, text_answer):\n return parse_html_tags_raise(text_answer, optional_keys=[\"action_draft\", \"criticise\"])","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent_prompt.time_for_caution","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent_prompt.time_for_caution#L90-L94","kind":"function","name":"time_for_caution","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":90,"end_line":94,"context_start_line":70,"context_end_line":114,"code":" if self.flags.enable_chat:\n self.instructions = dp.ChatInstructions(\n obs_history[-1][\"chat_messages\"], extra_instructions=flags.extra_instructions\n )\n else:\n if sum([msg[\"role\"] == \"user\" for msg in obs_history[-1].get(\"chat_messages\", [])]) > 1:\n logging.warning(\n \"Agent is in goal mode, but multiple user messages are present in the chat. Consider switching to `enable_chat=True`.\"\n )\n self.instructions = dp.GoalInstructions(\n obs_history[-1][\"goal_object\"], extra_instructions=flags.extra_instructions\n )\n\n self.obs = dp.Observation(\n obs_history[-1],\n self.flags.obs,\n )\n\n self.action_prompt = dp.ActionPrompt(action_set, action_flags=flags.action)\n\n def time_for_caution():\n # no need for caution if we're in single action mode\n return flags.be_cautious and (\n flags.action.action_set.multiaction or flags.action.action_set == \"python\"\n )\n\n self.be_cautious = dp.BeCautious(visible=time_for_caution)\n self.think = dp.Think(visible=lambda: flags.use_thinking)\n self.hints = dp.Hints(visible=lambda: flags.use_hints)\n self.plan = Plan(previous_plan, step, lambda: flags.use_plan) # TODO add previous plan\n self.criticise = Criticise(visible=lambda: flags.use_criticise)\n self.memory = Memory(visible=lambda: flags.use_memory)\n\n @property\n def _prompt(self) -> HumanMessage:\n prompt = HumanMessage(self.instructions.prompt)\n prompt.add_text(\n f\"\"\"\\\n{self.obs.prompt}\\\n{self.history.prompt}\\\n{self.action_prompt.prompt}\\\n{self.hints.prompt}\\\n{self.be_cautious.prompt}\\\n{self.think.prompt}\\\n{self.plan.prompt}\\","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent","uri":"program://AgentLab/module/src.agentlab.agents.generic_agent.reproducibility_agent#L1-L308","kind":"module","name":"src.agentlab.agents.generic_agent.reproducibility_agent","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":1,"end_line":308,"context_start_line":1,"context_end_line":308,"code":"\"\"\"\nAn agent that reproduces exactly the same traces as GenericAgent, to compare the results.\n\n\nThis module contains the classes and functions to reproduce the results of a\nstudy. 
It is used to create a new study that will run the same experiments as\nthe original study, but with a reproducibility agent that will mimic the same\nanswers as the original agent.\n\nStats are collected to compare the original agent's answers with the new agent's\nanswers. Load this reproducibility study in agent-xray to compare the results.\n\"\"\"\n\nimport difflib\nimport logging\nimport time\nfrom copy import copy\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport bgym\nfrom bgym import HighLevelActionSetArgs\nfrom browsergym.experiments.agent import AgentInfo\nfrom bs4 import BeautifulSoup\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.experiments.loop import ExpArgs, ExpResult, yield_all_exp_results\nfrom agentlab.experiments.study import Study\nfrom agentlab.llm.chat_api import make_assistant_message\nfrom agentlab.llm.llm_utils import Discussion, messages_to_dict\n\nfrom .generic_agent import GenericAgent, GenericAgentArgs\n\n\nclass ReproChatModel:\n \"\"\"A chat model that reproduces a conversation.\n\n Args:\n messages (list): A list of messages previously executed.\n delay (int): A delay to simulate the time it takes to generate a response.\n \"\"\"\n\n def __init__(self, old_messages, delay=1) -> None:\n self.old_messages = old_messages\n self.delay = delay\n\n def __call__(self, messages: list | Discussion):\n self.new_messages = copy(messages)\n\n if len(messages) >= len(self.old_messages):\n # if for some reason the llm response was not saved\n return make_assistant_message(\"\"\"None\"\"\")\n\n old_response = self.old_messages[len(messages)]\n self.new_messages.append(old_response)\n time.sleep(self.delay)\n # return the next message in the list\n return old_response\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass ReproAgentArgs(GenericAgentArgs):\n # starting with \"_\" will prevent from being part of the index in the load_results function\n _repro_dir: str = None\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n super().__post_init__()\n self.agent_name = f\"Repro_{self.agent_name}\"\n except AttributeError:\n pass\n\n def make_agent(self):\n return ReproAgent(self.chat_model_args, self.flags, self.max_retry, self._repro_dir)\n\n\nclass ReproAgent(GenericAgent):\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry=4,\n repro_dir=None,\n ):\n self.exp_result = ExpResult(repro_dir)\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_action(self, obs):\n # replace the chat model with a reproducible chat that will mimic the\n # same answers\n step = len(self.actions)\n step_info = self.exp_result.get_step_info(step)\n old_chat_messages = step_info.agent_info.get(\"chat_messages\", None) # type: Discussion\n\n if old_chat_messages is None:\n err_msg = self.exp_result.summary_info[\"err_msg\"]\n\n agent_info = AgentInfo(\n markdown_page=f\"Agent had no chat messages. Perhaps there was an error. 
err_msg:\\n{err_msg}\",\n )\n return None, agent_info\n\n # an old bug prevented the response from being saved.\n if len(old_chat_messages) == 2:\n recorded_action = step_info.action\n if recorded_action:\n # Recreate the 3rd message based on the recorded action\n assistant_message = make_assistant_message(f\"{recorded_action}\")\n old_chat_messages.append(assistant_message)\n\n self.chat_llm = ReproChatModel(old_chat_messages)\n action, agent_info = super().get_action(obs)\n\n return _make_agent_stats(\n action, agent_info, step_info, old_chat_messages, self.chat_llm.new_messages\n )\n\n\ndef _make_agent_stats(action, agent_info, step_info, old_chat_messages, new_chat_messages):\n if isinstance(agent_info, dict):\n agent_info = AgentInfo(**agent_info)\n\n old_msg_str = _format_messages(old_chat_messages)\n new_msg_str = _format_messages(new_chat_messages)\n\n agent_info.html_page = _make_diff(old_str=old_msg_str, new_str=new_msg_str)\n agent_info.stats.update(_diff_stats(old_msg_str, new_msg_str))\n\n return action, agent_info\n\n\ndef _format_messages(messages: list[dict]):\n if isinstance(messages, Discussion):\n return messages.to_string()\n messages = messages_to_dict(messages)\n return \"\\n\".join(f\"{m['role']} message:\\n{m['content']}\\n\" for m in messages)\n\n\ndef _make_backward_compatible(agent_args: GenericAgentArgs):\n action_set = agent_args.flags.action.action_set\n if isinstance(action_set, (str, list)):\n if isinstance(action_set, str):\n action_set = action_set.split(\"+\")\n\n agent_args.flags.action.action_set = HighLevelActionSetArgs(\n subsets=action_set,\n multiaction=agent_args.flags.action.multi_actions,\n )\n\n return agent_args\n\n\ndef reproduce_study(original_study_dir: Path | str, log_level=logging.INFO):\n \"\"\"Reproduce a study by running the same experiments with the same agent.\"\"\"\n\n original_study_dir = Path(original_study_dir)\n\n exp_args_list: list[ExpArgs] = []\n for exp_result in yield_all_exp_results(original_study_dir, progress_fn=None):\n agent_args = _make_backward_compatible(exp_result.exp_args.agent_args)\n agent_args = make_repro_agent(agent_args, exp_dir=exp_result.exp_dir)\n exp_args_list.append(\n ExpArgs(\n agent_args=agent_args,\n env_args=exp_result.exp_args.env_args,\n logging_level=log_level,\n )\n )\n\n # infer benchmark name from task list for backward compatible\n benchmark_name = exp_args_list[0].env_args.task_name.split(\".\")[0]\n\n study = Study(\n benchmark=benchmark_name,\n agent_args=[agent_args],\n )\n # this exp_args_list has a different agent_args for each experiment as repro_agent takes the exp_dir as argument\n # so we overwrite exp_args_list with the one we created above\n study.exp_args_list = exp_args_list\n return study\n\n\ndef make_repro_agent(agent_args: AgentArgs, exp_dir: Path | str):\n \"\"\"Create a reproducibility agent from an existing agent.\n\n Note, if a new flag was added, it was not saved in the original pickle. When\n loading the pickle it silently adds the missing flag and set it to its\n default value. 
The new repro agent_args will thus have the new flag set to\n its default value.\n\n Args:\n agent_args (AgentArgs): The original agent args.\n exp_dir (Path | str): The directory where the experiment was saved.\n\n Returns:\n ReproAgentArgs: The new agent args.\n \"\"\"\n exp_dir = Path(exp_dir)\n assert isinstance(agent_args, GenericAgentArgs)\n assert exp_dir.exists() # sanity check\n\n return ReproAgentArgs(\n agent_name=f\"Repro_{agent_args.agent_name}\",\n chat_model_args=agent_args.chat_model_args,\n flags=agent_args.flags,\n max_retry=agent_args.max_retry,\n _repro_dir=exp_dir,\n )\n\n\ndef _make_diff(old_str, new_str):\n page = difflib.HtmlDiff().make_file(\n old_str.splitlines(), new_str.splitlines(), fromdesc=\"Old Version\", todesc=\"New Version\"\n )\n page = page.replace('nowrap=\"nowrap\"', \"\") # Remove nowrap attribute\n page = _set_style(page, DIFF_STYLE)\n return page\n\n\ndef _diff_stats(str1: str, str2: str):\n \"\"\"Compute simple line-diff metrics describing how much two strings differ.\"\"\"\n lines1 = str1.splitlines()\n lines2 = str2.splitlines()\n\n diff = list(difflib.Differ().compare(lines1, lines2))\n\n # Count added and removed lines\n added = sum(1 for line in diff if line.startswith(\"+ \"))\n removed = sum(1 for line in diff if line.startswith(\"- \"))\n\n # Calculate difference ratio\n difference_ratio = (added + removed) / (2 * max(len(lines1), len(lines2)))\n\n return dict(lines_added=added, lines_removed=removed, difference_ratio=difference_ratio)\n\n\ndef _set_style(html_str: str, style: str, prepend_previous_style: bool = False):\n \"\"\"Add a style tag to an HTML string.\"\"\"\n\n soup = BeautifulSoup(html_str, \"html.parser\")\n style_tag = soup.find(\"style\")\n\n if not style_tag:\n style_tag = soup.new_tag(\"style\")\n soup.head.append(style_tag)\n\n current_style = style_tag.string or \"\"\n\n if prepend_previous_style:\n style = f\"{style}\\n{current_style}\"\n else:\n style = f\"{current_style}\\n{style}\"\n\n style_tag.string = style\n\n return str(soup)\n\n\n# this is the style to adjust the diff table inside gradio\nDIFF_STYLE = \"\"\"\n table.diff {\n font-size: 10px;\n font-family: Courier;\n border: medium;\n width: 100%;\n max-width: 100%; /* Ensure table does not exceed its container */\n table-layout: auto; /* Adjust column sizes dynamically */\n word-wrap: break-word;\n overflow-wrap: break-word;\n }\n /* Constrain the max-width of the 3rd and 6th columns */\n td:nth-child(3), td:nth-child(6) {\n max-width: 200px; /* Adjust this value to suit your content */\n white-space: normal; /* Allow wrapping in content columns */\n overflow-wrap: break-word; /* Break long words/content */\n }\n /* Ensure span elements wrap inside the table */\n .diff_add, .diff_chg, .diff_sub {\n word-wrap: break-word; /* Wrap long text */\n overflow-wrap: break-word;\n }\n\n /* Keep the rest of the table flexible */\n td {\n white-space: normal; /* Allow wrapping for content */\n }\n .diff_header {\n background-color: #e0e0e0;\n }\n td.diff_header {\n text-align: right;\n }\n .diff_next {\n background-color: #c0c0c0;\n }\n .diff_add {\n background-color: #aaffaa;\n }\n .diff_chg {\n background-color: #ffff77;\n }\n .diff_sub {\n background-color: #ffaaaa;\n }\n\"\"\"","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} 
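Putting the reproducibility module above together: reproduce_study rebuilds a Study whose agents replay the recorded chat messages, and the per-step diff stats land in each AgentInfo for inspection in agent-xray. A minimal sketch, where the results directory is hypothetical:

```python
# Minimal sketch (not from the repo; the directory name is a placeholder):
# replay a recorded study with the reproducibility agent, then inspect
# the message diffs in agent-xray.
from agentlab.agents.generic_agent.reproducibility_agent import reproduce_study

if __name__ == "__main__":
    study = reproduce_study(
        "/path/to/agentlab_results/2025-01-31_22-08-34_genericagent-o3-mini-2025-01-31-on-workarena-l1"
    )
    study.run(n_jobs=1, parallel_backend="sequential")
```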
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.ReproChatModel","uri":"program://AgentLab/class/src.agentlab.agents.generic_agent.reproducibility_agent.ReproChatModel#L35-L61","kind":"class","name":"ReproChatModel","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":35,"end_line":61,"context_start_line":15,"context_end_line":81,"code":"import logging\nimport time\nfrom copy import copy\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport bgym\nfrom bgym import HighLevelActionSetArgs\nfrom browsergym.experiments.agent import AgentInfo\nfrom bs4 import BeautifulSoup\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.experiments.loop import ExpArgs, ExpResult, yield_all_exp_results\nfrom agentlab.experiments.study import Study\nfrom agentlab.llm.chat_api import make_assistant_message\nfrom agentlab.llm.llm_utils import Discussion, messages_to_dict\n\nfrom .generic_agent import GenericAgent, GenericAgentArgs\n\n\nclass ReproChatModel:\n \"\"\"A chat model that reproduces a conversation.\n\n Args:\n messages (list): A list of messages previously executed.\n delay (int): A delay to simulate the time it takes to generate a response.\n \"\"\"\n\n def __init__(self, old_messages, delay=1) -> None:\n self.old_messages = old_messages\n self.delay = delay\n\n def __call__(self, messages: list | Discussion):\n self.new_messages = copy(messages)\n\n if len(messages) >= len(self.old_messages):\n # if for some reason the llm response was not saved\n return make_assistant_message(\"\"\"None\"\"\")\n\n old_response = self.old_messages[len(messages)]\n self.new_messages.append(old_response)\n time.sleep(self.delay)\n # return the next message in the list\n return old_response\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass ReproAgentArgs(GenericAgentArgs):\n # starting with \"_\" will prevent from being part of the index in the load_results function\n _repro_dir: str = None\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n super().__post_init__()\n self.agent_name = f\"Repro_{self.agent_name}\"\n except AttributeError:\n pass\n\n def make_agent(self):\n return ReproAgent(self.chat_model_args, self.flags, self.max_retry, self._repro_dir)\n\n\nclass ReproAgent(GenericAgent):\n def __init__(","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.ReproAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.generic_agent.reproducibility_agent.ReproAgentArgs#L65-L77","kind":"class","name":"ReproAgentArgs","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":65,"end_line":77,"context_start_line":45,"context_end_line":97,"code":" self.delay = delay\n\n def __call__(self, messages: list | Discussion):\n self.new_messages = copy(messages)\n\n if len(messages) >= len(self.old_messages):\n # if for some reason the llm response was not saved\n return make_assistant_message(\"\"\"None\"\"\")\n\n old_response = self.old_messages[len(messages)]\n self.new_messages.append(old_response)\n time.sleep(self.delay)\n # return the next message in the list\n return old_response\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass ReproAgentArgs(GenericAgentArgs):\n # starting with \"_\" will prevent from being part of the 
index in the load_results function\n _repro_dir: str = None\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n super().__post_init__()\n self.agent_name = f\"Repro_{self.agent_name}\"\n except AttributeError:\n pass\n\n def make_agent(self):\n return ReproAgent(self.chat_model_args, self.flags, self.max_retry, self._repro_dir)\n\n\nclass ReproAgent(GenericAgent):\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry=4,\n repro_dir=None,\n ):\n self.exp_result = ExpResult(repro_dir)\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_action(self, obs):\n # replace the chat model with a reproducible chat that will mimic the\n # same answers\n step = len(self.actions)\n step_info = self.exp_result.get_step_info(step)\n old_chat_messages = step_info.agent_info.get(\"chat_messages\", None) # type: Discussion\n","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.ReproAgent","uri":"program://AgentLab/class/src.agentlab.agents.generic_agent.reproducibility_agent.ReproAgent#L80-L119","kind":"class","name":"ReproAgent","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":80,"end_line":119,"context_start_line":60,"context_end_line":139,"code":" def get_stats(self):\n return {}\n\n\n@dataclass\nclass ReproAgentArgs(GenericAgentArgs):\n # starting with \"_\" will prevent from being part of the index in the load_results function\n _repro_dir: str = None\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n super().__post_init__()\n self.agent_name = f\"Repro_{self.agent_name}\"\n except AttributeError:\n pass\n\n def make_agent(self):\n return ReproAgent(self.chat_model_args, self.flags, self.max_retry, self._repro_dir)\n\n\nclass ReproAgent(GenericAgent):\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry=4,\n repro_dir=None,\n ):\n self.exp_result = ExpResult(repro_dir)\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_action(self, obs):\n # replace the chat model with a reproducible chat that will mimic the\n # same answers\n step = len(self.actions)\n step_info = self.exp_result.get_step_info(step)\n old_chat_messages = step_info.agent_info.get(\"chat_messages\", None) # type: Discussion\n\n if old_chat_messages is None:\n err_msg = self.exp_result.summary_info[\"err_msg\"]\n\n agent_info = AgentInfo(\n markdown_page=f\"Agent had no chat messages. Perhaps there was an error. 
err_msg:\\n{err_msg}\",\n )\n return None, agent_info\n\n # an old bug prevented the response from being saved.\n if len(old_chat_messages) == 2:\n recorded_action = step_info.action\n if recorded_action:\n # Recreate the 3rd message based on the recorded action\n assistant_message = make_assistant_message(f\"{recorded_action}\")\n old_chat_messages.append(assistant_message)\n\n self.chat_llm = ReproChatModel(old_chat_messages)\n action, agent_info = super().get_action(obs)\n\n return _make_agent_stats(\n action, agent_info, step_info, old_chat_messages, self.chat_llm.new_messages\n )\n\n\ndef _make_agent_stats(action, agent_info, step_info, old_chat_messages, new_chat_messages):\n if isinstance(agent_info, dict):\n agent_info = AgentInfo(**agent_info)\n\n old_msg_str = _format_messages(old_chat_messages)\n new_msg_str = _format_messages(new_chat_messages)\n\n agent_info.html_page = _make_diff(old_str=old_msg_str, new_str=new_msg_str)\n agent_info.stats.update(_diff_stats(old_msg_str, new_msg_str))\n\n return action, agent_info\n\n\ndef _format_messages(messages: list[dict]):\n if isinstance(messages, Discussion):\n return messages.to_string()\n messages = messages_to_dict(messages)\n return \"\\n\".join(f\"{m['role']} message:\\n{m['content']}\\n\" for m in messages)","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent._make_agent_stats","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent._make_agent_stats#L122-L132","kind":"function","name":"_make_agent_stats","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":122,"end_line":132,"context_start_line":102,"context_end_line":152,"code":" markdown_page=f\"Agent had no chat messages. Perhaps there was an error. 
err_msg:\\n{err_msg}\",\n )\n return None, agent_info\n\n # an old bug prevented the response from being saved.\n if len(old_chat_messages) == 2:\n recorded_action = step_info.action\n if recorded_action:\n # Recreate the 3rd message based on the recorded action\n assistant_message = make_assistant_message(f\"{recorded_action}\")\n old_chat_messages.append(assistant_message)\n\n self.chat_llm = ReproChatModel(old_chat_messages)\n action, agent_info = super().get_action(obs)\n\n return _make_agent_stats(\n action, agent_info, step_info, old_chat_messages, self.chat_llm.new_messages\n )\n\n\ndef _make_agent_stats(action, agent_info, step_info, old_chat_messages, new_chat_messages):\n if isinstance(agent_info, dict):\n agent_info = AgentInfo(**agent_info)\n\n old_msg_str = _format_messages(old_chat_messages)\n new_msg_str = _format_messages(new_chat_messages)\n\n agent_info.html_page = _make_diff(old_str=old_msg_str, new_str=new_msg_str)\n agent_info.stats.update(_diff_stats(old_msg_str, new_msg_str))\n\n return action, agent_info\n\n\ndef _format_messages(messages: list[dict]):\n if isinstance(messages, Discussion):\n return messages.to_string()\n messages = messages_to_dict(messages)\n return \"\\n\".join(f\"{m['role']} message:\\n{m['content']}\\n\" for m in messages)\n\n\ndef _make_backward_compatible(agent_args: GenericAgentArgs):\n action_set = agent_args.flags.action.action_set\n if isinstance(action_set, (str, list)):\n if isinstance(action_set, str):\n action_set = action_set.split(\"+\")\n\n agent_args.flags.action.action_set = HighLevelActionSetArgs(\n subsets=action_set,\n multiaction=agent_args.flags.action.multi_actions,\n )\n","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent._format_messages","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent._format_messages#L135-L139","kind":"function","name":"_format_messages","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":135,"end_line":139,"context_start_line":115,"context_end_line":159,"code":" action, agent_info = super().get_action(obs)\n\n return _make_agent_stats(\n action, agent_info, step_info, old_chat_messages, self.chat_llm.new_messages\n )\n\n\ndef _make_agent_stats(action, agent_info, step_info, old_chat_messages, new_chat_messages):\n if isinstance(agent_info, dict):\n agent_info = AgentInfo(**agent_info)\n\n old_msg_str = _format_messages(old_chat_messages)\n new_msg_str = _format_messages(new_chat_messages)\n\n agent_info.html_page = _make_diff(old_str=old_msg_str, new_str=new_msg_str)\n agent_info.stats.update(_diff_stats(old_msg_str, new_msg_str))\n\n return action, agent_info\n\n\ndef _format_messages(messages: list[dict]):\n if isinstance(messages, Discussion):\n return messages.to_string()\n messages = messages_to_dict(messages)\n return \"\\n\".join(f\"{m['role']} message:\\n{m['content']}\\n\" for m in messages)\n\n\ndef _make_backward_compatible(agent_args: GenericAgentArgs):\n action_set = agent_args.flags.action.action_set\n if isinstance(action_set, (str, list)):\n if isinstance(action_set, str):\n action_set = action_set.split(\"+\")\n\n agent_args.flags.action.action_set = HighLevelActionSetArgs(\n subsets=action_set,\n multiaction=agent_args.flags.action.multi_actions,\n )\n\n return agent_args\n\n\ndef reproduce_study(original_study_dir: Path | str, log_level=logging.INFO):\n 
\"\"\"Reproduce a study by running the same experiments with the same agent.\"\"\"\n\n original_study_dir = Path(original_study_dir)","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent._make_backward_compatible","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent._make_backward_compatible#L142-L153","kind":"function","name":"_make_backward_compatible","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":142,"end_line":153,"context_start_line":122,"context_end_line":173,"code":"def _make_agent_stats(action, agent_info, step_info, old_chat_messages, new_chat_messages):\n if isinstance(agent_info, dict):\n agent_info = AgentInfo(**agent_info)\n\n old_msg_str = _format_messages(old_chat_messages)\n new_msg_str = _format_messages(new_chat_messages)\n\n agent_info.html_page = _make_diff(old_str=old_msg_str, new_str=new_msg_str)\n agent_info.stats.update(_diff_stats(old_msg_str, new_msg_str))\n\n return action, agent_info\n\n\ndef _format_messages(messages: list[dict]):\n if isinstance(messages, Discussion):\n return messages.to_string()\n messages = messages_to_dict(messages)\n return \"\\n\".join(f\"{m['role']} message:\\n{m['content']}\\n\" for m in messages)\n\n\ndef _make_backward_compatible(agent_args: GenericAgentArgs):\n action_set = agent_args.flags.action.action_set\n if isinstance(action_set, (str, list)):\n if isinstance(action_set, str):\n action_set = action_set.split(\"+\")\n\n agent_args.flags.action.action_set = HighLevelActionSetArgs(\n subsets=action_set,\n multiaction=agent_args.flags.action.multi_actions,\n )\n\n return agent_args\n\n\ndef reproduce_study(original_study_dir: Path | str, log_level=logging.INFO):\n \"\"\"Reproduce a study by running the same experiments with the same agent.\"\"\"\n\n original_study_dir = Path(original_study_dir)\n\n exp_args_list: list[ExpArgs] = []\n for exp_result in yield_all_exp_results(original_study_dir, progress_fn=None):\n agent_args = _make_backward_compatible(exp_result.exp_args.agent_args)\n agent_args = make_repro_agent(agent_args, exp_dir=exp_result.exp_dir)\n exp_args_list.append(\n ExpArgs(\n agent_args=agent_args,\n env_args=exp_result.exp_args.env_args,\n logging_level=log_level,\n )\n )\n\n # infer benchmark name from task list for backward compatible","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.reproduce_study","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent.reproduce_study#L156-L183","kind":"function","name":"reproduce_study","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":156,"end_line":183,"context_start_line":136,"context_end_line":203,"code":" if isinstance(messages, Discussion):\n return messages.to_string()\n messages = messages_to_dict(messages)\n return \"\\n\".join(f\"{m['role']} message:\\n{m['content']}\\n\" for m in messages)\n\n\ndef _make_backward_compatible(agent_args: GenericAgentArgs):\n action_set = agent_args.flags.action.action_set\n if isinstance(action_set, (str, list)):\n if isinstance(action_set, str):\n action_set = action_set.split(\"+\")\n\n agent_args.flags.action.action_set = HighLevelActionSetArgs(\n subsets=action_set,\n 
multiaction=agent_args.flags.action.multi_actions,\n )\n\n return agent_args\n\n\ndef reproduce_study(original_study_dir: Path | str, log_level=logging.INFO):\n \"\"\"Reproduce a study by running the same experiments with the same agent.\"\"\"\n\n original_study_dir = Path(original_study_dir)\n\n exp_args_list: list[ExpArgs] = []\n for exp_result in yield_all_exp_results(original_study_dir, progress_fn=None):\n agent_args = _make_backward_compatible(exp_result.exp_args.agent_args)\n agent_args = make_repro_agent(agent_args, exp_dir=exp_result.exp_dir)\n exp_args_list.append(\n ExpArgs(\n agent_args=agent_args,\n env_args=exp_result.exp_args.env_args,\n logging_level=log_level,\n )\n )\n\n # infer benchmark name from task list for backward compatible\n benchmark_name = exp_args_list[0].env_args.task_name.split(\".\")[0]\n\n study = Study(\n benchmark=benchmark_name,\n agent_args=[agent_args],\n )\n # this exp_args_list has a different agent_args for each experiment as repro_agent takes the exp_dir as argument\n # so we overwrite exp_args_list with the one we created above\n study.exp_args_list = exp_args_list\n return study\n\n\ndef make_repro_agent(agent_args: AgentArgs, exp_dir: Path | str):\n \"\"\"Create a reproducibility agent from an existing agent.\n\n Note, if a new flag was added, it was not saved in the original pickle. When\n loading the pickle it silently adds the missing flag and set it to its\n default value. The new repro agent_args will thus have the new flag set to\n its default value.\n\n Args:\n agent_args (AgentArgs): The original agent args.\n exp_dir (Path | str): The directory where the experiment was saved.\n\n Returns:\n ReproAgentArgs: The new agent args.\n \"\"\"\n exp_dir = Path(exp_dir)\n assert isinstance(agent_args, GenericAgentArgs)\n assert exp_dir.exists() # sanity check","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.make_repro_agent","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent.make_repro_agent#L186-L211","kind":"function","name":"make_repro_agent","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":186,"end_line":211,"context_start_line":166,"context_end_line":231,"code":" ExpArgs(\n agent_args=agent_args,\n env_args=exp_result.exp_args.env_args,\n logging_level=log_level,\n )\n )\n\n # infer benchmark name from task list for backward compatible\n benchmark_name = exp_args_list[0].env_args.task_name.split(\".\")[0]\n\n study = Study(\n benchmark=benchmark_name,\n agent_args=[agent_args],\n )\n # this exp_args_list has a different agent_args for each experiment as repro_agent takes the exp_dir as argument\n # so we overwrite exp_args_list with the one we created above\n study.exp_args_list = exp_args_list\n return study\n\n\ndef make_repro_agent(agent_args: AgentArgs, exp_dir: Path | str):\n \"\"\"Create a reproducibility agent from an existing agent.\n\n Note, if a new flag was added, it was not saved in the original pickle. When\n loading the pickle it silently adds the missing flag and set it to its\n default value. 
The new repro agent_args will thus have the new flag set to\n its default value.\n\n Args:\n agent_args (AgentArgs): The original agent args.\n exp_dir (Path | str): The directory where the experiment was saved.\n\n Returns:\n ReproAgentArgs: The new agent args.\n \"\"\"\n exp_dir = Path(exp_dir)\n assert isinstance(agent_args, GenericAgentArgs)\n assert exp_dir.exists() # sanity check\n\n return ReproAgentArgs(\n agent_name=f\"Repro_{agent_args.agent_name}\",\n chat_model_args=agent_args.chat_model_args,\n flags=agent_args.flags,\n max_retry=agent_args.max_retry,\n _repro_dir=exp_dir,\n )\n\n\ndef _make_diff(old_str, new_str):\n page = difflib.HtmlDiff().make_file(\n old_str.splitlines(), new_str.splitlines(), fromdesc=\"Old Version\", todesc=\"New Version\"\n )\n page = page.replace('nowrap=\"nowrap\"', \"\") # Remove nowrap attribute\n page = _set_style(page, DIFF_STYLE)\n return page\n\n\ndef _diff_stats(str1: str, str2: str):\n \"\"\"Try some kind of metrics to make stats about the amount of diffs between two strings.\"\"\"\n lines1 = str1.splitlines()\n lines2 = str2.splitlines()\n\n diff = list(difflib.Differ().compare(lines1, lines2))\n\n # Count added and removed lines\n added = sum(1 for line in diff if line.startswith(\"+ \"))","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent._make_diff","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent._make_diff#L214-L220","kind":"function","name":"_make_diff","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":214,"end_line":220,"context_start_line":194,"context_end_line":240,"code":" Args:\n agent_args (AgentArgs): The original agent args.\n exp_dir (Path | str): The directory where the experiment was saved.\n\n Returns:\n ReproAgentArgs: The new agent args.\n \"\"\"\n exp_dir = Path(exp_dir)\n assert isinstance(agent_args, GenericAgentArgs)\n assert exp_dir.exists() # sanity check\n\n return ReproAgentArgs(\n agent_name=f\"Repro_{agent_args.agent_name}\",\n chat_model_args=agent_args.chat_model_args,\n flags=agent_args.flags,\n max_retry=agent_args.max_retry,\n _repro_dir=exp_dir,\n )\n\n\ndef _make_diff(old_str, new_str):\n page = difflib.HtmlDiff().make_file(\n old_str.splitlines(), new_str.splitlines(), fromdesc=\"Old Version\", todesc=\"New Version\"\n )\n page = page.replace('nowrap=\"nowrap\"', \"\") # Remove nowrap attribute\n page = _set_style(page, DIFF_STYLE)\n return page\n\n\ndef _diff_stats(str1: str, str2: str):\n \"\"\"Try some kind of metrics to make stats about the amount of diffs between two strings.\"\"\"\n lines1 = str1.splitlines()\n lines2 = str2.splitlines()\n\n diff = list(difflib.Differ().compare(lines1, lines2))\n\n # Count added and removed lines\n added = sum(1 for line in diff if line.startswith(\"+ \"))\n removed = sum(1 for line in diff if line.startswith(\"- \"))\n\n # Calculate difference ratio\n difference_ratio = (added + removed) / (2 * max(len(lines1), len(lines2)))\n\n return dict(lines_added=added, lines_removed=removed, difference_ratio=difference_ratio)\n\n\ndef _set_style(html_str: str, style: str, prepend_previous_style: bool = False):","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent._diff_stats","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent._diff_stats#L223-L237","kind":"function","name":"_diff_stats","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":223,"end_line":237,"context_start_line":203,"context_end_line":257,"code":" assert exp_dir.exists() # sanity check\n\n return ReproAgentArgs(\n agent_name=f\"Repro_{agent_args.agent_name}\",\n chat_model_args=agent_args.chat_model_args,\n flags=agent_args.flags,\n max_retry=agent_args.max_retry,\n _repro_dir=exp_dir,\n )\n\n\ndef _make_diff(old_str, new_str):\n page = difflib.HtmlDiff().make_file(\n old_str.splitlines(), new_str.splitlines(), fromdesc=\"Old Version\", todesc=\"New Version\"\n )\n page = page.replace('nowrap=\"nowrap\"', \"\") # Remove nowrap attribute\n page = _set_style(page, DIFF_STYLE)\n return page\n\n\ndef _diff_stats(str1: str, str2: str):\n \"\"\"Try some kind of metrics to make stats about the amount of diffs between two strings.\"\"\"\n lines1 = str1.splitlines()\n lines2 = str2.splitlines()\n\n diff = list(difflib.Differ().compare(lines1, lines2))\n\n # Count added and removed lines\n added = sum(1 for line in diff if line.startswith(\"+ \"))\n removed = sum(1 for line in diff if line.startswith(\"- \"))\n\n # Calculate difference ratio\n difference_ratio = (added + removed) / (2 * max(len(lines1), len(lines2)))\n\n return dict(lines_added=added, lines_removed=removed, difference_ratio=difference_ratio)\n\n\ndef _set_style(html_str: str, style: str, prepend_previous_style: bool = False):\n \"\"\"Add a style tag to an HTML string.\"\"\"\n\n soup = BeautifulSoup(html_str, \"html.parser\")\n style_tag = soup.find(\"style\")\n\n if not style_tag:\n style_tag = soup.new_tag(\"style\")\n soup.head.append(style_tag)\n\n current_style = style_tag.string or \"\"\n\n if prepend_previous_style:\n style = f\"{style}\\n{current_style}\"\n else:\n style = f\"{current_style}\\n{style}\"\n\n style_tag.string = style","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent._set_style","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent._set_style#L240-L259","kind":"function","name":"_set_style","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":240,"end_line":259,"context_start_line":220,"context_end_line":279,"code":" return page\n\n\ndef _diff_stats(str1: str, str2: str):\n \"\"\"Try some kind of metrics to make stats about the amount of diffs between two strings.\"\"\"\n lines1 = str1.splitlines()\n lines2 = str2.splitlines()\n\n diff = list(difflib.Differ().compare(lines1, lines2))\n\n # Count added and removed lines\n added = sum(1 for line in diff if line.startswith(\"+ \"))\n removed = sum(1 for line in diff if line.startswith(\"- \"))\n\n # Calculate difference ratio\n difference_ratio = (added + removed) / (2 * max(len(lines1), len(lines2)))\n\n return dict(lines_added=added, lines_removed=removed, difference_ratio=difference_ratio)\n\n\ndef _set_style(html_str: str, style: str, prepend_previous_style: bool = False):\n \"\"\"Add a style tag to an HTML string.\"\"\"\n\n soup = BeautifulSoup(html_str, \"html.parser\")\n style_tag = soup.find(\"style\")\n\n if not style_tag:\n style_tag = 
soup.new_tag(\"style\")\n soup.head.append(style_tag)\n\n current_style = style_tag.string or \"\"\n\n if prepend_previous_style:\n style = f\"{style}\\n{current_style}\"\n else:\n style = f\"{current_style}\\n{style}\"\n\n style_tag.string = style\n\n return str(soup)\n\n\n# this is the style to adjust the diff table inside gradio\nDIFF_STYLE = \"\"\"\n table.diff {\n font-size: 10px;\n font-family: Courier;\n border: medium;\n width: 100%;\n max-width: 100%; /* Ensure table does not exceed its container */\n table-layout: auto; /* Adjust column sizes dynamically */\n word-wrap: break-word;\n overflow-wrap: break-word;\n }\n /* Constrain the max-width of the 3rd and 6th columns */\n td:nth-child(3), td:nth-child(6) {\n max-width: 200px; /* Adjust this value to suit your content */\n white-space: normal; /* Allow wrapping in content columns */\n overflow-wrap: break-word; /* Break long words/content */\n }","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent.__init__#L81-L89","kind":"function","name":"__init__","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":81,"end_line":89,"context_start_line":61,"context_end_line":109,"code":" return {}\n\n\n@dataclass\nclass ReproAgentArgs(GenericAgentArgs):\n # starting with \"_\" will prevent from being part of the index in the load_results function\n _repro_dir: str = None\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n super().__post_init__()\n self.agent_name = f\"Repro_{self.agent_name}\"\n except AttributeError:\n pass\n\n def make_agent(self):\n return ReproAgent(self.chat_model_args, self.flags, self.max_retry, self._repro_dir)\n\n\nclass ReproAgent(GenericAgent):\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry=4,\n repro_dir=None,\n ):\n self.exp_result = ExpResult(repro_dir)\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_action(self, obs):\n # replace the chat model with a reproducible chat that will mimic the\n # same answers\n step = len(self.actions)\n step_info = self.exp_result.get_step_info(step)\n old_chat_messages = step_info.agent_info.get(\"chat_messages\", None) # type: Discussion\n\n if old_chat_messages is None:\n err_msg = self.exp_result.summary_info[\"err_msg\"]\n\n agent_info = AgentInfo(\n markdown_page=f\"Agent had no chat messages. Perhaps there was an error. 
err_msg:\\n{err_msg}\",\n )\n return None, agent_info\n\n # an old bug prevented the response from being saved.\n if len(old_chat_messages) == 2:\n recorded_action = step_info.action\n if recorded_action:","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.__call__","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent.__call__#L47-L58","kind":"function","name":"__call__","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":47,"end_line":58,"context_start_line":27,"context_end_line":78,"code":"from agentlab.experiments.loop import ExpArgs, ExpResult, yield_all_exp_results\nfrom agentlab.experiments.study import Study\nfrom agentlab.llm.chat_api import make_assistant_message\nfrom agentlab.llm.llm_utils import Discussion, messages_to_dict\n\nfrom .generic_agent import GenericAgent, GenericAgentArgs\n\n\nclass ReproChatModel:\n \"\"\"A chat model that reproduces a conversation.\n\n Args:\n messages (list): A list of messages previously executed.\n delay (int): A delay to simulate the time it takes to generate a response.\n \"\"\"\n\n def __init__(self, old_messages, delay=1) -> None:\n self.old_messages = old_messages\n self.delay = delay\n\n def __call__(self, messages: list | Discussion):\n self.new_messages = copy(messages)\n\n if len(messages) >= len(self.old_messages):\n # if for some reason the llm response was not saved\n return make_assistant_message(\"\"\"None\"\"\")\n\n old_response = self.old_messages[len(messages)]\n self.new_messages.append(old_response)\n time.sleep(self.delay)\n # return the next message in the list\n return old_response\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass ReproAgentArgs(GenericAgentArgs):\n # starting with \"_\" will prevent from being part of the index in the load_results function\n _repro_dir: str = None\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n super().__post_init__()\n self.agent_name = f\"Repro_{self.agent_name}\"\n except AttributeError:\n pass\n\n def make_agent(self):\n return ReproAgent(self.chat_model_args, self.flags, self.max_retry, self._repro_dir)\n","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.get_stats","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent.get_stats#L60-L61","kind":"function","name":"get_stats","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":60,"end_line":61,"context_start_line":40,"context_end_line":81,"code":" delay (int): A delay to simulate the time it takes to generate a response.\n \"\"\"\n\n def __init__(self, old_messages, delay=1) -> None:\n self.old_messages = old_messages\n self.delay = delay\n\n def __call__(self, messages: list | Discussion):\n self.new_messages = copy(messages)\n\n if len(messages) >= len(self.old_messages):\n # if for some reason the llm response was not saved\n return make_assistant_message(\"\"\"None\"\"\")\n\n old_response = self.old_messages[len(messages)]\n self.new_messages.append(old_response)\n time.sleep(self.delay)\n # return the next message in the list\n return old_response\n\n def get_stats(self):\n return 
{}\n\n\n@dataclass\nclass ReproAgentArgs(GenericAgentArgs):\n # starting with \"_\" will prevent from being part of the index in the load_results function\n _repro_dir: str = None\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n super().__post_init__()\n self.agent_name = f\"Repro_{self.agent_name}\"\n except AttributeError:\n pass\n\n def make_agent(self):\n return ReproAgent(self.chat_model_args, self.flags, self.max_retry, self._repro_dir)\n\n\nclass ReproAgent(GenericAgent):\n def __init__(","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.__post_init__","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent.__post_init__#L69-L74","kind":"function","name":"__post_init__","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":69,"end_line":74,"context_start_line":49,"context_end_line":94,"code":"\n if len(messages) >= len(self.old_messages):\n # if for some reason the llm response was not saved\n return make_assistant_message(\"\"\"None\"\"\")\n\n old_response = self.old_messages[len(messages)]\n self.new_messages.append(old_response)\n time.sleep(self.delay)\n # return the next message in the list\n return old_response\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass ReproAgentArgs(GenericAgentArgs):\n # starting with \"_\" will prevent from being part of the index in the load_results function\n _repro_dir: str = None\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n super().__post_init__()\n self.agent_name = f\"Repro_{self.agent_name}\"\n except AttributeError:\n pass\n\n def make_agent(self):\n return ReproAgent(self.chat_model_args, self.flags, self.max_retry, self._repro_dir)\n\n\nclass ReproAgent(GenericAgent):\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry=4,\n repro_dir=None,\n ):\n self.exp_result = ExpResult(repro_dir)\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_action(self, obs):\n # replace the chat model with a reproducible chat that will mimic the\n # same answers\n step = len(self.actions)","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent.make_agent#L76-L77","kind":"function","name":"make_agent","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":76,"end_line":77,"context_start_line":56,"context_end_line":97,"code":" time.sleep(self.delay)\n # return the next message in the list\n return old_response\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass ReproAgentArgs(GenericAgentArgs):\n # starting with \"_\" will prevent from being part of the index in the load_results function\n _repro_dir: str = None\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n super().__post_init__()\n self.agent_name = f\"Repro_{self.agent_name}\"\n except AttributeError:\n pass\n\n def make_agent(self):\n return ReproAgent(self.chat_model_args, self.flags, self.max_retry, self._repro_dir)\n\n\nclass 
ReproAgent(GenericAgent):\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry=4,\n repro_dir=None,\n ):\n self.exp_result = ExpResult(repro_dir)\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_action(self, obs):\n # replace the chat model with a reproducible chat that will mimic the\n # same answers\n step = len(self.actions)\n step_info = self.exp_result.get_step_info(step)\n old_chat_messages = step_info.agent_info.get(\"chat_messages\", None) # type: Discussion\n","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.reproducibility_agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.reproducibility_agent.get_action#L91-L119","kind":"function","name":"get_action","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":91,"end_line":119,"context_start_line":71,"context_end_line":139,"code":" super().__post_init__()\n self.agent_name = f\"Repro_{self.agent_name}\"\n except AttributeError:\n pass\n\n def make_agent(self):\n return ReproAgent(self.chat_model_args, self.flags, self.max_retry, self._repro_dir)\n\n\nclass ReproAgent(GenericAgent):\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry=4,\n repro_dir=None,\n ):\n self.exp_result = ExpResult(repro_dir)\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_action(self, obs):\n # replace the chat model with a reproducible chat that will mimic the\n # same answers\n step = len(self.actions)\n step_info = self.exp_result.get_step_info(step)\n old_chat_messages = step_info.agent_info.get(\"chat_messages\", None) # type: Discussion\n\n if old_chat_messages is None:\n err_msg = self.exp_result.summary_info[\"err_msg\"]\n\n agent_info = AgentInfo(\n markdown_page=f\"Agent had no chat messages. Perhaps there was an error. 
err_msg:\\n{err_msg}\",\n )\n return None, agent_info\n\n # an old bug prevented the response from being saved.\n if len(old_chat_messages) == 2:\n recorded_action = step_info.action\n if recorded_action:\n # Recreate the 3rd message based on the recorded action\n assistant_message = make_assistant_message(f\"{recorded_action}\")\n old_chat_messages.append(assistant_message)\n\n self.chat_llm = ReproChatModel(old_chat_messages)\n action, agent_info = super().get_action(obs)\n\n return _make_agent_stats(\n action, agent_info, step_info, old_chat_messages, self.chat_llm.new_messages\n )\n\n\ndef _make_agent_stats(action, agent_info, step_info, old_chat_messages, new_chat_messages):\n if isinstance(agent_info, dict):\n agent_info = AgentInfo(**agent_info)\n\n old_msg_str = _format_messages(old_chat_messages)\n new_msg_str = _format_messages(new_chat_messages)\n\n agent_info.html_page = _make_diff(old_str=old_msg_str, new_str=new_msg_str)\n agent_info.stats.update(_diff_stats(old_msg_str, new_msg_str))\n\n return action, agent_info\n\n\ndef _format_messages(messages: list[dict]):\n if isinstance(messages, Discussion):\n return messages.to_string()\n messages = messages_to_dict(messages)\n return \"\\n\".join(f\"{m['role']} message:\\n{m['content']}\\n\" for m in messages)","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent","uri":"program://AgentLab/module/src.agentlab.agents.generic_agent.generic_agent#L1-L203","kind":"module","name":"src.agentlab.agents.generic_agent.generic_agent","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":1,"end_line":203,"context_start_line":1,"context_end_line":203,"code":"\"\"\"\nGenericAgent implementation for AgentLab\n\nThis module defines a `GenericAgent` class and its associated arguments for use in the AgentLab framework. \\\nThe `GenericAgent` class is designed to interact with a chat-based model to determine actions based on \\\nobservations. It includes methods for preprocessing observations, generating actions, and managing internal \\\nstate such as plans, memories, and thoughts. 
The `GenericAgentArgs` class provides configuration options for \\\nthe agent, including model arguments and flags for various behaviors.\n\"\"\"\n\nfrom copy import deepcopy\nfrom dataclasses import asdict, dataclass\nfrom functools import partial\nfrom warnings import warn\n\nimport bgym\nfrom bgym import Benchmark\nfrom browsergym.experiments.agent import Agent, AgentInfo\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import Discussion, ParseError, SystemMessage, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nfrom .generic_agent_prompt import GenericPromptFlags, MainPrompt\n\n\n@dataclass\nclass GenericAgentArgs(AgentArgs):\n chat_model_args: BaseModelArgs = None\n flags: GenericPromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"GenericAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n if benchmark.name.startswith(\"miniwob\"):\n self.flags.obs.use_html = True\n\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n self.flags.action.action_set = deepcopy(benchmark.high_level_action_set_args)\n\n # for backward compatibility with old traces\n if self.flags.action.multi_actions is not None:\n self.flags.action.action_set.multiaction = self.flags.action.multi_actions\n if self.flags.action.is_strict is not None:\n self.flags.action.action_set.strict = self.flags.action.is_strict\n\n # verify if we can remove this\n if demo_mode:\n self.flags.action.action_set.demo_mode = \"all_blue\"\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return GenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass GenericAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: GenericPromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self._check_flag_constancy()\n self.reset(seed=None)\n\n def obs_preprocessor(self, obs: dict) -> dict:\n return self._obs_preprocessor(obs)\n\n @cost_tracker_decorator\n def get_action(self, obs):\n\n self.obs_history.append(obs)\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n\n max_prompt_tokens, max_trunc_itr = self._get_maxes()\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n\n human_prompt = dp.fit_tokens(\n shrinkable=main_prompt,\n max_prompt_tokens=max_prompt_tokens,\n model_name=self.chat_model_args.model_name,\n max_iterations=max_trunc_itr,\n additional_prompts=system_prompt,\n )\n try:\n # TODO, we would need to further shrink the prompt if the retry\n # cause it to be too long\n\n 
chat_messages = Discussion([system_prompt, human_prompt])\n ans_dict = retry(\n self.chat_llm,\n chat_messages,\n n_retry=self.max_retry,\n parser=main_prompt._parse_answer,\n )\n ans_dict[\"busted_retry\"] = 0\n # inferring the number of retries, TODO: make this less hacky\n ans_dict[\"n_retry\"] = (len(chat_messages) - 3) / 2\n except ParseError as e:\n ans_dict = dict(\n action=None,\n n_retry=self.max_retry + 1,\n busted_retry=1,\n )\n\n stats = self.chat_llm.get_stats()\n stats[\"n_retry\"] = ans_dict[\"n_retry\"]\n stats[\"busted_retry\"] = ans_dict[\"busted_retry\"]\n\n self.plan = ans_dict.get(\"plan\", self.plan)\n self.plan_step = ans_dict.get(\"step\", self.plan_step)\n self.actions.append(ans_dict[\"action\"])\n self.memories.append(ans_dict.get(\"memory\", None))\n self.thoughts.append(ans_dict.get(\"think\", None))\n\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n )\n return ans_dict[\"action\"], agent_info\n\n def reset(self, seed=None):\n self.seed = seed\n self.plan = \"No plan yet\"\n self.plan_step = -1\n self.memories = []\n self.thoughts = []\n self.actions = []\n self.obs_history = []\n\n def _check_flag_constancy(self):\n flags = self.flags\n if flags.obs.use_som:\n if not flags.obs.use_screenshot:\n warn(\n \"\"\"\nWarning: use_som=True requires use_screenshot=True. Disabling use_som.\"\"\"\n )\n flags.obs.use_som = False\n if flags.obs.use_screenshot:\n if not self.chat_model_args.vision_support:\n warn(\n \"\"\"\nWarning: use_screenshot is set to True, but the chat model \\\ndoes not support vision. Disabling use_screenshot.\"\"\"\n )\n flags.obs.use_screenshot = False\n return flags\n\n def _get_maxes(self):\n maxes = (\n self.flags.max_prompt_tokens,\n self.chat_model_args.max_total_tokens,\n self.chat_model_args.max_input_tokens,\n )\n maxes = [m for m in maxes if m is not None]\n max_prompt_tokens = min(maxes) if maxes else None\n max_trunc_itr = (\n self.flags.max_trunc_itr\n if self.flags.max_trunc_itr\n else 20 # dangerous to change the default value here?\n )\n return max_prompt_tokens, max_trunc_itr","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.GenericAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.generic_agent.generic_agent.GenericAgentArgs#L30-L71","kind":"class","name":"GenericAgentArgs","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":30,"end_line":71,"context_start_line":10,"context_end_line":91,"code":"\nfrom copy import deepcopy\nfrom dataclasses import asdict, dataclass\nfrom functools import partial\nfrom warnings import warn\n\nimport bgym\nfrom bgym import Benchmark\nfrom browsergym.experiments.agent import Agent, AgentInfo\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import Discussion, ParseError, SystemMessage, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nfrom .generic_agent_prompt import GenericPromptFlags, MainPrompt\n\n\n@dataclass\nclass GenericAgentArgs(AgentArgs):\n chat_model_args: BaseModelArgs = None\n flags: GenericPromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for 
hyperparameter generation\n self.agent_name = f\"GenericAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n if benchmark.name.startswith(\"miniwob\"):\n self.flags.obs.use_html = True\n\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n self.flags.action.action_set = deepcopy(benchmark.high_level_action_set_args)\n\n # for backward compatibility with old traces\n if self.flags.action.multi_actions is not None:\n self.flags.action.action_set.multiaction = self.flags.action.multi_actions\n if self.flags.action.is_strict is not None:\n self.flags.action.action_set.strict = self.flags.action.is_strict\n\n # verify if we can remove this\n if demo_mode:\n self.flags.action.action_set.demo_mode = \"all_blue\"\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return GenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass GenericAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: GenericPromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self._check_flag_constancy()","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.GenericAgent","uri":"program://AgentLab/class/src.agentlab.agents.generic_agent.generic_agent.GenericAgent#L74-L203","kind":"class","name":"GenericAgent","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":74,"end_line":203,"context_start_line":54,"context_end_line":203,"code":"\n # verify if we can remove this\n if demo_mode:\n self.flags.action.action_set.demo_mode = \"all_blue\"\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return GenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass GenericAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: GenericPromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self._check_flag_constancy()\n self.reset(seed=None)\n\n def obs_preprocessor(self, obs: dict) -> dict:\n return self._obs_preprocessor(obs)\n\n @cost_tracker_decorator\n def get_action(self, obs):\n\n self.obs_history.append(obs)\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n\n 
max_prompt_tokens, max_trunc_itr = self._get_maxes()\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n\n human_prompt = dp.fit_tokens(\n shrinkable=main_prompt,\n max_prompt_tokens=max_prompt_tokens,\n model_name=self.chat_model_args.model_name,\n max_iterations=max_trunc_itr,\n additional_prompts=system_prompt,\n )\n try:\n # TODO, we would need to further shrink the prompt if the retry\n # cause it to be too long\n\n chat_messages = Discussion([system_prompt, human_prompt])\n ans_dict = retry(\n self.chat_llm,\n chat_messages,\n n_retry=self.max_retry,\n parser=main_prompt._parse_answer,\n )\n ans_dict[\"busted_retry\"] = 0\n # inferring the number of retries, TODO: make this less hacky\n ans_dict[\"n_retry\"] = (len(chat_messages) - 3) / 2\n except ParseError as e:\n ans_dict = dict(\n action=None,\n n_retry=self.max_retry + 1,\n busted_retry=1,\n )\n\n stats = self.chat_llm.get_stats()\n stats[\"n_retry\"] = ans_dict[\"n_retry\"]\n stats[\"busted_retry\"] = ans_dict[\"busted_retry\"]\n\n self.plan = ans_dict.get(\"plan\", self.plan)\n self.plan_step = ans_dict.get(\"step\", self.plan_step)\n self.actions.append(ans_dict[\"action\"])\n self.memories.append(ans_dict.get(\"memory\", None))\n self.thoughts.append(ans_dict.get(\"think\", None))\n\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n )\n return ans_dict[\"action\"], agent_info\n\n def reset(self, seed=None):\n self.seed = seed\n self.plan = \"No plan yet\"\n self.plan_step = -1\n self.memories = []\n self.thoughts = []\n self.actions = []\n self.obs_history = []\n\n def _check_flag_constancy(self):\n flags = self.flags\n if flags.obs.use_som:\n if not flags.obs.use_screenshot:\n warn(\n \"\"\"\nWarning: use_som=True requires use_screenshot=True. Disabling use_som.\"\"\"\n )\n flags.obs.use_som = False\n if flags.obs.use_screenshot:\n if not self.chat_model_args.vision_support:\n warn(\n \"\"\"\nWarning: use_screenshot is set to True, but the chat model \\\ndoes not support vision. 
Disabling use_screenshot.\"\"\"\n )\n flags.obs.use_screenshot = False\n return flags\n\n def _get_maxes(self):\n maxes = (\n self.flags.max_prompt_tokens,\n self.chat_model_args.max_total_tokens,\n self.chat_model_args.max_input_tokens,\n )\n maxes = [m for m in maxes if m is not None]\n max_prompt_tokens = min(maxes) if maxes else None\n max_trunc_itr = (\n self.flags.max_trunc_itr\n if self.flags.max_trunc_itr\n else 20 # dangerous to change the default value here?\n )\n return max_prompt_tokens, max_trunc_itr","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.__post_init__","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent.__post_init__#L35-L39","kind":"function","name":"__post_init__","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":35,"end_line":39,"context_start_line":15,"context_end_line":59,"code":"\nimport bgym\nfrom bgym import Benchmark\nfrom browsergym.experiments.agent import Agent, AgentInfo\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import Discussion, ParseError, SystemMessage, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nfrom .generic_agent_prompt import GenericPromptFlags, MainPrompt\n\n\n@dataclass\nclass GenericAgentArgs(AgentArgs):\n chat_model_args: BaseModelArgs = None\n flags: GenericPromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"GenericAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n if benchmark.name.startswith(\"miniwob\"):\n self.flags.obs.use_html = True\n\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n self.flags.action.action_set = deepcopy(benchmark.high_level_action_set_args)\n\n # for backward compatibility with old traces\n if self.flags.action.multi_actions is not None:\n self.flags.action.action_set.multiaction = self.flags.action.multi_actions\n if self.flags.action.is_strict is not None:\n self.flags.action.action_set.strict = self.flags.action.is_strict\n\n # verify if we can remove this\n if demo_mode:\n self.flags.action.action_set.demo_mode = \"all_blue\"\n\n def set_reproducibility_mode(self):","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.set_benchmark","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent.set_benchmark#L41-L57","kind":"function","name":"set_benchmark","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":41,"end_line":57,"context_start_line":21,"context_end_line":77,"code":"from agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import Discussion, ParseError, SystemMessage, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nfrom .generic_agent_prompt import GenericPromptFlags, MainPrompt\n\n\n@dataclass\nclass GenericAgentArgs(AgentArgs):\n chat_model_args: 
BaseModelArgs = None\n flags: GenericPromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"GenericAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n if benchmark.name.startswith(\"miniwob\"):\n self.flags.obs.use_html = True\n\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n self.flags.action.action_set = deepcopy(benchmark.high_level_action_set_args)\n\n # for backward compatibility with old traces\n if self.flags.action.multi_actions is not None:\n self.flags.action.action_set.multiaction = self.flags.action.multi_actions\n if self.flags.action.is_strict is not None:\n self.flags.action.action_set.strict = self.flags.action.is_strict\n\n # verify if we can remove this\n if demo_mode:\n self.flags.action.action_set.demo_mode = \"all_blue\"\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return GenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass GenericAgent(Agent):\n\n def __init__(\n self,","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.set_reproducibility_mode","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent.set_reproducibility_mode#L59-L60","kind":"function","name":"set_reproducibility_mode","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":59,"end_line":60,"context_start_line":39,"context_end_line":80,"code":" pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n if benchmark.name.startswith(\"miniwob\"):\n self.flags.obs.use_html = True\n\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n self.flags.action.action_set = deepcopy(benchmark.high_level_action_set_args)\n\n # for backward compatibility with old traces\n if self.flags.action.multi_actions is not None:\n self.flags.action.action_set.multiaction = self.flags.action.multi_actions\n if self.flags.action.is_strict is not None:\n self.flags.action.action_set.strict = self.flags.action.is_strict\n\n # verify if we can remove this\n if demo_mode:\n self.flags.action.action_set.demo_mode = \"all_blue\"\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return GenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass GenericAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: GenericPromptFlags,\n max_retry: int = 4,","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} 
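Editor's note: the set_reproducibility_mode record above shows GenericAgentArgs forcing temperature to 0; a custom agent would supply its own override. A minimal sketch, assuming AgentArgs can be subclassed as a dataclass the same way GenericAgentArgs is (MyAgentArgs and its sampling fields are invented for illustration):

from dataclasses import dataclass

from agentlab.agents.agent_args import AgentArgs


@dataclass
class MyAgentArgs(AgentArgs):  # hypothetical custom agent args
    temperature: float = 0.3
    top_p: float = 1.0

    def set_reproducibility_mode(self):
        # Mirror GenericAgentArgs: pin sampling so repeated runs of the same
        # prompt are as deterministic as the model backend allows.
        self.temperature = 0
        self.top_p = 1.0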
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.prepare","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent.prepare#L62-L63","kind":"function","name":"prepare","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":62,"end_line":63,"context_start_line":42,"context_end_line":83,"code":" \"\"\"Override Some flags based on the benchmark.\"\"\"\n if benchmark.name.startswith(\"miniwob\"):\n self.flags.obs.use_html = True\n\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n self.flags.action.action_set = deepcopy(benchmark.high_level_action_set_args)\n\n # for backward compatibility with old traces\n if self.flags.action.multi_actions is not None:\n self.flags.action.action_set.multiaction = self.flags.action.multi_actions\n if self.flags.action.is_strict is not None:\n self.flags.action.action_set.strict = self.flags.action.is_strict\n\n # verify if we can remove this\n if demo_mode:\n self.flags.action.action_set.demo_mode = \"all_blue\"\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return GenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass GenericAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: GenericPromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.close","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent.close#L65-L66","kind":"function","name":"close","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":65,"end_line":66,"context_start_line":45,"context_end_line":86,"code":"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n self.flags.action.action_set = deepcopy(benchmark.high_level_action_set_args)\n\n # for backward compatibility with old traces\n if self.flags.action.multi_actions is not None:\n self.flags.action.action_set.multiaction = self.flags.action.multi_actions\n if self.flags.action.is_strict is not None:\n self.flags.action.action_set.strict = self.flags.action.is_strict\n\n # verify if we can remove this\n if demo_mode:\n self.flags.action.action_set.demo_mode = \"all_blue\"\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return GenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass GenericAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: GenericPromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent.make_agent#L68-L71","kind":"function","name":"make_agent","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":68,"end_line":71,"context_start_line":48,"context_end_line":91,"code":"\n # for backward compatibility with old traces\n if self.flags.action.multi_actions is not None:\n self.flags.action.action_set.multiaction = self.flags.action.multi_actions\n if self.flags.action.is_strict is not None:\n self.flags.action.action_set.strict = self.flags.action.is_strict\n\n # verify if we can remove this\n if demo_mode:\n self.flags.action.action_set.demo_mode = \"all_blue\"\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return GenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass GenericAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: GenericPromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self._check_flag_constancy()","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent.__init__#L76-L92","kind":"function","name":"__init__","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":76,"end_line":92,"context_start_line":56,"context_end_line":112,"code":" if demo_mode:\n self.flags.action.action_set.demo_mode = \"all_blue\"\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return GenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass GenericAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: GenericPromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self._check_flag_constancy()\n self.reset(seed=None)\n\n def obs_preprocessor(self, obs: dict) -> dict:\n return self._obs_preprocessor(obs)\n\n @cost_tracker_decorator\n def get_action(self, obs):\n\n self.obs_history.append(obs)\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n\n max_prompt_tokens, max_trunc_itr = 
self._get_maxes()","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.obs_preprocessor","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent.obs_preprocessor#L94-L95","kind":"function","name":"obs_preprocessor","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":94,"end_line":95,"context_start_line":74,"context_end_line":115,"code":"class GenericAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: GenericPromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self._check_flag_constancy()\n self.reset(seed=None)\n\n def obs_preprocessor(self, obs: dict) -> dict:\n return self._obs_preprocessor(obs)\n\n @cost_tracker_decorator\n def get_action(self, obs):\n\n self.obs_history.append(obs)\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n\n max_prompt_tokens, max_trunc_itr = self._get_maxes()\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent.get_action#L98-L160","kind":"function","name":"get_action","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":98,"end_line":160,"context_start_line":78,"context_end_line":180,"code":" chat_model_args: BaseModelArgs,\n flags: GenericPromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self._check_flag_constancy()\n self.reset(seed=None)\n\n def obs_preprocessor(self, obs: dict) -> dict:\n return self._obs_preprocessor(obs)\n\n @cost_tracker_decorator\n def get_action(self, obs):\n\n self.obs_history.append(obs)\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n\n max_prompt_tokens, max_trunc_itr = self._get_maxes()\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n\n human_prompt = dp.fit_tokens(\n shrinkable=main_prompt,\n max_prompt_tokens=max_prompt_tokens,\n model_name=self.chat_model_args.model_name,\n max_iterations=max_trunc_itr,\n additional_prompts=system_prompt,\n )\n try:\n # TODO, we would need to further shrink the prompt if the retry\n # cause it to be too long\n\n chat_messages = Discussion([system_prompt, human_prompt])\n ans_dict = retry(\n self.chat_llm,\n chat_messages,\n n_retry=self.max_retry,\n parser=main_prompt._parse_answer,\n )\n ans_dict[\"busted_retry\"] = 0\n # 
inferring the number of retries, TODO: make this less hacky\n ans_dict[\"n_retry\"] = (len(chat_messages) - 3) / 2\n except ParseError as e:\n ans_dict = dict(\n action=None,\n n_retry=self.max_retry + 1,\n busted_retry=1,\n )\n\n stats = self.chat_llm.get_stats()\n stats[\"n_retry\"] = ans_dict[\"n_retry\"]\n stats[\"busted_retry\"] = ans_dict[\"busted_retry\"]\n\n self.plan = ans_dict.get(\"plan\", self.plan)\n self.plan_step = ans_dict.get(\"step\", self.plan_step)\n self.actions.append(ans_dict[\"action\"])\n self.memories.append(ans_dict.get(\"memory\", None))\n self.thoughts.append(ans_dict.get(\"think\", None))\n\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n )\n return ans_dict[\"action\"], agent_info\n\n def reset(self, seed=None):\n self.seed = seed\n self.plan = \"No plan yet\"\n self.plan_step = -1\n self.memories = []\n self.thoughts = []\n self.actions = []\n self.obs_history = []\n\n def _check_flag_constancy(self):\n flags = self.flags\n if flags.obs.use_som:\n if not flags.obs.use_screenshot:\n warn(\n \"\"\"\nWarning: use_som=True requires use_screenshot=True. Disabling use_som.\"\"\"\n )\n flags.obs.use_som = False\n if flags.obs.use_screenshot:","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent.reset","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent.reset#L162-L169","kind":"function","name":"reset","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":162,"end_line":169,"context_start_line":142,"context_end_line":189,"code":" )\n\n stats = self.chat_llm.get_stats()\n stats[\"n_retry\"] = ans_dict[\"n_retry\"]\n stats[\"busted_retry\"] = ans_dict[\"busted_retry\"]\n\n self.plan = ans_dict.get(\"plan\", self.plan)\n self.plan_step = ans_dict.get(\"step\", self.plan_step)\n self.actions.append(ans_dict[\"action\"])\n self.memories.append(ans_dict.get(\"memory\", None))\n self.thoughts.append(ans_dict.get(\"think\", None))\n\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n )\n return ans_dict[\"action\"], agent_info\n\n def reset(self, seed=None):\n self.seed = seed\n self.plan = \"No plan yet\"\n self.plan_step = -1\n self.memories = []\n self.thoughts = []\n self.actions = []\n self.obs_history = []\n\n def _check_flag_constancy(self):\n flags = self.flags\n if flags.obs.use_som:\n if not flags.obs.use_screenshot:\n warn(\n \"\"\"\nWarning: use_som=True requires use_screenshot=True. Disabling use_som.\"\"\"\n )\n flags.obs.use_som = False\n if flags.obs.use_screenshot:\n if not self.chat_model_args.vision_support:\n warn(\n \"\"\"\nWarning: use_screenshot is set to True, but the chat model \\\ndoes not support vision. 
Disabling use_screenshot.\"\"\"\n )\n flags.obs.use_screenshot = False\n return flags\n","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent._check_flag_constancy","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent._check_flag_constancy#L171-L188","kind":"function","name":"_check_flag_constancy","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":171,"end_line":188,"context_start_line":151,"context_end_line":203,"code":" self.memories.append(ans_dict.get(\"memory\", None))\n self.thoughts.append(ans_dict.get(\"think\", None))\n\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n )\n return ans_dict[\"action\"], agent_info\n\n def reset(self, seed=None):\n self.seed = seed\n self.plan = \"No plan yet\"\n self.plan_step = -1\n self.memories = []\n self.thoughts = []\n self.actions = []\n self.obs_history = []\n\n def _check_flag_constancy(self):\n flags = self.flags\n if flags.obs.use_som:\n if not flags.obs.use_screenshot:\n warn(\n \"\"\"\nWarning: use_som=True requires use_screenshot=True. Disabling use_som.\"\"\"\n )\n flags.obs.use_som = False\n if flags.obs.use_screenshot:\n if not self.chat_model_args.vision_support:\n warn(\n \"\"\"\nWarning: use_screenshot is set to True, but the chat model \\\ndoes not support vision. Disabling use_screenshot.\"\"\"\n )\n flags.obs.use_screenshot = False\n return flags\n\n def _get_maxes(self):\n maxes = (\n self.flags.max_prompt_tokens,\n self.chat_model_args.max_total_tokens,\n self.chat_model_args.max_input_tokens,\n )\n maxes = [m for m in maxes if m is not None]\n max_prompt_tokens = min(maxes) if maxes else None\n max_trunc_itr = (\n self.flags.max_trunc_itr\n if self.flags.max_trunc_itr\n else 20 # dangerous to change the default value here?\n )\n return max_prompt_tokens, max_trunc_itr","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.generic_agent.generic_agent._get_maxes","uri":"program://AgentLab/function/src.agentlab.agents.generic_agent.generic_agent._get_maxes#L190-L203","kind":"function","name":"_get_maxes","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":190,"end_line":203,"context_start_line":170,"context_end_line":203,"code":"\n def _check_flag_constancy(self):\n flags = self.flags\n if flags.obs.use_som:\n if not flags.obs.use_screenshot:\n warn(\n \"\"\"\nWarning: use_som=True requires use_screenshot=True. Disabling use_som.\"\"\"\n )\n flags.obs.use_som = False\n if flags.obs.use_screenshot:\n if not self.chat_model_args.vision_support:\n warn(\n \"\"\"\nWarning: use_screenshot is set to True, but the chat model \\\ndoes not support vision. 
Disabling use_screenshot.\"\"\"\n )\n flags.obs.use_screenshot = False\n return flags\n\n def _get_maxes(self):\n maxes = (\n self.flags.max_prompt_tokens,\n self.chat_model_args.max_total_tokens,\n self.chat_model_args.max_input_tokens,\n )\n maxes = [m for m in maxes if m is not None]\n max_prompt_tokens = min(maxes) if maxes else None\n max_trunc_itr = (\n self.flags.max_trunc_itr\n if self.flags.max_trunc_itr\n else 20 # dangerous to change the default value here?\n )\n return max_prompt_tokens, max_trunc_itr","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.base_multi_candidate_agent","uri":"program://AgentLab/module/src.agentlab.agents.hitl_agent.base_multi_candidate_agent#L1-L50","kind":"module","name":"src.agentlab.agents.hitl_agent.base_multi_candidate_agent","path":"src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","language":"python","start_line":1,"end_line":50,"context_start_line":1,"context_end_line":50,"code":"from typing_extensions import Protocol\n\nfrom agentlab.agents.agent_args import AgentArgs\n\n\nclass MultiCandidateAgent(Protocol):\n \"\"\"\n Protocol for agents that generate multiple candidates for get_action.\n\n This protocol defines the contract for agents that can generate\n multiple candidate actions and allow selection of one of them for execution.\n \"\"\"\n\n def get_candidate_generations(\n self, obs: dict, hint: list[str] | None = None, n_candidates: int = 3\n ) -> \"list[dict]\":\n \"\"\"\n Generate multiple candidate actions for the given observation.\n\n You can pass extra info in agent_info to update internal state of the\n agent based on the selected candidate. Your internal state management\n should be robust to multiple calls to the get_candidate_generations method\n in a single step.\n\n Args:\n obs: The current observation dictionary containing environment state\n hint: Optional list of hint strings to guide candidate generation\n n_candidates: Number of candidate actions to generate\n \"\"\"\n ...\n\n def update_agent_state_from_selected_candidate(self, output: dict):\n \"\"\"\n Update the agent's internal state based on the selected candidate.\n This can include any memory or planning updates.\n\n Args:\n output: The selected candidate action dictionary\n \"\"\"\n pass\n\n\nclass MultiCandidateAgentArgs(AgentArgs):\n def make_agent(self) -> MultiCandidateAgent: ...\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"9be787fa4619fecac3c7f3d314dbf3a8b369797bfbcd9da134729e601b18a596","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.base_multi_candidate_agent.MultiCandidateAgent","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.base_multi_candidate_agent.MultiCandidateAgent#L6-L40","kind":"class","name":"MultiCandidateAgent","path":"src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","language":"python","start_line":6,"end_line":40,"context_start_line":1,"context_end_line":50,"code":"from typing_extensions import Protocol\n\nfrom agentlab.agents.agent_args import AgentArgs\n\n\nclass MultiCandidateAgent(Protocol):\n \"\"\"\n Protocol for agents that generate multiple candidates for get_action.\n\n This protocol defines the contract for agents that can generate\n multiple candidate actions and 
allow selection of one of them for execution.\n \"\"\"\n\n def get_candidate_generations(\n self, obs: dict, hint: list[str] | None = None, n_candidates: int = 3\n ) -> \"list[dict]\":\n \"\"\"\n Generate multiple candidate actions for the given observation.\n\n You can pass extra info in agent_info to update internal state of the\n agent based on the selected candidate. Your internal state management\n should be robust to multiple calls to the get_candidate_generations method\n in a single step.\n\n Args:\n obs: The current observation dictionary containing environment state\n hint: Optional list of hint strings to guide candidate generation\n n_candidates: Number of candidate actions to generate\n \"\"\"\n ...\n\n def update_agent_state_from_selected_candidate(self, output: dict):\n \"\"\"\n Update the agent's internal state based on the selected candidate.\n This can include any memory or planning updates.\n\n Args:\n output: The selected candidate action dictionary\n \"\"\"\n pass\n\n\nclass MultiCandidateAgentArgs(AgentArgs):\n def make_agent(self) -> MultiCandidateAgent: ...\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"9be787fa4619fecac3c7f3d314dbf3a8b369797bfbcd9da134729e601b18a596","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.base_multi_candidate_agent.MultiCandidateAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.base_multi_candidate_agent.MultiCandidateAgentArgs#L43-L50","kind":"class","name":"MultiCandidateAgentArgs","path":"src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","language":"python","start_line":43,"end_line":50,"context_start_line":23,"context_end_line":50,"code":" in a single step.\n\n Args:\n obs: The current observation dictionary containing environment state\n hint: Optional list of hint strings to guide candidate generation\n n_candidates: Number of candidate actions to generate\n \"\"\"\n ...\n\n def update_agent_state_from_selected_candidate(self, output: dict):\n \"\"\"\n Update the agent's internal state based on the selected candidate.\n This can include any memory or planning updates.\n\n Args:\n output: The selected candidate action dictionary\n \"\"\"\n pass\n\n\nclass MultiCandidateAgentArgs(AgentArgs):\n def make_agent(self) -> MultiCandidateAgent: ...\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"9be787fa4619fecac3c7f3d314dbf3a8b369797bfbcd9da134729e601b18a596","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.base_multi_candidate_agent.get_candidate_generations","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.base_multi_candidate_agent.get_candidate_generations#L14-L30","kind":"function","name":"get_candidate_generations","path":"src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","language":"python","start_line":14,"end_line":30,"context_start_line":1,"context_end_line":50,"code":"from typing_extensions import Protocol\n\nfrom agentlab.agents.agent_args import AgentArgs\n\n\nclass MultiCandidateAgent(Protocol):\n \"\"\"\n Protocol for agents that generate multiple candidates for get_action.\n\n This protocol defines the contract for agents that can generate\n 
multiple candidate actions and allow selection of one of them for execution.\n \"\"\"\n\n def get_candidate_generations(\n self, obs: dict, hint: list[str] | None = None, n_candidates: int = 3\n ) -> \"list[dict]\":\n \"\"\"\n Generate multiple candidate actions for the given observation.\n\n You can pass extra info in agent_info to update internal state of the\n agent based on the selected candidate. Your internal state management\n should be robust to multiple calls to the get_candidate_generations method\n in a single step.\n\n Args:\n obs: The current observation dictionary containing environment state\n hint: Optional list of hint strings to guide candidate generation\n n_candidates: Number of candidate actions to generate\n \"\"\"\n ...\n\n def update_agent_state_from_selected_candidate(self, output: dict):\n \"\"\"\n Update the agent's internal state based on the selected candidate.\n This can include any memory or planning updates.\n\n Args:\n output: The selected candidate action dictionary\n \"\"\"\n pass\n\n\nclass MultiCandidateAgentArgs(AgentArgs):\n def make_agent(self) -> MultiCandidateAgent: ...\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"9be787fa4619fecac3c7f3d314dbf3a8b369797bfbcd9da134729e601b18a596","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.base_multi_candidate_agent.update_agent_state_from_selected_candidate","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.base_multi_candidate_agent.update_agent_state_from_selected_candidate#L32-L40","kind":"function","name":"update_agent_state_from_selected_candidate","path":"src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","language":"python","start_line":32,"end_line":40,"context_start_line":12,"context_end_line":50,"code":" \"\"\"\n\n def get_candidate_generations(\n self, obs: dict, hint: list[str] | None = None, n_candidates: int = 3\n ) -> \"list[dict]\":\n \"\"\"\n Generate multiple candidate actions for the given observation.\n\n You can pass extra info in agent_info to update internal state of the\n agent based on the selected candidate. 
Your internal state management\n should be robust to multiple calls to the get_candidate_generations method\n in a single step.\n\n Args:\n obs: The current observation dictionary containing environment state\n hint: Optional list of hint strings to guide candidate generation\n n_candidates: Number of candidate actions to generate\n \"\"\"\n ...\n\n def update_agent_state_from_selected_candidate(self, output: dict):\n \"\"\"\n Update the agent's internal state based on the selected candidate.\n This can include any memory or planning updates.\n\n Args:\n output: The selected candidate action dictionary\n \"\"\"\n pass\n\n\nclass MultiCandidateAgentArgs(AgentArgs):\n def make_agent(self) -> MultiCandidateAgent: ...\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"9be787fa4619fecac3c7f3d314dbf3a8b369797bfbcd9da134729e601b18a596","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.base_multi_candidate_agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.base_multi_candidate_agent.make_agent#L44-L44","kind":"function","name":"make_agent","path":"src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","language":"python","start_line":44,"end_line":44,"context_start_line":24,"context_end_line":50,"code":"\n Args:\n obs: The current observation dictionary containing environment state\n hint: Optional list of hint strings to guide candidate generation\n n_candidates: Number of candidate actions to generate\n \"\"\"\n ...\n\n def update_agent_state_from_selected_candidate(self, output: dict):\n \"\"\"\n Update the agent's internal state based on the selected candidate.\n This can include any memory or planning updates.\n\n Args:\n output: The selected candidate action dictionary\n \"\"\"\n pass\n\n\nclass MultiCandidateAgentArgs(AgentArgs):\n def make_agent(self) -> MultiCandidateAgent: ...\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"9be787fa4619fecac3c7f3d314dbf3a8b369797bfbcd9da134729e601b18a596","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.base_multi_candidate_agent.__post_init__","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.base_multi_candidate_agent.__post_init__#L46-L50","kind":"function","name":"__post_init__","path":"src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","language":"python","start_line":46,"end_line":50,"context_start_line":26,"context_end_line":50,"code":" obs: The current observation dictionary containing environment state\n hint: Optional list of hint strings to guide candidate generation\n n_candidates: Number of candidate actions to generate\n \"\"\"\n ...\n\n def update_agent_state_from_selected_candidate(self, output: dict):\n \"\"\"\n Update the agent's internal state based on the selected candidate.\n This can include any memory or planning updates.\n\n Args:\n output: The selected candidate action dictionary\n \"\"\"\n pass\n\n\nclass MultiCandidateAgentArgs(AgentArgs):\n def make_agent(self) -> MultiCandidateAgent: ...\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n 
self.agent_name = \"MC-\" + self.agent_name","source_hash":"9be787fa4619fecac3c7f3d314dbf3a8b369797bfbcd9da134729e601b18a596","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.launch_hint_ui","uri":"program://AgentLab/module/src.agentlab.agents.hitl_agent.launch_hint_ui#L1-L176","kind":"module","name":"src.agentlab.agents.hitl_agent.launch_hint_ui","path":"src/agentlab/agents/hitl_agent/launch_hint_ui.py","language":"python","start_line":1,"end_line":176,"context_start_line":1,"context_end_line":176,"code":"\"\"\"\nConsole launcher for the Human-in-the-Loop Generic Agent UI.\n\nUsage (installed entry point):\n agentlab-mentor --benchmark miniwob --task-name miniwob.book-flight --seed 123 --no-headless\n\nThis will run a Study with the MultipleProposalGenericAgent and the selected task.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport argparse\nimport logging\nfrom pathlib import Path\n\nimport bgym\n\nfrom agentlab.agents.hitl_agent.generic_human_guided_agent import get_base_agent\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.study import Study\n\n\ndef build_benchmark(benchmark_name: str, task_name: str, seed: int, headless: bool):\n # Instantiate benchmark by name using BrowserGym registry\n try:\n benchmark = bgym.DEFAULT_BENCHMARKS[benchmark_name.lower()]()\n except KeyError as e:\n choices = \", \".join(sorted(bgym.DEFAULT_BENCHMARKS.keys()))\n raise SystemExit(f\"Unknown benchmark '{benchmark_name}'. Choose one of: {choices}\") from e\n\n filtered_env_args = [\n env_args for env_args in benchmark.env_args_list if env_args.task_name == task_name\n ]\n if not filtered_env_args:\n raise SystemExit(f'No tasks found matching \"{task_name}\"')\n filtered_env_args = filtered_env_args[:1] # take the first one\n benchmark.env_args_list = filtered_env_args\n\n # Reasonable defaults for interactive UI\n for env_args in benchmark.env_args_list:\n env_args.task_seed = seed\n env_args.max_steps = env_args.max_steps or 200\n env_args.headless = headless\n\n return benchmark\n\n\ndef extract_hints_from_experiment_trace(exp_dir):\n \"\"\"Extracts hints from every step of each episode in a exp_dir and returns a df with each row containing a hint.\n\n Args:\n exp_dir: Path-like to a study/experiment directory whose results should be scanned.\n\n Returns:\n pandas.DataFrame: One row per hint with metadata columns.\n \"\"\"\n import pandas as pd\n\n from agentlab.analyze import inspect_results\n from agentlab.experiments.exp_utils import RESULTS_DIR\n from agentlab.experiments.loop import ExpResult\n\n output = []\n # Use provided exp_dir if set; otherwise default to <$AGENTLAB_EXP_ROOT>/agentlab_mentor\n result_df = inspect_results.load_result_df(exp_dir or (RESULTS_DIR / \"agentlab_mentor\"))\n if result_df is None:\n # No results to parse; return empty dataframe with expected columns\n return pd.DataFrame(\n columns=[\n \"exp_id\",\n \"agent_name\",\n \"benchmark\",\n \"task_name\",\n \"episode_reward\",\n \"hint\",\n ]\n )\n result_df = result_df.reset_index()\n for _, row in result_df.iterrows():\n result = ExpResult(row.exp_dir)\n episode = result.steps_info\n episode_reward = max([step.reward for step in episode])\n for step_info in episode:\n step_hints = step_info.agent_info.get(\"extra_info\", {}).get(\"step_hints\", None)\n if step_hints:\n for hint in step_hints:\n output.append(\n {\n \"exp_id\": row[\"exp_id\"],\n \"agent_name\": row[\"agent.agent_name\"],\n \"benchmark\": 
row[\"env.task_name\"].split(\".\")[0],\n \"task_name\": row[\"env.task_name\"],\n \"episode_reward\": episode_reward,\n \"hint\": hint,\n }\n )\n output = pd.DataFrame(output)\n output = output.dropna()\n return output\n\n\ndef parse_args():\n p = argparse.ArgumentParser(description=\"Run HITL Generic Agent UI on a benchmark task\")\n p.add_argument(\n \"--benchmark\",\n required=False,\n help=\"Benchmark name as registered in BrowserGym, e.g., miniwob, workarena_l1, webarena, visualwebarena\",\n )\n p.add_argument(\n \"--task-name\",\n dest=\"task_name\",\n required=False,\n help=\"Exact task name within the benchmark (e.g., 'miniwob.book-flight')\",\n )\n p.add_argument(\n \"--seed\",\n type=int,\n required=False,\n help=\"Task seed to use for the selected task.\",\n )\n p.add_argument(\n \"--llm-config\",\n dest=\"llm_config\",\n default=\"openai/gpt-5-mini-2025-08-07\",\n help=\"LLM configuration to use for the agent (e.g., 'azure/gpt-5-mini-2025-08-07').\",\n )\n p.add_argument(\n \"--headless\",\n action=argparse.BooleanOptionalAction,\n default=True,\n help=\"Run the browser headless (default: True). Use --no-headless to show the browser.\",\n )\n p.add_argument(\n \"--download-hints\",\n nargs=\"?\",\n const=\"extracted_hints.csv\",\n required=False,\n default=None,\n metavar=\"[OUTPUT_CSV]\",\n help=(\n \"Extract hints from the default study directory and save to OUTPUT_CSV. \"\n \"If OUTPUT_CSV is omitted, saves to 'extracted_hints.csv'. When provided, other args are ignored.\"\n ),\n )\n return p.parse_args()\n\n\ndef main():\n args = parse_args()\n save_dir = RESULTS_DIR / \"agentlab_mentor\"\n if args.download_hints:\n df = extract_hints_from_experiment_trace(save_dir)\n out_path = Path(args.download_hints)\n out_path.parent.mkdir(parents=True, exist_ok=True)\n df.to_csv(out_path, index=False)\n print(str(out_path))\n return\n # Validate required args only when not downloading hints\n if not args.benchmark or not args.task_name or args.seed is None:\n raise SystemExit(\n \"--benchmark, --task-name, and --seed are required unless using --download-hints\"\n )\n benchmark = build_benchmark(args.benchmark, args.task_name, args.seed, args.headless)\n agent_configs = [get_base_agent(args.llm_config)]\n # study is needed to run the 'set_benchmark' method which sets appropriate agent parameters.\n study = Study(agent_args=agent_configs, benchmark=benchmark, logging_level=logging.WARNING)\n study.run(\n n_jobs=1,\n parallel_backend=\"sequential\",\n n_relaunch=1,\n exp_root=save_dir,\n )\n\n\nif __name__ == \"__main__\":\n main()","source_hash":"c683689e509a684a36e0b5ae00b55d64df79c43b22b3199c053399e2514f5a71","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.launch_hint_ui.build_benchmark","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.launch_hint_ui.build_benchmark#L23-L45","kind":"function","name":"build_benchmark","path":"src/agentlab/agents/hitl_agent/launch_hint_ui.py","language":"python","start_line":23,"end_line":45,"context_start_line":3,"context_end_line":65,"code":"\nUsage (installed entry point):\n agentlab-mentor --benchmark miniwob --task-name miniwob.book-flight --seed 123 --no-headless\n\nThis will run a Study with the MultipleProposalGenericAgent and the selected task.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport argparse\nimport logging\nfrom pathlib import Path\n\nimport bgym\n\nfrom agentlab.agents.hitl_agent.generic_human_guided_agent import get_base_agent\nfrom agentlab.experiments.exp_utils 
import RESULTS_DIR\nfrom agentlab.experiments.study import Study\n\n\ndef build_benchmark(benchmark_name: str, task_name: str, seed: int, headless: bool):\n # Instantiate benchmark by name using BrowserGym registry\n try:\n benchmark = bgym.DEFAULT_BENCHMARKS[benchmark_name.lower()]()\n except KeyError as e:\n choices = \", \".join(sorted(bgym.DEFAULT_BENCHMARKS.keys()))\n raise SystemExit(f\"Unknown benchmark '{benchmark_name}'. Choose one of: {choices}\") from e\n\n filtered_env_args = [\n env_args for env_args in benchmark.env_args_list if env_args.task_name == task_name\n ]\n if not filtered_env_args:\n raise SystemExit(f'No tasks found matching \"{task_name}\"')\n filtered_env_args = filtered_env_args[:1] # take the first one\n benchmark.env_args_list = filtered_env_args\n\n # Reasonable defaults for interactive UI\n for env_args in benchmark.env_args_list:\n env_args.task_seed = seed\n env_args.max_steps = env_args.max_steps or 200\n env_args.headless = headless\n\n return benchmark\n\n\ndef extract_hints_from_experiment_trace(exp_dir):\n \"\"\"Extracts hints from every step of each episode in a exp_dir and returns a df with each row containing a hint.\n\n Args:\n exp_dir: Path-like to a study/experiment directory whose results should be scanned.\n\n Returns:\n pandas.DataFrame: One row per hint with metadata columns.\n \"\"\"\n import pandas as pd\n\n from agentlab.analyze import inspect_results\n from agentlab.experiments.exp_utils import RESULTS_DIR\n from agentlab.experiments.loop import ExpResult\n\n output = []\n # Use provided exp_dir if set; otherwise default to <$AGENTLAB_EXP_ROOT>/agentlab_mentor\n result_df = inspect_results.load_result_df(exp_dir or (RESULTS_DIR / \"agentlab_mentor\"))","source_hash":"c683689e509a684a36e0b5ae00b55d64df79c43b22b3199c053399e2514f5a71","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.launch_hint_ui.extract_hints_from_experiment_trace","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.launch_hint_ui.extract_hints_from_experiment_trace#L48-L99","kind":"function","name":"extract_hints_from_experiment_trace","path":"src/agentlab/agents/hitl_agent/launch_hint_ui.py","language":"python","start_line":48,"end_line":99,"context_start_line":28,"context_end_line":119,"code":" choices = \", \".join(sorted(bgym.DEFAULT_BENCHMARKS.keys()))\n raise SystemExit(f\"Unknown benchmark '{benchmark_name}'. 
Choose one of: {choices}\") from e\n\n filtered_env_args = [\n env_args for env_args in benchmark.env_args_list if env_args.task_name == task_name\n ]\n if not filtered_env_args:\n raise SystemExit(f'No tasks found matching \"{task_name}\"')\n filtered_env_args = filtered_env_args[:1] # take the first one\n benchmark.env_args_list = filtered_env_args\n\n # Reasonable defaults for interactive UI\n for env_args in benchmark.env_args_list:\n env_args.task_seed = seed\n env_args.max_steps = env_args.max_steps or 200\n env_args.headless = headless\n\n return benchmark\n\n\ndef extract_hints_from_experiment_trace(exp_dir):\n \"\"\"Extracts hints from every step of each episode in a exp_dir and returns a df with each row containing a hint.\n\n Args:\n exp_dir: Path-like to a study/experiment directory whose results should be scanned.\n\n Returns:\n pandas.DataFrame: One row per hint with metadata columns.\n \"\"\"\n import pandas as pd\n\n from agentlab.analyze import inspect_results\n from agentlab.experiments.exp_utils import RESULTS_DIR\n from agentlab.experiments.loop import ExpResult\n\n output = []\n # Use provided exp_dir if set; otherwise default to <$AGENTLAB_EXP_ROOT>/agentlab_mentor\n result_df = inspect_results.load_result_df(exp_dir or (RESULTS_DIR / \"agentlab_mentor\"))\n if result_df is None:\n # No results to parse; return empty dataframe with expected columns\n return pd.DataFrame(\n columns=[\n \"exp_id\",\n \"agent_name\",\n \"benchmark\",\n \"task_name\",\n \"episode_reward\",\n \"hint\",\n ]\n )\n result_df = result_df.reset_index()\n for _, row in result_df.iterrows():\n result = ExpResult(row.exp_dir)\n episode = result.steps_info\n episode_reward = max([step.reward for step in episode])\n for step_info in episode:\n step_hints = step_info.agent_info.get(\"extra_info\", {}).get(\"step_hints\", None)\n if step_hints:\n for hint in step_hints:\n output.append(\n {\n \"exp_id\": row[\"exp_id\"],\n \"agent_name\": row[\"agent.agent_name\"],\n \"benchmark\": row[\"env.task_name\"].split(\".\")[0],\n \"task_name\": row[\"env.task_name\"],\n \"episode_reward\": episode_reward,\n \"hint\": hint,\n }\n )\n output = pd.DataFrame(output)\n output = output.dropna()\n return output\n\n\ndef parse_args():\n p = argparse.ArgumentParser(description=\"Run HITL Generic Agent UI on a benchmark task\")\n p.add_argument(\n \"--benchmark\",\n required=False,\n help=\"Benchmark name as registered in BrowserGym, e.g., miniwob, workarena_l1, webarena, visualwebarena\",\n )\n p.add_argument(\n \"--task-name\",\n dest=\"task_name\",\n required=False,\n help=\"Exact task name within the benchmark (e.g., 'miniwob.book-flight')\",\n )\n p.add_argument(\n \"--seed\",\n type=int,\n required=False,\n help=\"Task seed to use for the selected task.\",","source_hash":"c683689e509a684a36e0b5ae00b55d64df79c43b22b3199c053399e2514f5a71","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.launch_hint_ui.parse_args","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.launch_hint_ui.parse_args#L102-L145","kind":"function","name":"parse_args","path":"src/agentlab/agents/hitl_agent/launch_hint_ui.py","language":"python","start_line":102,"end_line":145,"context_start_line":82,"context_end_line":165,"code":" episode_reward = max([step.reward for step in episode])\n for step_info in episode:\n step_hints = step_info.agent_info.get(\"extra_info\", {}).get(\"step_hints\", None)\n if step_hints:\n for hint in step_hints:\n output.append(\n {\n \"exp_id\": row[\"exp_id\"],\n 
\"agent_name\": row[\"agent.agent_name\"],\n \"benchmark\": row[\"env.task_name\"].split(\".\")[0],\n \"task_name\": row[\"env.task_name\"],\n \"episode_reward\": episode_reward,\n \"hint\": hint,\n }\n )\n output = pd.DataFrame(output)\n output = output.dropna()\n return output\n\n\ndef parse_args():\n p = argparse.ArgumentParser(description=\"Run HITL Generic Agent UI on a benchmark task\")\n p.add_argument(\n \"--benchmark\",\n required=False,\n help=\"Benchmark name as registered in BrowserGym, e.g., miniwob, workarena_l1, webarena, visualwebarena\",\n )\n p.add_argument(\n \"--task-name\",\n dest=\"task_name\",\n required=False,\n help=\"Exact task name within the benchmark (e.g., 'miniwob.book-flight')\",\n )\n p.add_argument(\n \"--seed\",\n type=int,\n required=False,\n help=\"Task seed to use for the selected task.\",\n )\n p.add_argument(\n \"--llm-config\",\n dest=\"llm_config\",\n default=\"openai/gpt-5-mini-2025-08-07\",\n help=\"LLM configuration to use for the agent (e.g., 'azure/gpt-5-mini-2025-08-07').\",\n )\n p.add_argument(\n \"--headless\",\n action=argparse.BooleanOptionalAction,\n default=True,\n help=\"Run the browser headless (default: True). Use --no-headless to show the browser.\",\n )\n p.add_argument(\n \"--download-hints\",\n nargs=\"?\",\n const=\"extracted_hints.csv\",\n required=False,\n default=None,\n metavar=\"[OUTPUT_CSV]\",\n help=(\n \"Extract hints from the default study directory and save to OUTPUT_CSV. \"\n \"If OUTPUT_CSV is omitted, saves to 'extracted_hints.csv'. When provided, other args are ignored.\"\n ),\n )\n return p.parse_args()\n\n\ndef main():\n args = parse_args()\n save_dir = RESULTS_DIR / \"agentlab_mentor\"\n if args.download_hints:\n df = extract_hints_from_experiment_trace(save_dir)\n out_path = Path(args.download_hints)\n out_path.parent.mkdir(parents=True, exist_ok=True)\n df.to_csv(out_path, index=False)\n print(str(out_path))\n return\n # Validate required args only when not downloading hints\n if not args.benchmark or not args.task_name or args.seed is None:\n raise SystemExit(\n \"--benchmark, --task-name, and --seed are required unless using --download-hints\"\n )\n benchmark = build_benchmark(args.benchmark, args.task_name, args.seed, args.headless)\n agent_configs = [get_base_agent(args.llm_config)]\n # study is needed to run the 'set_benchmark' method which sets appropriate agent parameters.","source_hash":"c683689e509a684a36e0b5ae00b55d64df79c43b22b3199c053399e2514f5a71","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.launch_hint_ui.main","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.launch_hint_ui.main#L148-L172","kind":"function","name":"main","path":"src/agentlab/agents/hitl_agent/launch_hint_ui.py","language":"python","start_line":148,"end_line":172,"context_start_line":128,"context_end_line":176,"code":" \"--headless\",\n action=argparse.BooleanOptionalAction,\n default=True,\n help=\"Run the browser headless (default: True). Use --no-headless to show the browser.\",\n )\n p.add_argument(\n \"--download-hints\",\n nargs=\"?\",\n const=\"extracted_hints.csv\",\n required=False,\n default=None,\n metavar=\"[OUTPUT_CSV]\",\n help=(\n \"Extract hints from the default study directory and save to OUTPUT_CSV. \"\n \"If OUTPUT_CSV is omitted, saves to 'extracted_hints.csv'. 
When provided, other args are ignored.\"\n ),\n )\n return p.parse_args()\n\n\ndef main():\n args = parse_args()\n save_dir = RESULTS_DIR / \"agentlab_mentor\"\n if args.download_hints:\n df = extract_hints_from_experiment_trace(save_dir)\n out_path = Path(args.download_hints)\n out_path.parent.mkdir(parents=True, exist_ok=True)\n df.to_csv(out_path, index=False)\n print(str(out_path))\n return\n # Validate required args only when not downloading hints\n if not args.benchmark or not args.task_name or args.seed is None:\n raise SystemExit(\n \"--benchmark, --task-name, and --seed are required unless using --download-hints\"\n )\n benchmark = build_benchmark(args.benchmark, args.task_name, args.seed, args.headless)\n agent_configs = [get_base_agent(args.llm_config)]\n # study is needed to run the 'set_benchmark' method which sets appropriate agent parameters.\n study = Study(agent_args=agent_configs, benchmark=benchmark, logging_level=logging.WARNING)\n study.run(\n n_jobs=1,\n parallel_backend=\"sequential\",\n n_relaunch=1,\n exp_root=save_dir,\n )\n\n\nif __name__ == \"__main__\":\n main()","source_hash":"c683689e509a684a36e0b5ae00b55d64df79c43b22b3199c053399e2514f5a71","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent","uri":"program://AgentLab/module/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent#L1-L225","kind":"module","name":"src.agentlab.agents.hitl_agent.multi_candidate_generic_agent","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":1,"end_line":225,"context_start_line":1,"context_end_line":225,"code":"import re\nfrom dataclasses import asdict, dataclass\nfrom typing import Dict, List\n\nfrom browsergym.experiments.agent import AgentInfo\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgent, GenericAgentArgs\nfrom agentlab.agents.generic_agent.generic_agent_prompt import MainPrompt\nfrom agentlab.llm.llm_utils import Discussion, HumanMessage, SystemMessage\n\n\nclass CandidatesGeneration(dp.PromptElement):\n # Ask for multiple alternatives; each candidate must contain and .\n def __init__(self, hint: list[str] | None = None, n_candidates=3) -> None:\n self.hint = hint\n self.n_candidates = n_candidates\n self.hint_prompt = \"\\n\".join(f\"{i}. {c}\" for i, c in enumerate(hint, 1)) if hint else \"\"\n super().__init__(True)\n self._prompt = [\n dict(\n type=\"text\",\n text=f\"\"\"\n You are a web agent. 
Propose {self.n_candidates} alternative next steps for the current page.\n {('Use the Hints:' + self.hint_prompt) if self.hint else \"\"}\\n\n Return EACH candidate wrapped as numbered tags:\n ...\n ...\n\n Inside every candidate you MUST include:\n ...why this action is appropriate now...\n ...ONE atomic, executable action string...\n\n Do not include any extra text outside the candidate tags.\n Use this format:\n \n Explain why Candidate One is chosen\n Candidate One Action\n \n\n \n Explain why Candidate Two is chosen\n Candidate Two Action\n \n # Example \n \n The login button is visible and proceeding will reveal the auth form.\n click(role=\"button\", name=\"Log in\")\n \n\n \n User might need to enter email first; the email field is focused and visible.\n fill(bid=\"a112\", text=\"user@example.com\")\n \n \"\"\",\n )\n ]\n\n # Regex patterns for numbered candidates only\n _NUM_BLOCK = re.compile(\n r\"<\\s*candidate[_ ]generation[_ ](?P[0-9]+)\\s*>(?P.*?)<\\s*/\\s*candidate[_ ]generation[_ ](?P=idx)\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _THINK_PATTERN = re.compile(\n r\"<\\s*think\\s*>(?P.*?)<\\s*/\\s*think\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _ACTION_PATTERN = re.compile(\n r\"<\\s*action\\s*>(?P.*?)<\\s*/\\s*action\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n\n def _parse_answer(self, text_answer: str) -> Dict[str, Dict[str, str]]:\n \"\"\"Extract up to n_candidates candidates, using numbered tags only.\n\n Args:\n text_answer: The text response containing candidate generation tags.\n\n Returns:\n Dictionary mapping candidate names to their think and action content.\n Format: {\"candidate_generation_1\": {\"think\": \"...\", \"action\": \"...\"}, ...}\n \"\"\"\n result = {\n f\"candidate_generation_{i+1}\": {\"think\": \"\", \"action\": \"\"}\n for i in range(self.n_candidates)\n }\n\n if not isinstance(text_answer, str):\n return result\n\n matches: List[re.Match] = list(self._NUM_BLOCK.finditer(text_answer))\n # Sort by numeric index\n matches_sorted = sorted(matches, key=lambda m: int(m.group(\"idx\")))\n for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\nclass MultiCandidateGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_candidate_generations(\n self,\n obs,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> list[dict]:\n # Append obs to history only if it's not already the last entry\n # Important to handle cases when get_candidate_generation is called multiple times in a single step.\n if not self.obs_history or self.obs_history[-1] is not obs:\n self.obs_history.append(obs)\n\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n max_prompt_tokens, max_trunc_itr = self._get_maxes()\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n\n human_prompt = dp.fit_tokens(\n shrinkable=main_prompt,\n max_prompt_tokens=max_prompt_tokens,\n model_name=self.chat_model_args.model_name,\n 
max_iterations=max_trunc_itr,\n additional_prompts=system_prompt,\n )\n\n cg = CandidatesGeneration(hint=hint, n_candidates=n_candidates)\n candidates_prompt = HumanMessage(cg.prompt)\n chat_messages = Discussion([system_prompt, human_prompt, candidates_prompt])\n output = self.chat_llm(chat_messages)\n candidates = cg._parse_answer(output[\"content\"])\n # Not adding the generate candidate prompt to xray.\n msg_to_add_to_xray = Discussion([system_prompt, human_prompt])\n suggestions = [\n {\n \"action\": candidate[\"action\"],\n \"think\": candidate[\"think\"],\n }\n for key, candidate in candidates.items()\n ]\n output = []\n for candidate in suggestions:\n agent_info = AgentInfo(\n think=candidate.get(\"think\", None),\n chat_messages=msg_to_add_to_xray,\n stats=self.chat_llm.get_stats(),\n extra_info={\n \"chat_model_args\": asdict(self.chat_model_args),\n \"think\": candidate.get(\"think\", None),\n \"plan\": candidate.get(\"plan\", None),\n \"step\": candidate.get(\"step\", None),\n \"memory\": candidate.get(\"memory\", None),\n },\n )\n output.append({\"action\": candidate[\"action\"], \"agent_info\": agent_info})\n\n return output\n\n def update_agent_state_from_selected_candidate(self, output):\n \"\"\"Updates the agent's internal state based on the selected candidate from human feedback.\n\n Args:\n output: Dictionary containing 'action' and 'agent_info' keys from selected candidate.\n \"\"\"\n action, agent_info = output[\"action\"], output[\"agent_info\"]\n self.plan = agent_info.extra_info.get(\"plan\", self.plan)\n self.plan_step = agent_info.extra_info.get(\"step\", self.plan_step)\n self.memories.append(agent_info.extra_info.get(\"memory\", None))\n self.thoughts.append(agent_info.extra_info.get(\"think\", None))\n self.actions.append(action)\n\n def get_action(self, obs):\n \"\"\"Generates multiple candidates and always returns the first one.\n This allows to use this agent as a drop-in replacement for a single-candidate agent.\n\n Args:\n obs: The observation from the environment.\n\n Returns:\n tuple: A tuple containing (action, agent_info).\n \"\"\"\n candidates = self.get_candidate_generations(obs, hint=None, n_candidates=2)\n selection = candidates[0] # always select the first option.\n self.update_agent_state_from_selected_candidate(selection)\n action, agent_info = selection[\"action\"], selection[\"agent_info\"]\n\n return action, agent_info\n\n\n@dataclass\nclass MultiCandidateGenericAgentArgs(GenericAgentArgs):\n def make_agent(self):\n return MultiCandidateGenericAgent(\n chat_model_args=self.chat_model_args,\n flags=self.flags,\n max_retry=self.max_retry,\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.CandidatesGeneration","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.CandidatesGeneration#L13-L103","kind":"class","name":"CandidatesGeneration","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":13,"end_line":103,"context_start_line":1,"context_end_line":123,"code":"import re\nfrom dataclasses import asdict, dataclass\nfrom typing import Dict, List\n\nfrom browsergym.experiments.agent import AgentInfo\n\nfrom 
agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgent, GenericAgentArgs\nfrom agentlab.agents.generic_agent.generic_agent_prompt import MainPrompt\nfrom agentlab.llm.llm_utils import Discussion, HumanMessage, SystemMessage\n\n\nclass CandidatesGeneration(dp.PromptElement):\n # Ask for multiple alternatives; each candidate must contain and .\n def __init__(self, hint: list[str] | None = None, n_candidates=3) -> None:\n self.hint = hint\n self.n_candidates = n_candidates\n self.hint_prompt = \"\\n\".join(f\"{i}. {c}\" for i, c in enumerate(hint, 1)) if hint else \"\"\n super().__init__(True)\n self._prompt = [\n dict(\n type=\"text\",\n text=f\"\"\"\n You are a web agent. Propose {self.n_candidates} alternative next steps for the current page.\n {('Use the Hints:' + self.hint_prompt) if self.hint else \"\"}\\n\n Return EACH candidate wrapped as numbered tags:\n ...\n ...\n\n Inside every candidate you MUST include:\n ...why this action is appropriate now...\n ...ONE atomic, executable action string...\n\n Do not include any extra text outside the candidate tags.\n Use this format:\n \n Explain why Candidate One is chosen\n Candidate One Action\n \n\n \n Explain why Candidate Two is chosen\n Candidate Two Action\n \n # Example \n \n The login button is visible and proceeding will reveal the auth form.\n click(role=\"button\", name=\"Log in\")\n \n\n \n User might need to enter email first; the email field is focused and visible.\n fill(bid=\"a112\", text=\"user@example.com\")\n \n \"\"\",\n )\n ]\n\n # Regex patterns for numbered candidates only\n _NUM_BLOCK = re.compile(\n r\"<\\s*candidate[_ ]generation[_ ](?P[0-9]+)\\s*>(?P.*?)<\\s*/\\s*candidate[_ ]generation[_ ](?P=idx)\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _THINK_PATTERN = re.compile(\n r\"<\\s*think\\s*>(?P.*?)<\\s*/\\s*think\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _ACTION_PATTERN = re.compile(\n r\"<\\s*action\\s*>(?P.*?)<\\s*/\\s*action\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n\n def _parse_answer(self, text_answer: str) -> Dict[str, Dict[str, str]]:\n \"\"\"Extract up to n_candidates candidates, using numbered tags only.\n\n Args:\n text_answer: The text response containing candidate generation tags.\n\n Returns:\n Dictionary mapping candidate names to their think and action content.\n Format: {\"candidate_generation_1\": {\"think\": \"...\", \"action\": \"...\"}, ...}\n \"\"\"\n result = {\n f\"candidate_generation_{i+1}\": {\"think\": \"\", \"action\": \"\"}\n for i in range(self.n_candidates)\n }\n\n if not isinstance(text_answer, str):\n return result\n\n matches: List[re.Match] = list(self._NUM_BLOCK.finditer(text_answer))\n # Sort by numeric index\n matches_sorted = sorted(matches, key=lambda m: int(m.group(\"idx\")))\n for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\nclass MultiCandidateGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_candidate_generations(\n self,\n obs,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> list[dict]:\n # Append obs to history 
only if it's not already the last entry\n # Important to handle cases when get_candidate_generation is called multiple times in a single step.","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.MultiCandidateGenericAgent","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.MultiCandidateGenericAgent#L106-L209","kind":"class","name":"MultiCandidateGenericAgent","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":106,"end_line":209,"context_start_line":86,"context_end_line":225,"code":" }\n\n if not isinstance(text_answer, str):\n return result\n\n matches: List[re.Match] = list(self._NUM_BLOCK.finditer(text_answer))\n # Sort by numeric index\n matches_sorted = sorted(matches, key=lambda m: int(m.group(\"idx\")))\n for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\nclass MultiCandidateGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_candidate_generations(\n self,\n obs,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> list[dict]:\n # Append obs to history only if it's not already the last entry\n # Important to handle cases when get_candidate_generation is called multiple times in a single step.\n if not self.obs_history or self.obs_history[-1] is not obs:\n self.obs_history.append(obs)\n\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n max_prompt_tokens, max_trunc_itr = self._get_maxes()\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n\n human_prompt = dp.fit_tokens(\n shrinkable=main_prompt,\n max_prompt_tokens=max_prompt_tokens,\n model_name=self.chat_model_args.model_name,\n max_iterations=max_trunc_itr,\n additional_prompts=system_prompt,\n )\n\n cg = CandidatesGeneration(hint=hint, n_candidates=n_candidates)\n candidates_prompt = HumanMessage(cg.prompt)\n chat_messages = Discussion([system_prompt, human_prompt, candidates_prompt])\n output = self.chat_llm(chat_messages)\n candidates = cg._parse_answer(output[\"content\"])\n # Not adding the generate candidate prompt to xray.\n msg_to_add_to_xray = Discussion([system_prompt, human_prompt])\n suggestions = [\n {\n \"action\": candidate[\"action\"],\n \"think\": candidate[\"think\"],\n }\n for key, candidate in candidates.items()\n ]\n output = []\n for candidate in suggestions:\n agent_info = AgentInfo(\n think=candidate.get(\"think\", None),\n chat_messages=msg_to_add_to_xray,\n stats=self.chat_llm.get_stats(),\n extra_info={\n \"chat_model_args\": asdict(self.chat_model_args),\n \"think\": candidate.get(\"think\", None),\n \"plan\": candidate.get(\"plan\", None),\n \"step\": candidate.get(\"step\", None),\n \"memory\": candidate.get(\"memory\", None),\n },\n )\n output.append({\"action\": candidate[\"action\"], 
\"agent_info\": agent_info})\n\n return output\n\n def update_agent_state_from_selected_candidate(self, output):\n \"\"\"Updates the agent's internal state based on the selected candidate from human feedback.\n\n Args:\n output: Dictionary containing 'action' and 'agent_info' keys from selected candidate.\n \"\"\"\n action, agent_info = output[\"action\"], output[\"agent_info\"]\n self.plan = agent_info.extra_info.get(\"plan\", self.plan)\n self.plan_step = agent_info.extra_info.get(\"step\", self.plan_step)\n self.memories.append(agent_info.extra_info.get(\"memory\", None))\n self.thoughts.append(agent_info.extra_info.get(\"think\", None))\n self.actions.append(action)\n\n def get_action(self, obs):\n \"\"\"Generates multiple candidates and always returns the first one.\n This allows to use this agent as a drop-in replacement for a single-candidate agent.\n\n Args:\n obs: The observation from the environment.\n\n Returns:\n tuple: A tuple containing (action, agent_info).\n \"\"\"\n candidates = self.get_candidate_generations(obs, hint=None, n_candidates=2)\n selection = candidates[0] # always select the first option.\n self.update_agent_state_from_selected_candidate(selection)\n action, agent_info = selection[\"action\"], selection[\"agent_info\"]\n\n return action, agent_info\n\n\n@dataclass\nclass MultiCandidateGenericAgentArgs(GenericAgentArgs):\n def make_agent(self):\n return MultiCandidateGenericAgent(\n chat_model_args=self.chat_model_args,\n flags=self.flags,\n max_retry=self.max_retry,\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.MultiCandidateGenericAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.MultiCandidateGenericAgentArgs#L213-L225","kind":"class","name":"MultiCandidateGenericAgentArgs","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":213,"end_line":225,"context_start_line":193,"context_end_line":225,"code":"\n def get_action(self, obs):\n \"\"\"Generates multiple candidates and always returns the first one.\n This allows to use this agent as a drop-in replacement for a single-candidate agent.\n\n Args:\n obs: The observation from the environment.\n\n Returns:\n tuple: A tuple containing (action, agent_info).\n \"\"\"\n candidates = self.get_candidate_generations(obs, hint=None, n_candidates=2)\n selection = candidates[0] # always select the first option.\n self.update_agent_state_from_selected_candidate(selection)\n action, agent_info = selection[\"action\"], selection[\"agent_info\"]\n\n return action, agent_info\n\n\n@dataclass\nclass MultiCandidateGenericAgentArgs(GenericAgentArgs):\n def make_agent(self):\n return MultiCandidateGenericAgent(\n chat_model_args=self.chat_model_args,\n flags=self.flags,\n max_retry=self.max_retry,\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.__init__#L108-L114","kind":"function","name":"__init__","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":108,"end_line":114,"context_start_line":88,"context_end_line":134,"code":" if not isinstance(text_answer, str):\n return result\n\n matches: List[re.Match] = list(self._NUM_BLOCK.finditer(text_answer))\n # Sort by numeric index\n matches_sorted = sorted(matches, key=lambda m: int(m.group(\"idx\")))\n for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\nclass MultiCandidateGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_candidate_generations(\n self,\n obs,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> list[dict]:\n # Append obs to history only if it's not already the last entry\n # Important to handle cases when get_candidate_generation is called multiple times in a single step.\n if not self.obs_history or self.obs_history[-1] is not obs:\n self.obs_history.append(obs)\n\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent._parse_answer","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent._parse_answer#L73-L103","kind":"function","name":"_parse_answer","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":73,"end_line":103,"context_start_line":53,"context_end_line":123,"code":" fill(bid=\"a112\", text=\"user@example.com\")\n \n \"\"\",\n )\n ]\n\n # Regex patterns for numbered candidates only\n _NUM_BLOCK = re.compile(\n r\"<\\s*candidate[_ ]generation[_ ](?P[0-9]+)\\s*>(?P.*?)<\\s*/\\s*candidate[_ ]generation[_ ](?P=idx)\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _THINK_PATTERN = re.compile(\n r\"<\\s*think\\s*>(?P.*?)<\\s*/\\s*think\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _ACTION_PATTERN = re.compile(\n r\"<\\s*action\\s*>(?P.*?)<\\s*/\\s*action\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n\n def _parse_answer(self, text_answer: str) -> Dict[str, Dict[str, str]]:\n \"\"\"Extract up to n_candidates candidates, using numbered tags only.\n\n Args:\n text_answer: The text response containing candidate generation tags.\n\n Returns:\n Dictionary mapping candidate names to their think and action content.\n Format: {\"candidate_generation_1\": {\"think\": \"...\", \"action\": \"...\"}, ...}\n \"\"\"\n result = {\n f\"candidate_generation_{i+1}\": {\"think\": \"\", \"action\": \"\"}\n for i in range(self.n_candidates)\n }\n\n if not isinstance(text_answer, str):\n return result\n\n 
matches: List[re.Match] = list(self._NUM_BLOCK.finditer(text_answer))\n # Sort by numeric index\n matches_sorted = sorted(matches, key=lambda m: int(m.group(\"idx\")))\n for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\nclass MultiCandidateGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_candidate_generations(\n self,\n obs,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> list[dict]:\n # Append obs to history only if it's not already the last entry\n # Important to handle cases when get_candidate_generation is called multiple times in a single step.","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.get_candidate_generations","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.get_candidate_generations#L116-L179","kind":"function","name":"get_candidate_generations","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":116,"end_line":179,"context_start_line":96,"context_end_line":199,"code":" think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\nclass MultiCandidateGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n\n def get_candidate_generations(\n self,\n obs,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> list[dict]:\n # Append obs to history only if it's not already the last entry\n # Important to handle cases when get_candidate_generation is called multiple times in a single step.\n if not self.obs_history or self.obs_history[-1] is not obs:\n self.obs_history.append(obs)\n\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n max_prompt_tokens, max_trunc_itr = self._get_maxes()\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n\n human_prompt = dp.fit_tokens(\n shrinkable=main_prompt,\n max_prompt_tokens=max_prompt_tokens,\n model_name=self.chat_model_args.model_name,\n max_iterations=max_trunc_itr,\n additional_prompts=system_prompt,\n )\n\n cg = CandidatesGeneration(hint=hint, n_candidates=n_candidates)\n candidates_prompt = HumanMessage(cg.prompt)\n chat_messages = Discussion([system_prompt, human_prompt, candidates_prompt])\n output = self.chat_llm(chat_messages)\n candidates = cg._parse_answer(output[\"content\"])\n # Not adding the generate candidate prompt to xray.\n msg_to_add_to_xray = Discussion([system_prompt, human_prompt])\n suggestions = [\n {\n \"action\": candidate[\"action\"],\n 
\"think\": candidate[\"think\"],\n }\n for key, candidate in candidates.items()\n ]\n output = []\n for candidate in suggestions:\n agent_info = AgentInfo(\n think=candidate.get(\"think\", None),\n chat_messages=msg_to_add_to_xray,\n stats=self.chat_llm.get_stats(),\n extra_info={\n \"chat_model_args\": asdict(self.chat_model_args),\n \"think\": candidate.get(\"think\", None),\n \"plan\": candidate.get(\"plan\", None),\n \"step\": candidate.get(\"step\", None),\n \"memory\": candidate.get(\"memory\", None),\n },\n )\n output.append({\"action\": candidate[\"action\"], \"agent_info\": agent_info})\n\n return output\n\n def update_agent_state_from_selected_candidate(self, output):\n \"\"\"Updates the agent's internal state based on the selected candidate from human feedback.\n\n Args:\n output: Dictionary containing 'action' and 'agent_info' keys from selected candidate.\n \"\"\"\n action, agent_info = output[\"action\"], output[\"agent_info\"]\n self.plan = agent_info.extra_info.get(\"plan\", self.plan)\n self.plan_step = agent_info.extra_info.get(\"step\", self.plan_step)\n self.memories.append(agent_info.extra_info.get(\"memory\", None))\n self.thoughts.append(agent_info.extra_info.get(\"think\", None))\n self.actions.append(action)\n\n def get_action(self, obs):\n \"\"\"Generates multiple candidates and always returns the first one.\n This allows to use this agent as a drop-in replacement for a single-candidate agent.\n\n Args:\n obs: The observation from the environment.","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.update_agent_state_from_selected_candidate","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.update_agent_state_from_selected_candidate#L181-L192","kind":"function","name":"update_agent_state_from_selected_candidate","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":181,"end_line":192,"context_start_line":161,"context_end_line":212,"code":" for key, candidate in candidates.items()\n ]\n output = []\n for candidate in suggestions:\n agent_info = AgentInfo(\n think=candidate.get(\"think\", None),\n chat_messages=msg_to_add_to_xray,\n stats=self.chat_llm.get_stats(),\n extra_info={\n \"chat_model_args\": asdict(self.chat_model_args),\n \"think\": candidate.get(\"think\", None),\n \"plan\": candidate.get(\"plan\", None),\n \"step\": candidate.get(\"step\", None),\n \"memory\": candidate.get(\"memory\", None),\n },\n )\n output.append({\"action\": candidate[\"action\"], \"agent_info\": agent_info})\n\n return output\n\n def update_agent_state_from_selected_candidate(self, output):\n \"\"\"Updates the agent's internal state based on the selected candidate from human feedback.\n\n Args:\n output: Dictionary containing 'action' and 'agent_info' keys from selected candidate.\n \"\"\"\n action, agent_info = output[\"action\"], output[\"agent_info\"]\n self.plan = agent_info.extra_info.get(\"plan\", self.plan)\n self.plan_step = agent_info.extra_info.get(\"step\", self.plan_step)\n self.memories.append(agent_info.extra_info.get(\"memory\", None))\n self.thoughts.append(agent_info.extra_info.get(\"think\", None))\n self.actions.append(action)\n\n def get_action(self, obs):\n \"\"\"Generates multiple candidates and always returns the first one.\n This allows to use this agent as a drop-in replacement for a single-candidate agent.\n\n 
Args:\n obs: The observation from the environment.\n\n Returns:\n tuple: A tuple containing (action, agent_info).\n \"\"\"\n candidates = self.get_candidate_generations(obs, hint=None, n_candidates=2)\n selection = candidates[0] # always select the first option.\n self.update_agent_state_from_selected_candidate(selection)\n action, agent_info = selection[\"action\"], selection[\"agent_info\"]\n\n return action, agent_info\n\n\n@dataclass","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.get_action#L194-L209","kind":"function","name":"get_action","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":194,"end_line":209,"context_start_line":174,"context_end_line":225,"code":" \"memory\": candidate.get(\"memory\", None),\n },\n )\n output.append({\"action\": candidate[\"action\"], \"agent_info\": agent_info})\n\n return output\n\n def update_agent_state_from_selected_candidate(self, output):\n \"\"\"Updates the agent's internal state based on the selected candidate from human feedback.\n\n Args:\n output: Dictionary containing 'action' and 'agent_info' keys from selected candidate.\n \"\"\"\n action, agent_info = output[\"action\"], output[\"agent_info\"]\n self.plan = agent_info.extra_info.get(\"plan\", self.plan)\n self.plan_step = agent_info.extra_info.get(\"step\", self.plan_step)\n self.memories.append(agent_info.extra_info.get(\"memory\", None))\n self.thoughts.append(agent_info.extra_info.get(\"think\", None))\n self.actions.append(action)\n\n def get_action(self, obs):\n \"\"\"Generates multiple candidates and always returns the first one.\n This allows to use this agent as a drop-in replacement for a single-candidate agent.\n\n Args:\n obs: The observation from the environment.\n\n Returns:\n tuple: A tuple containing (action, agent_info).\n \"\"\"\n candidates = self.get_candidate_generations(obs, hint=None, n_candidates=2)\n selection = candidates[0] # always select the first option.\n self.update_agent_state_from_selected_candidate(selection)\n action, agent_info = selection[\"action\"], selection[\"agent_info\"]\n\n return action, agent_info\n\n\n@dataclass\nclass MultiCandidateGenericAgentArgs(GenericAgentArgs):\n def make_agent(self):\n return MultiCandidateGenericAgent(\n chat_model_args=self.chat_model_args,\n flags=self.flags,\n max_retry=self.max_retry,\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.make_agent#L214-L219","kind":"function","name":"make_agent","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":214,"end_line":219,"context_start_line":194,"context_end_line":225,"code":" def get_action(self, obs):\n \"\"\"Generates multiple candidates and always returns the first one.\n This allows to use this agent as a drop-in replacement for a 
single-candidate agent.\n\n Args:\n obs: The observation from the environment.\n\n Returns:\n tuple: A tuple containing (action, agent_info).\n \"\"\"\n candidates = self.get_candidate_generations(obs, hint=None, n_candidates=2)\n selection = candidates[0] # always select the first option.\n self.update_agent_state_from_selected_candidate(selection)\n action, agent_info = selection[\"action\"], selection[\"agent_info\"]\n\n return action, agent_info\n\n\n@dataclass\nclass MultiCandidateGenericAgentArgs(GenericAgentArgs):\n def make_agent(self):\n return MultiCandidateGenericAgent(\n chat_model_args=self.chat_model_args,\n flags=self.flags,\n max_retry=self.max_retry,\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.__post_init__","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.multi_candidate_generic_agent.__post_init__#L221-L225","kind":"function","name":"__post_init__","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":221,"end_line":225,"context_start_line":201,"context_end_line":225,"code":" Returns:\n tuple: A tuple containing (action, agent_info).\n \"\"\"\n candidates = self.get_candidate_generations(obs, hint=None, n_candidates=2)\n selection = candidates[0] # always select the first option.\n self.update_agent_state_from_selected_candidate(selection)\n action, agent_info = selection[\"action\"], selection[\"agent_info\"]\n\n return action, agent_info\n\n\n@dataclass\nclass MultiCandidateGenericAgentArgs(GenericAgentArgs):\n def make_agent(self):\n return MultiCandidateGenericAgent(\n chat_model_args=self.chat_model_args,\n flags=self.flags,\n max_retry=self.max_retry,\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'MC-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"MC-\" + self.agent_name","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hint_labelling","uri":"program://AgentLab/module/src.agentlab.agents.hitl_agent.hint_labelling#L1-L166","kind":"module","name":"src.agentlab.agents.hitl_agent.hint_labelling","path":"src/agentlab/agents/hitl_agent/hint_labelling.py","language":"python","start_line":1,"end_line":166,"context_start_line":1,"context_end_line":166,"code":"import json\nimport logging\nfrom importlib import resources\nfrom queue import Queue\nfrom typing import Dict, List, Optional\n\nimport playwright.sync_api\nfrom browsergym.core import _get_global_playwright\nfrom pydantic import BaseModel, Field\n\nlogger = logging.getLogger(__name__)\n\nHINT_LABELING_DIR = resources.files(\"agentlab.agents.hitl_agent.hint_labelling_ui_files\")\n\n\nclass HintLabelingInputs(BaseModel):\n goal: str\n error_feedback: str = \"\"\n screenshot: str # base64 screenshot (original/current)\n screenshots: List[str] = Field(default_factory=list) # list of base64 screenshots for hover\n axtree: str\n hints: List[str] = Field(default_factory=list)\n suggestions: List[Dict[str, str]] = Field(default_factory=list)\n\n\nclass HintLabeling:\n def __init__(self, headless: 
bool, *args, **kwargs):\n pw_opt = _get_global_playwright()\n pw: playwright.sync_api.Playwright = pw_opt # type: ignore[assignment]\n self.browser = pw.chromium.launch(headless=headless)\n self.context = self.browser.new_context(\n no_viewport=True,\n )\n self.page = self.context.new_page()\n self._resp_queue = Queue()\n\n self.page.route(\"**/api/reprompt\", self._route_reprompt)\n self.page.route(\"**/api/submit\", self._route_submit)\n self.page.set_content(get_hint_labeling_ui(HINT_LABELING_DIR))\n\n # internal state\n self._context = None\n self._running = False\n\n def _route_reprompt(\n self, route: playwright.sync_api.Route, request: playwright.sync_api.Request\n ):\n logger.info(\"Route hit: %s %s\", request.method, request.url)\n try:\n body = json.loads(request.post_data or \"{}\")\n except Exception:\n body = {}\n # enqueue output 1 (reprompt)\n hints = body.get(\"hints\")\n if not isinstance(hints, list):\n # Back-compat: accept single 'hint' string\n h = body.get(\"hint\")\n hints = [h] if isinstance(h, str) and h.strip() else []\n msg = {\"type\": \"reprompt\", \"payload\": {\"hints\": hints}}\n self._resp_queue.put(msg)\n # Respond something minimal so UI doesn’t break; it will be refreshed by a later update_context()\n route.fulfill(\n status=200,\n content_type=\"application/json\",\n body=json.dumps({\"suggestions\": []}),\n )\n\n def _route_submit(self, route: playwright.sync_api.Route, request: playwright.sync_api.Request):\n logger.info(\"Route hit: %s %s\", request.method, request.url)\n try:\n body = json.loads(request.post_data or \"{}\")\n except Exception:\n body = {}\n # Map UI payload -> your step shape\n msg = {\n \"type\": \"step\",\n \"payload\": {\n \"think\": body.get(\"think\", \"\"),\n \"action\": body.get(\"action\", \"\"),\n },\n }\n self._resp_queue.put(msg)\n # UI expects 200 JSON; we can optionally send new suggestions here too.\n route.fulfill(\n status=200,\n content_type=\"application/json\",\n body=json.dumps({\"suggestions\": []}),\n )\n\n def _to_ui_bootstrap(self, ctx: HintLabelingInputs) -> dict:\n return {\n \"goal\": ctx.goal,\n \"error_feedback\": ctx.error_feedback,\n \"screenshot\": ctx.screenshot,\n \"screenshots\": ctx.screenshots, # list of screenshots for hover\n \"axtree\": ctx.axtree,\n \"hints\": ctx.hints,\n \"suggestions\": ctx.suggestions,\n }\n\n def update_context(self, context: HintLabelingInputs):\n self._context = context\n ui_payload = self._to_ui_bootstrap(context)\n # call JS function with arg (no string concat)\n self.page.evaluate(\"(d) => updateContext(d)\", ui_payload)\n\n def wait_for_response(self, timeout: Optional[float] = 600) -> dict:\n \"\"\"\n Wait until the page makes a request to /api/reprompt or /api/submit,\n then parse the request body and return it in your schema.\n\n Args:\n timeout (Optional[float]): Maximum time to wait for the request in seconds. If None or 0,\n waits indefinitely. 
Defaults to 600 seconds.\n\n Returns:\n dict: A dictionary containing the parsed response with 'type' and 'payload' keys.\n For /api/reprompt: {'type': 'reprompt', 'payload': {'hints': list[str]}}\n For /api/submit: {'type': 'step', 'payload': {'think': str, 'action': str}}\n\n \"\"\"\n logger.info(\"Waiting for response from Hint Labeling UI...\")\n\n def is_api(req: playwright.sync_api.Request) -> bool:\n u = req.url\n return (\n u.endswith(\"/api/reprompt\") or u.endswith(\"/api/submit\")\n ) and req.method == \"POST\"\n\n # This pumps Playwright internally; no busy waiting.\n with self.page.expect_request(\n is_api, timeout=(timeout * 1000 if timeout else 0)\n ) as req_info:\n req = req_info.value\n\n body_text = req.post_data or \"{}\"\n try:\n body = json.loads(body_text)\n except Exception as e:\n print(\"JSON parse error:\", e)\n body = {}\n\n if req.url.endswith(\"/api/reprompt\"):\n hints = body.get(\"hints\")\n if not isinstance(hints, list):\n h = body.get(\"hint\")\n hints = [h] if isinstance(h, str) and h.strip() else []\n msg = {\"type\": \"reprompt\", \"payload\": {\"hints\": hints}}\n else:\n msg = {\n \"type\": \"step\",\n \"payload\": {\"think\": body.get(\"think\", \"\"), \"action\": body.get(\"action\", \"\")},\n }\n\n logger.info(\"Response received: %s\", msg)\n return msg\n\n def close(self):\n self.context.close()\n self.browser.close()\n\n\ndef get_hint_labeling_ui(hint_labeling_dir) -> str:\n with open(hint_labeling_dir / \"hint_labeling_ui.html\", \"r\") as file:\n hint_labeling_html = file.read()\n return hint_labeling_html","source_hash":"de08d8c488bf308ab6d374a107442c6c9ee8f0a1be83dc1ddb3609458b7f0333","truncated":false}
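HintLabeling never starts an HTTP server: the UI is injected into a blank page via set_content(), its fetch() calls are captured by page.route(), and wait_for_response() blocks on expect_request(). Below is a minimal standalone sketch of the same mechanic, assuming playwright is installed with a chromium build; the hitl.local host and the demo payload are invented, and interception fires before any DNS lookup happens.

import json
from playwright.sync_api import sync_playwright

# The page is served from about:blank, so an absolute (invented) URL is used;
# page.route() intercepts the request before any network or DNS activity.
HTML = """
<button onclick="fetch('https://hitl.local/api/submit', {
    method: 'POST',
    headers: {'Content-Type': 'application/json'},
    body: JSON.stringify({think: 'demo', action: 'noop()'})
})">submit</button>
"""

with sync_playwright() as pw:
    browser = pw.chromium.launch(headless=True)
    page = browser.new_page()
    # Fulfill with an empty JSON body so the page's fetch() settles cleanly.
    page.route(
        "**/api/submit",
        lambda route, request: route.fulfill(
            status=200, content_type="application/json", body="{}"
        ),
    )
    page.set_content(HTML)
    # expect_request pumps Playwright internally (no busy waiting), mirroring wait_for_response.
    with page.expect_request(
        lambda r: r.url.endswith("/api/submit") and r.method == "POST"
    ) as req_info:
        page.click("button")  # stands in for a human clicking in the real UI
    print(json.loads(req_info.value.post_data or "{}"))  # {'think': 'demo', 'action': 'noop()'}
    browser.close()

Routing instead of binding a localhost server also avoids having to pick and manage a free port when several studies run in parallel.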
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hint_labelling.__init__","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hint_labelling.__init__#L27-L43","kind":"function","name":"__init__","path":"src/agentlab/agents/hitl_agent/hint_labelling.py","language":"python","start_line":27,"end_line":43,"context_start_line":7,"context_end_line":63,"code":"import playwright.sync_api\nfrom browsergym.core import _get_global_playwright\nfrom pydantic import BaseModel, Field\n\nlogger = logging.getLogger(__name__)\n\nHINT_LABELING_DIR = resources.files(\"agentlab.agents.hitl_agent.hint_labelling_ui_files\")\n\n\nclass HintLabelingInputs(BaseModel):\n goal: str\n error_feedback: str = \"\"\n screenshot: str # base64 screenshot (original/current)\n screenshots: List[str] = Field(default_factory=list) # list of base64 screenshots for hover\n axtree: str\n hints: List[str] = Field(default_factory=list)\n suggestions: List[Dict[str, str]] = Field(default_factory=list)\n\n\nclass HintLabeling:\n def __init__(self, headless: bool, *args, **kwargs):\n pw_opt = _get_global_playwright()\n pw: playwright.sync_api.Playwright = pw_opt # type: ignore[assignment]\n self.browser = pw.chromium.launch(headless=headless)\n self.context = self.browser.new_context(\n no_viewport=True,\n )\n self.page = self.context.new_page()\n self._resp_queue = Queue()\n\n self.page.route(\"**/api/reprompt\", self._route_reprompt)\n self.page.route(\"**/api/submit\", self._route_submit)\n self.page.set_content(get_hint_labeling_ui(HINT_LABELING_DIR))\n\n # internal state\n self._context = None\n self._running = False\n\n def _route_reprompt(\n self, route: playwright.sync_api.Route, request: playwright.sync_api.Request\n ):\n logger.info(\"Route hit: %s %s\", request.method, request.url)\n try:\n body = json.loads(request.post_data or \"{}\")\n except Exception:\n body = {}\n # enqueue output 1 (reprompt)\n hints = body.get(\"hints\")\n if not isinstance(hints, list):\n # Back-compat: accept single 'hint' string\n h = body.get(\"hint\")\n hints = [h] if isinstance(h, str) and h.strip() else []\n msg = {\"type\": \"reprompt\", \"payload\": {\"hints\": hints}}\n self._resp_queue.put(msg)\n # Respond something minimal so UI doesn’t break; it will be refreshed by a later update_context()\n route.fulfill(\n status=200,","source_hash":"de08d8c488bf308ab6d374a107442c6c9ee8f0a1be83dc1ddb3609458b7f0333","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hint_labelling._route_reprompt","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hint_labelling._route_reprompt#L45-L66","kind":"function","name":"_route_reprompt","path":"src/agentlab/agents/hitl_agent/hint_labelling.py","language":"python","start_line":45,"end_line":66,"context_start_line":25,"context_end_line":86,"code":"\nclass HintLabeling:\n def __init__(self, headless: bool, *args, **kwargs):\n pw_opt = _get_global_playwright()\n pw: playwright.sync_api.Playwright = pw_opt # type: ignore[assignment]\n self.browser = pw.chromium.launch(headless=headless)\n self.context = self.browser.new_context(\n no_viewport=True,\n )\n self.page = self.context.new_page()\n self._resp_queue = Queue()\n\n self.page.route(\"**/api/reprompt\", self._route_reprompt)\n self.page.route(\"**/api/submit\", self._route_submit)\n self.page.set_content(get_hint_labeling_ui(HINT_LABELING_DIR))\n\n # internal state\n self._context = None\n self._running = False\n\n def _route_reprompt(\n self, route: 
playwright.sync_api.Route, request: playwright.sync_api.Request\n ):\n logger.info(\"Route hit: %s %s\", request.method, request.url)\n try:\n body = json.loads(request.post_data or \"{}\")\n except Exception:\n body = {}\n # enqueue output 1 (reprompt)\n hints = body.get(\"hints\")\n if not isinstance(hints, list):\n # Back-compat: accept single 'hint' string\n h = body.get(\"hint\")\n hints = [h] if isinstance(h, str) and h.strip() else []\n msg = {\"type\": \"reprompt\", \"payload\": {\"hints\": hints}}\n self._resp_queue.put(msg)\n # Respond something minimal so UI doesn’t break; it will be refreshed by a later update_context()\n route.fulfill(\n status=200,\n content_type=\"application/json\",\n body=json.dumps({\"suggestions\": []}),\n )\n\n def _route_submit(self, route: playwright.sync_api.Route, request: playwright.sync_api.Request):\n logger.info(\"Route hit: %s %s\", request.method, request.url)\n try:\n body = json.loads(request.post_data or \"{}\")\n except Exception:\n body = {}\n # Map UI payload -> your step shape\n msg = {\n \"type\": \"step\",\n \"payload\": {\n \"think\": body.get(\"think\", \"\"),\n \"action\": body.get(\"action\", \"\"),\n },\n }\n self._resp_queue.put(msg)\n # UI expects 200 JSON; we can optionally send new suggestions here too.\n route.fulfill(\n status=200,\n content_type=\"application/json\",","source_hash":"de08d8c488bf308ab6d374a107442c6c9ee8f0a1be83dc1ddb3609458b7f0333","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hint_labelling._route_submit","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hint_labelling._route_submit#L68-L88","kind":"function","name":"_route_submit","path":"src/agentlab/agents/hitl_agent/hint_labelling.py","language":"python","start_line":68,"end_line":88,"context_start_line":48,"context_end_line":108,"code":" logger.info(\"Route hit: %s %s\", request.method, request.url)\n try:\n body = json.loads(request.post_data or \"{}\")\n except Exception:\n body = {}\n # enqueue output 1 (reprompt)\n hints = body.get(\"hints\")\n if not isinstance(hints, list):\n # Back-compat: accept single 'hint' string\n h = body.get(\"hint\")\n hints = [h] if isinstance(h, str) and h.strip() else []\n msg = {\"type\": \"reprompt\", \"payload\": {\"hints\": hints}}\n self._resp_queue.put(msg)\n # Respond something minimal so UI doesn’t break; it will be refreshed by a later update_context()\n route.fulfill(\n status=200,\n content_type=\"application/json\",\n body=json.dumps({\"suggestions\": []}),\n )\n\n def _route_submit(self, route: playwright.sync_api.Route, request: playwright.sync_api.Request):\n logger.info(\"Route hit: %s %s\", request.method, request.url)\n try:\n body = json.loads(request.post_data or \"{}\")\n except Exception:\n body = {}\n # Map UI payload -> your step shape\n msg = {\n \"type\": \"step\",\n \"payload\": {\n \"think\": body.get(\"think\", \"\"),\n \"action\": body.get(\"action\", \"\"),\n },\n }\n self._resp_queue.put(msg)\n # UI expects 200 JSON; we can optionally send new suggestions here too.\n route.fulfill(\n status=200,\n content_type=\"application/json\",\n body=json.dumps({\"suggestions\": []}),\n )\n\n def _to_ui_bootstrap(self, ctx: HintLabelingInputs) -> dict:\n return {\n \"goal\": ctx.goal,\n \"error_feedback\": ctx.error_feedback,\n \"screenshot\": ctx.screenshot,\n \"screenshots\": ctx.screenshots, # list of screenshots for hover\n \"axtree\": ctx.axtree,\n \"hints\": ctx.hints,\n \"suggestions\": ctx.suggestions,\n }\n\n def 
update_context(self, context: HintLabelingInputs):\n self._context = context\n ui_payload = self._to_ui_bootstrap(context)\n # call JS function with arg (no string concat)\n self.page.evaluate(\"(d) => updateContext(d)\", ui_payload)\n\n def wait_for_response(self, timeout: Optional[float] = 600) -> dict:\n \"\"\"","source_hash":"de08d8c488bf308ab6d374a107442c6c9ee8f0a1be83dc1ddb3609458b7f0333","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hint_labelling._to_ui_bootstrap","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hint_labelling._to_ui_bootstrap#L90-L99","kind":"function","name":"_to_ui_bootstrap","path":"src/agentlab/agents/hitl_agent/hint_labelling.py","language":"python","start_line":90,"end_line":99,"context_start_line":70,"context_end_line":119,"code":" try:\n body = json.loads(request.post_data or \"{}\")\n except Exception:\n body = {}\n # Map UI payload -> your step shape\n msg = {\n \"type\": \"step\",\n \"payload\": {\n \"think\": body.get(\"think\", \"\"),\n \"action\": body.get(\"action\", \"\"),\n },\n }\n self._resp_queue.put(msg)\n # UI expects 200 JSON; we can optionally send new suggestions here too.\n route.fulfill(\n status=200,\n content_type=\"application/json\",\n body=json.dumps({\"suggestions\": []}),\n )\n\n def _to_ui_bootstrap(self, ctx: HintLabelingInputs) -> dict:\n return {\n \"goal\": ctx.goal,\n \"error_feedback\": ctx.error_feedback,\n \"screenshot\": ctx.screenshot,\n \"screenshots\": ctx.screenshots, # list of screenshots for hover\n \"axtree\": ctx.axtree,\n \"hints\": ctx.hints,\n \"suggestions\": ctx.suggestions,\n }\n\n def update_context(self, context: HintLabelingInputs):\n self._context = context\n ui_payload = self._to_ui_bootstrap(context)\n # call JS function with arg (no string concat)\n self.page.evaluate(\"(d) => updateContext(d)\", ui_payload)\n\n def wait_for_response(self, timeout: Optional[float] = 600) -> dict:\n \"\"\"\n Wait until the page makes a request to /api/reprompt or /api/submit,\n then parse the request body and return it in your schema.\n\n Args:\n timeout (Optional[float]): Maximum time to wait for the request in seconds. If None or 0,\n waits indefinitely. 
Defaults to 600 seconds.\n\n Returns:\n dict: A dictionary containing the parsed response with 'type' and 'payload' keys.\n For /api/reprompt: {'type': 'reprompt', 'payload': {'hints': list[str]}}\n For /api/submit: {'type': 'step', 'payload': {'think': str, 'action': str}}","source_hash":"de08d8c488bf308ab6d374a107442c6c9ee8f0a1be83dc1ddb3609458b7f0333","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hint_labelling.update_context","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hint_labelling.update_context#L101-L105","kind":"function","name":"update_context","path":"src/agentlab/agents/hitl_agent/hint_labelling.py","language":"python","start_line":101,"end_line":105,"context_start_line":81,"context_end_line":125,"code":" }\n self._resp_queue.put(msg)\n # UI expects 200 JSON; we can optionally send new suggestions here too.\n route.fulfill(\n status=200,\n content_type=\"application/json\",\n body=json.dumps({\"suggestions\": []}),\n )\n\n def _to_ui_bootstrap(self, ctx: HintLabelingInputs) -> dict:\n return {\n \"goal\": ctx.goal,\n \"error_feedback\": ctx.error_feedback,\n \"screenshot\": ctx.screenshot,\n \"screenshots\": ctx.screenshots, # list of screenshots for hover\n \"axtree\": ctx.axtree,\n \"hints\": ctx.hints,\n \"suggestions\": ctx.suggestions,\n }\n\n def update_context(self, context: HintLabelingInputs):\n self._context = context\n ui_payload = self._to_ui_bootstrap(context)\n # call JS function with arg (no string concat)\n self.page.evaluate(\"(d) => updateContext(d)\", ui_payload)\n\n def wait_for_response(self, timeout: Optional[float] = 600) -> dict:\n \"\"\"\n Wait until the page makes a request to /api/reprompt or /api/submit,\n then parse the request body and return it in your schema.\n\n Args:\n timeout (Optional[float]): Maximum time to wait for the request in seconds. If None or 0,\n waits indefinitely. 
Defaults to 600 seconds.\n\n Returns:\n dict: A dictionary containing the parsed response with 'type' and 'payload' keys.\n For /api/reprompt: {'type': 'reprompt', 'payload': {'hints': list[str]}}\n For /api/submit: {'type': 'step', 'payload': {'think': str, 'action': str}}\n\n \"\"\"\n logger.info(\"Waiting for response from Hint Labeling UI...\")\n\n def is_api(req: playwright.sync_api.Request) -> bool:\n u = req.url","source_hash":"de08d8c488bf308ab6d374a107442c6c9ee8f0a1be83dc1ddb3609458b7f0333","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hint_labelling.wait_for_response","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hint_labelling.wait_for_response#L107-L156","kind":"function","name":"wait_for_response","path":"src/agentlab/agents/hitl_agent/hint_labelling.py","language":"python","start_line":107,"end_line":156,"context_start_line":87,"context_end_line":166,"code":" body=json.dumps({\"suggestions\": []}),\n )\n\n def _to_ui_bootstrap(self, ctx: HintLabelingInputs) -> dict:\n return {\n \"goal\": ctx.goal,\n \"error_feedback\": ctx.error_feedback,\n \"screenshot\": ctx.screenshot,\n \"screenshots\": ctx.screenshots, # list of screenshots for hover\n \"axtree\": ctx.axtree,\n \"hints\": ctx.hints,\n \"suggestions\": ctx.suggestions,\n }\n\n def update_context(self, context: HintLabelingInputs):\n self._context = context\n ui_payload = self._to_ui_bootstrap(context)\n # call JS function with arg (no string concat)\n self.page.evaluate(\"(d) => updateContext(d)\", ui_payload)\n\n def wait_for_response(self, timeout: Optional[float] = 600) -> dict:\n \"\"\"\n Wait until the page makes a request to /api/reprompt or /api/submit,\n then parse the request body and return it in your schema.\n\n Args:\n timeout (Optional[float]): Maximum time to wait for the request in seconds. If None or 0,\n waits indefinitely. 
Defaults to 600 seconds.\n\n Returns:\n dict: A dictionary containing the parsed response with 'type' and 'payload' keys.\n For /api/reprompt: {'type': 'reprompt', 'payload': {'hints': list[str]}}\n For /api/submit: {'type': 'step', 'payload': {'think': str, 'action': str}}\n\n \"\"\"\n logger.info(\"Waiting for response from Hint Labeling UI...\")\n\n def is_api(req: playwright.sync_api.Request) -> bool:\n u = req.url\n return (\n u.endswith(\"/api/reprompt\") or u.endswith(\"/api/submit\")\n ) and req.method == \"POST\"\n\n # This pumps Playwright internally; no busy waiting.\n with self.page.expect_request(\n is_api, timeout=(timeout * 1000 if timeout else 0)\n ) as req_info:\n req = req_info.value\n\n body_text = req.post_data or \"{}\"\n try:\n body = json.loads(body_text)\n except Exception as e:\n print(\"JSON parse error:\", e)\n body = {}\n\n if req.url.endswith(\"/api/reprompt\"):\n hints = body.get(\"hints\")\n if not isinstance(hints, list):\n h = body.get(\"hint\")\n hints = [h] if isinstance(h, str) and h.strip() else []\n msg = {\"type\": \"reprompt\", \"payload\": {\"hints\": hints}}\n else:\n msg = {\n \"type\": \"step\",\n \"payload\": {\"think\": body.get(\"think\", \"\"), \"action\": body.get(\"action\", \"\")},\n }\n\n logger.info(\"Response received: %s\", msg)\n return msg\n\n def close(self):\n self.context.close()\n self.browser.close()\n\n\ndef get_hint_labeling_ui(hint_labeling_dir) -> str:\n with open(hint_labeling_dir / \"hint_labeling_ui.html\", \"r\") as file:\n hint_labeling_html = file.read()\n return hint_labeling_html","source_hash":"de08d8c488bf308ab6d374a107442c6c9ee8f0a1be83dc1ddb3609458b7f0333","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hint_labelling.close","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hint_labelling.close#L158-L160","kind":"function","name":"close","path":"src/agentlab/agents/hitl_agent/hint_labelling.py","language":"python","start_line":158,"end_line":160,"context_start_line":138,"context_end_line":166,"code":" body = json.loads(body_text)\n except Exception as e:\n print(\"JSON parse error:\", e)\n body = {}\n\n if req.url.endswith(\"/api/reprompt\"):\n hints = body.get(\"hints\")\n if not isinstance(hints, list):\n h = body.get(\"hint\")\n hints = [h] if isinstance(h, str) and h.strip() else []\n msg = {\"type\": \"reprompt\", \"payload\": {\"hints\": hints}}\n else:\n msg = {\n \"type\": \"step\",\n \"payload\": {\"think\": body.get(\"think\", \"\"), \"action\": body.get(\"action\", \"\")},\n }\n\n logger.info(\"Response received: %s\", msg)\n return msg\n\n def close(self):\n self.context.close()\n self.browser.close()\n\n\ndef get_hint_labeling_ui(hint_labeling_dir) -> str:\n with open(hint_labeling_dir / \"hint_labeling_ui.html\", \"r\") as file:\n hint_labeling_html = file.read()\n return hint_labeling_html","source_hash":"de08d8c488bf308ab6d374a107442c6c9ee8f0a1be83dc1ddb3609458b7f0333","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hint_labelling.is_api","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hint_labelling.is_api#L124-L128","kind":"function","name":"is_api","path":"src/agentlab/agents/hitl_agent/hint_labelling.py","language":"python","start_line":124,"end_line":128,"context_start_line":104,"context_end_line":148,"code":" # call JS function with arg (no string concat)\n self.page.evaluate(\"(d) => updateContext(d)\", ui_payload)\n\n def wait_for_response(self, timeout: Optional[float] = 600) 
-> dict:\n \"\"\"\n Wait until the page makes a request to /api/reprompt or /api/submit,\n then parse the request body and return it in your schema.\n\n Args:\n timeout (Optional[float]): Maximum time to wait for the request in seconds. If None or 0,\n waits indefinitely. Defaults to 600 seconds.\n\n Returns:\n dict: A dictionary containing the parsed response with 'type' and 'payload' keys.\n For /api/reprompt: {'type': 'reprompt', 'payload': {'hints': list[str]}}\n For /api/submit: {'type': 'step', 'payload': {'think': str, 'action': str}}\n\n \"\"\"\n logger.info(\"Waiting for response from Hint Labeling UI...\")\n\n def is_api(req: playwright.sync_api.Request) -> bool:\n u = req.url\n return (\n u.endswith(\"/api/reprompt\") or u.endswith(\"/api/submit\")\n ) and req.method == \"POST\"\n\n # This pumps Playwright internally; no busy waiting.\n with self.page.expect_request(\n is_api, timeout=(timeout * 1000 if timeout else 0)\n ) as req_info:\n req = req_info.value\n\n body_text = req.post_data or \"{}\"\n try:\n body = json.loads(body_text)\n except Exception as e:\n print(\"JSON parse error:\", e)\n body = {}\n\n if req.url.endswith(\"/api/reprompt\"):\n hints = body.get(\"hints\")\n if not isinstance(hints, list):\n h = body.get(\"hint\")\n hints = [h] if isinstance(h, str) and h.strip() else []\n msg = {\"type\": \"reprompt\", \"payload\": {\"hints\": hints}}","source_hash":"de08d8c488bf308ab6d374a107442c6c9ee8f0a1be83dc1ddb3609458b7f0333","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent","uri":"program://AgentLab/module/src.agentlab.agents.hitl_agent.hitl_agent#L1-L205","kind":"module","name":"src.agentlab.agents.hitl_agent.hitl_agent","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":1,"end_line":205,"context_start_line":1,"context_end_line":205,"code":"from dataclasses import dataclass\nfrom typing import Optional\n\nimport bgym\nimport playwright\nfrom browsergym.experiments.agent import Agent\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.agents.agent_utils import overlay_action\nfrom agentlab.agents.hitl_agent.base_multi_candidate_agent import MultiCandidateAgent\nfrom agentlab.agents.hitl_agent.hint_labelling import (\n HintLabeling,\n HintLabelingInputs,\n)\nfrom agentlab.llm.llm_utils import img_to_base_64\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\nclass HumanInTheLoopAgent(Agent):\n\n def __init__(\n self,\n subagent_args, # Type: any object with MultiCandidateAgent interface\n ):\n self.subagent: MultiCandidateAgent = subagent_args.make_agent()\n super().__init__()\n self.ui = None\n\n @cost_tracker_decorator\n def get_action(self, obs):\n # reset vars\n step_n_human_intervention_rounds = 0\n step_hint = []\n\n # Initialize UI once outside the loop\n if self.ui is None:\n self.ui = HintLabeling(headless=False)\n # Show initial waiting state\n initial_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=\"\",\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=[], # no overlay screenshots yet\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=[],\n suggestions=[], # no suggestions yet\n )\n self.ui.update_context(initial_inputs)\n\n # Generate first candidates\n candidates = self.subagent.get_candidate_generations(obs, hint=None, n_candidates=3)\n step_n_human_intervention_rounds += 1\n suggestions 
= [{\"action\": c[\"action\"], \"think\": c[\"agent_info\"].think} for c in candidates]\n # List of Images as base64 - create overlay screenshots for each suggested action\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n\n while True:\n try:\n hint_labeling_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=obs.get(\"last_action_error\", \"\"),\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=screenshots, # list of overlay screenshots for hover\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=step_hint,\n suggestions=suggestions,\n )\n\n self.ui.update_context(hint_labeling_inputs)\n response = self.ui.wait_for_response(timeout=600)\n\n if response[\"type\"] == \"reprompt\":\n new_hints = response[\"payload\"].get(\"hints\", [])\n # Replace with the new list from UI, or extend if needed\n step_hint = list(new_hints) if isinstance(new_hints, list) else step_hint\n candidates = self.subagent.get_candidate_generations(\n obs, hint=step_hint if step_hint else None, n_candidates=3\n )\n step_n_human_intervention_rounds += 1\n suggestions = [\n {\"action\": c[\"action\"], \"think\": c[\"agent_info\"].think} for c in candidates\n ]\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n\n elif response[\"type\"] == \"step\":\n selected_action = response[\"payload\"][\"action\"]\n choice_idx = None\n for i, candidate in enumerate(suggestions):\n if candidate[\"action\"] == selected_action:\n choice_idx = i\n break\n selected_candidate = candidates[choice_idx]\n self.subagent.update_agent_state_from_selected_candidate(selected_candidate)\n action = selected_candidate[\"action\"]\n agent_info = selected_candidate[\"agent_info\"]\n return action, agent_info\n\n except KeyboardInterrupt:\n print(\"User cancelled the operation\")\n if self.ui:\n self.ui.close()\n raise\n except playwright.sync_api.TimeoutError:\n # Handle timeout specifically: fall back to first candidate\n print(\"UI timeout; falling back to first candidate.\")\n selected_candidate = candidates[0]\n self.subagent.update_agent_state_from_selected_candidate(selected_candidate)\n action = selected_candidate[\"action\"]\n agent_info = selected_candidate[\"agent_info\"]\n return action, agent_info\n except Exception as e:\n print(f\"Error in human intervention UI: {e}\")\n if self.ui:\n self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n\n@dataclass\nclass HumanInTheLoopAgentArgs(AgentArgs):\n subagent_args: Optional[AgentArgs] = None # args for the underlying multiple proposal agent\n\n def make_agent(self):\n assert self.subagent_args is not None\n return HumanInTheLoopAgent(subagent_args=self.subagent_args)\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if self.subagent_args and self.subagent_args.agent_name:\n self.agent_name = \"HITL-\" + self.subagent_args.agent_name\n\n def set_benchmark(self, benchmark, demo_mode):\n \"\"\"Delegate set_benchmark to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_benchmark\"):\n self.subagent_args.set_benchmark(benchmark, demo_mode)\n\n def set_reproducibility_mode(self):\n \"\"\"Delegate set_reproducibility_mode to the subagent if it has the method.\"\"\"\n if 
hasattr(self.subagent_args, \"set_reproducibility_mode\"):\n self.subagent_args.set_reproducibility_mode()\n\n\ndef get_base_human_in_the_loop_genericagent(llm_config):\n \"\"\"\n Create a base human-in-the-loop generic agent configuration using the key from CHAT_MODEL_ARGS_DICT.\n\n This function creates a HumanInTheLoopAgentArgs instance with a MultiCandidateGenericAgent\n as the subagent, configured with the specified LLM configuration and base flags.\n\n Args:\n llm_config (str): The LLM configuration key to use from CHAT_MODEL_ARGS_DICT.\n\n Returns:\n HumanInTheLoopAgentArgs: Configured human-in-the-loop agent arguments with\n a multi-candidate generic agent as the subagent.\n \"\"\"\n from agentlab.agents.generic_agent.tmlr_config import BASE_FLAGS\n from agentlab.agents.hitl_agent.hitl_agent import HumanInTheLoopAgentArgs\n from agentlab.agents.hitl_agent.multi_candidate_generic_agent import (\n MultiCandidateGenericAgentArgs,\n )\n from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\n return HumanInTheLoopAgentArgs(\n subagent_args=MultiCandidateGenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=BASE_FLAGS,\n )\n )\n\n\nHUMAN_GUIDED_GENERIC_AGENT = get_base_human_in_the_loop_genericagent(\"openai/gpt-5-mini-2025-08-07\")\n\nif __name__ == \"__main__\":\n import logging\n\n from agentlab.agents.hitl_agent.hitl_agent import (\n HUMAN_GUIDED_GENERIC_AGENT,\n )\n from agentlab.experiments.study import Study\n\n agent_configs = [HUMAN_GUIDED_GENERIC_AGENT]\n benchmark = bgym.DEFAULT_BENCHMARKS[\"miniwob\"]()\n benchmark = benchmark.subset_from_glob(\"task_name\", \"*book*\")\n benchmark.env_args_list = benchmark.env_args_list[2:3]\n\n for env_args in benchmark.env_args_list:\n env_args.max_steps = 100 # max human steps\n env_args.headless = False\n\n Study(agent_configs, benchmark, logging_level=logging.WARNING).run(\n n_jobs=1,\n parallel_backend=\"sequential\",\n n_relaunch=1,\n )","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent.HumanInTheLoopAgent","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.hitl_agent.HumanInTheLoopAgent#L19-L125","kind":"class","name":"HumanInTheLoopAgent","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":19,"end_line":125,"context_start_line":1,"context_end_line":145,"code":"from dataclasses import dataclass\nfrom typing import Optional\n\nimport bgym\nimport playwright\nfrom browsergym.experiments.agent import Agent\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.agents.agent_utils import overlay_action\nfrom agentlab.agents.hitl_agent.base_multi_candidate_agent import MultiCandidateAgent\nfrom agentlab.agents.hitl_agent.hint_labelling import (\n HintLabeling,\n HintLabelingInputs,\n)\nfrom agentlab.llm.llm_utils import img_to_base_64\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\nclass HumanInTheLoopAgent(Agent):\n\n def __init__(\n self,\n subagent_args, # Type: any object with MultiCandidateAgent interface\n ):\n self.subagent: MultiCandidateAgent = subagent_args.make_agent()\n super().__init__()\n self.ui = None\n\n @cost_tracker_decorator\n def get_action(self, obs):\n # reset vars\n step_n_human_intervention_rounds = 0\n step_hint = []\n\n # Initialize UI once outside the loop\n if self.ui is None:\n self.ui = HintLabeling(headless=False)\n # Show initial waiting state\n initial_inputs = 
HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=\"\",\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=[], # no overlay screenshots yet\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=[],\n suggestions=[], # no suggestions yet\n )\n self.ui.update_context(initial_inputs)\n\n # Generate first candidates\n candidates = self.subagent.get_candidate_generations(obs, hint=None, n_candidates=3)\n step_n_human_intervention_rounds += 1\n suggestions = [{\"action\": c[\"action\"], \"think\": c[\"agent_info\"].think} for c in candidates]\n # List of Images as base64 - create overlay screenshots for each suggested action\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n\n while True:\n try:\n hint_labeling_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=obs.get(\"last_action_error\", \"\"),\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=screenshots, # list of overlay screenshots for hover\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=step_hint,\n suggestions=suggestions,\n )\n\n self.ui.update_context(hint_labeling_inputs)\n response = self.ui.wait_for_response(timeout=600)\n\n if response[\"type\"] == \"reprompt\":\n new_hints = response[\"payload\"].get(\"hints\", [])\n # Replace with the new list from UI, or extend if needed\n step_hint = list(new_hints) if isinstance(new_hints, list) else step_hint\n candidates = self.subagent.get_candidate_generations(\n obs, hint=step_hint if step_hint else None, n_candidates=3\n )\n step_n_human_intervention_rounds += 1\n suggestions = [\n {\"action\": c[\"action\"], \"think\": c[\"agent_info\"].think} for c in candidates\n ]\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n\n elif response[\"type\"] == \"step\":\n selected_action = response[\"payload\"][\"action\"]\n choice_idx = None\n for i, candidate in enumerate(suggestions):\n if candidate[\"action\"] == selected_action:\n choice_idx = i\n break\n if choice_idx is None:\n # Defensive default: fall back to the first candidate if the UI\n # returns an action that matches none of the suggestions\n choice_idx = 0\n selected_candidate = candidates[choice_idx]\n self.subagent.update_agent_state_from_selected_candidate(selected_candidate)\n action = selected_candidate[\"action\"]\n agent_info = selected_candidate[\"agent_info\"]\n return action, agent_info\n\n except KeyboardInterrupt:\n print(\"User cancelled the operation\")\n if self.ui:\n self.ui.close()\n raise\n except playwright.sync_api.TimeoutError:\n # Handle timeout specifically: fall back to first candidate\n print(\"UI timeout; falling back to first candidate.\")\n selected_candidate = candidates[0]\n self.subagent.update_agent_state_from_selected_candidate(selected_candidate)\n action = selected_candidate[\"action\"]\n agent_info = selected_candidate[\"agent_info\"]\n return action, agent_info\n except Exception as e:\n print(f\"Error in human intervention UI: {e}\")\n if self.ui:\n self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n\n@dataclass\nclass HumanInTheLoopAgentArgs(AgentArgs):\n subagent_args: Optional[AgentArgs] = None # args for the underlying multiple proposal agent\n\n def make_agent(self):\n assert self.subagent_args is not None\n return HumanInTheLoopAgent(subagent_args=self.subagent_args)\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if self.subagent_args and self.subagent_args.agent_name:\n self.agent_name = \"HITL-\" + self.subagent_args.agent_name\n\n def set_benchmark(self, benchmark, demo_mode):\n \"\"\"Delegate set_benchmark to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_benchmark\"):\n self.subagent_args.set_benchmark(benchmark, demo_mode)","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent.HumanInTheLoopAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.hitl_agent.HumanInTheLoopAgentArgs#L129-L150","kind":"class","name":"HumanInTheLoopAgentArgs","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":129,"end_line":150,"context_start_line":109,"context_end_line":170,"code":" self.ui.close()\n raise\n except playwright.sync_api.TimeoutError:\n # Handle timeout specifically: fall back to first candidate\n print(\"UI timeout; falling back to first candidate.\")\n selected_candidate = candidates[0]\n self.subagent.update_agent_state_from_selected_candidate(selected_candidate)\n action = selected_candidate[\"action\"]\n agent_info = selected_candidate[\"agent_info\"]\n return action, agent_info\n except Exception as e:\n print(f\"Error in human intervention UI: {e}\")\n if self.ui:\n self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n\n@dataclass\nclass HumanInTheLoopAgentArgs(AgentArgs):\n subagent_args: Optional[AgentArgs] = None # args for the underlying multiple proposal agent\n\n def make_agent(self):\n assert self.subagent_args is not None\n return HumanInTheLoopAgent(subagent_args=self.subagent_args)\n\n def 
__post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if self.subagent_args and self.subagent_args.agent_name:\n self.agent_name = \"HITL-\" + self.subagent_args.agent_name\n\n def set_benchmark(self, benchmark, demo_mode):\n \"\"\"Delegate set_benchmark to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_benchmark\"):\n self.subagent_args.set_benchmark(benchmark, demo_mode)","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent.HumanInTheLoopAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.hitl_agent.HumanInTheLoopAgentArgs#L129-L150","kind":"class","name":"HumanInTheLoopAgentArgs","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":129,"end_line":150,"context_start_line":109,"context_end_line":170,"code":" self.ui.close()\n raise\n except playwright.sync_api.TimeoutError:\n # Handle timeout specifically: fall back to first candidate\n print(\"UI timeout; falling back to first candidate.\")\n selected_candidate = candidates[0]\n self.subagent.update_agent_state_from_selected_candidate(selected_candidate)\n action = selected_candidate[\"action\"]\n agent_info = selected_candidate[\"agent_info\"]\n return action, agent_info\n except Exception as e:\n print(f\"Error in human intervention UI: {e}\")\n if self.ui:\n self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n\n@dataclass\nclass HumanInTheLoopAgentArgs(AgentArgs):\n subagent_args: Optional[AgentArgs] = None # args for the underlying multiple proposal agent\n\n def make_agent(self):\n assert self.subagent_args is not None\n return HumanInTheLoopAgent(subagent_args=self.subagent_args)\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if self.subagent_args and self.subagent_args.agent_name:\n self.agent_name = \"HITL-\" + self.subagent_args.agent_name\n\n def set_benchmark(self, benchmark, demo_mode):\n \"\"\"Delegate set_benchmark to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_benchmark\"):\n self.subagent_args.set_benchmark(benchmark, demo_mode)\n\n def set_reproducibility_mode(self):\n \"\"\"Delegate set_reproducibility_mode to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_reproducibility_mode\"):\n self.subagent_args.set_reproducibility_mode()\n\n\ndef get_base_human_in_the_loop_genericagent(llm_config):\n \"\"\"\n Create a base human-in-the-loop generic agent configuration using the key from CHAT_MODEL_ARGS_DICT.\n\n This function creates a HumanInTheLoopAgentArgs instance with a MultiCandidateGenericAgent\n as the subagent, configured with the specified LLM configuration and base flags.\n\n Args:\n llm_config (str): The LLM configuration key to use from CHAT_MODEL_ARGS_DICT.\n\n Returns:\n HumanInTheLoopAgentArgs: Configured human-in-the-loop agent arguments with\n a multi-candidate generic agent as the subagent.\n \"\"\"\n from agentlab.agents.generic_agent.tmlr_config import BASE_FLAGS\n from agentlab.agents.hitl_agent.hitl_agent import HumanInTheLoopAgentArgs\n from agentlab.agents.hitl_agent.multi_candidate_generic_agent import (\n 
MultiCandidateGenericAgentArgs,","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent.get_base_human_in_the_loop_genericagent","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hitl_agent.get_base_human_in_the_loop_genericagent#L153-L179","kind":"function","name":"get_base_human_in_the_loop_genericagent","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":153,"end_line":179,"context_start_line":133,"context_end_line":199,"code":" assert self.subagent_args is not None\n return HumanInTheLoopAgent(subagent_args=self.subagent_args)\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if self.subagent_args and self.subagent_args.agent_name:\n self.agent_name = \"HITL-\" + self.subagent_args.agent_name\n\n def set_benchmark(self, benchmark, demo_mode):\n \"\"\"Delegate set_benchmark to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_benchmark\"):\n self.subagent_args.set_benchmark(benchmark, demo_mode)\n\n def set_reproducibility_mode(self):\n \"\"\"Delegate set_reproducibility_mode to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_reproducibility_mode\"):\n self.subagent_args.set_reproducibility_mode()\n\n\ndef get_base_human_in_the_loop_genericagent(llm_config):\n \"\"\"\n Create a base human-in-the-loop generic agent configuration using the key from CHAT_MODEL_ARGS_DICT.\n\n This function creates a HumanInTheLoopAgentArgs instance with a MultiCandidateGenericAgent\n as the subagent, configured with the specified LLM configuration and base flags.\n\n Args:\n llm_config (str): The LLM configuration key to use from CHAT_MODEL_ARGS_DICT.\n\n Returns:\n HumanInTheLoopAgentArgs: Configured human-in-the-loop agent arguments with\n a multi-candidate generic agent as the subagent.\n \"\"\"\n from agentlab.agents.generic_agent.tmlr_config import BASE_FLAGS\n from agentlab.agents.hitl_agent.hitl_agent import HumanInTheLoopAgentArgs\n from agentlab.agents.hitl_agent.multi_candidate_generic_agent import (\n MultiCandidateGenericAgentArgs,\n )\n from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\n return HumanInTheLoopAgentArgs(\n subagent_args=MultiCandidateGenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=BASE_FLAGS,\n )\n )\n\n\nHUMAN_GUIDED_GENERIC_AGENT = get_base_human_in_the_loop_genericagent(\"openai/gpt-5-mini-2025-08-07\")\n\nif __name__ == \"__main__\":\n import logging\n\n from agentlab.agents.hitl_agent.hitl_agent import (\n HUMAN_GUIDED_GENERIC_AGENT,\n )\n from agentlab.experiments.study import Study\n\n agent_configs = [HUMAN_GUIDED_GENERIC_AGENT]\n benchmark = bgym.DEFAULT_BENCHMARKS[\"miniwob\"]()\n benchmark = benchmark.subset_from_glob(\"task_name\", \"*book*\")\n benchmark.env_args_list = benchmark.env_args_list[2:3]\n\n for env_args in benchmark.env_args_list:\n env_args.max_steps = 100 # max human steps\n env_args.headless = False","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} 
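For orientation, the two response shapes that `wait_for_response` documents above, and that `HumanInTheLoopAgent.get_action` dispatches on, can be sketched as follows; only the `{"type", "payload"}` structure comes from the docstring, the literal values are invented:

```python
# Illustrative payloads; the dict structure is taken from the
# wait_for_response docstring, the concrete strings are made up.
reprompt_response = {
    "type": "reprompt",
    "payload": {"hints": ["Open the date picker before typing"]},
}
step_response = {
    "type": "step",
    "payload": {"think": "The submit button is enabled now.", "action": 'click("b42")'},
}

for response in (reprompt_response, step_response):
    if response["type"] == "reprompt":
        # get_action regenerates candidates with the new hint list
        hints = response["payload"].get("hints", [])
    elif response["type"] == "step":
        # get_action returns the candidate whose action matches this string
        selected_action = response["payload"]["action"]
```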
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hitl_agent.__init__#L21-L27","kind":"function","name":"__init__","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":21,"end_line":27,"context_start_line":1,"context_end_line":47,"code":"from dataclasses import dataclass\nfrom typing import Optional\n\nimport bgym\nimport playwright\nfrom browsergym.experiments.agent import Agent\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.agents.agent_utils import overlay_action\nfrom agentlab.agents.hitl_agent.base_multi_candidate_agent import MultiCandidateAgent\nfrom agentlab.agents.hitl_agent.hint_labelling import (\n HintLabeling,\n HintLabelingInputs,\n)\nfrom agentlab.llm.llm_utils import img_to_base_64\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\nclass HumanInTheLoopAgent(Agent):\n\n def __init__(\n self,\n subagent_args, # Type: any object with MultiCandidateAgent interface\n ):\n self.subagent: MultiCandidateAgent = subagent_args.make_agent()\n super().__init__()\n self.ui = None\n\n @cost_tracker_decorator\n def get_action(self, obs):\n # reset vars\n step_n_human_intervention_rounds = 0\n step_hint = []\n\n # Initialize UI once outside the loop\n if self.ui is None:\n self.ui = HintLabeling(headless=False)\n # Show initial waiting state\n initial_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=\"\",\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=[], # no overlay screenshots yet","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hitl_agent.get_action#L30-L125","kind":"function","name":"get_action","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":30,"end_line":125,"context_start_line":10,"context_end_line":145,"code":"from agentlab.agents.hitl_agent.base_multi_candidate_agent import MultiCandidateAgent\nfrom agentlab.agents.hitl_agent.hint_labelling import (\n HintLabeling,\n HintLabelingInputs,\n)\nfrom agentlab.llm.llm_utils import img_to_base_64\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\nclass HumanInTheLoopAgent(Agent):\n\n def __init__(\n self,\n subagent_args, # Type: any object with MultiCandidateAgent interface\n ):\n self.subagent: MultiCandidateAgent = subagent_args.make_agent()\n super().__init__()\n self.ui = None\n\n @cost_tracker_decorator\n def get_action(self, obs):\n # reset vars\n step_n_human_intervention_rounds = 0\n step_hint = []\n\n # Initialize UI once outside the loop\n if self.ui is None:\n self.ui = HintLabeling(headless=False)\n # Show initial waiting state\n initial_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=\"\",\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=[], # no overlay screenshots yet\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=[],\n suggestions=[], # no suggestions yet\n )\n self.ui.update_context(initial_inputs)\n\n # Generate first candidates\n candidates = 
self.subagent.get_candidate_generations(obs, hint=None, n_candidates=3)\n step_n_human_intervention_rounds += 1\n suggestions = [{\"action\": c[\"action\"], \"think\": c[\"agent_info\"].think} for c in candidates]\n # List of Images as base64 - create overlay screenshots for each suggested action\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n\n while True:\n try:\n hint_labeling_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=obs.get(\"last_action_error\", \"\"),\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=screenshots, # list of overlay screenshots for hover\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=step_hint,\n suggestions=suggestions,\n )\n\n self.ui.update_context(hint_labeling_inputs)\n response = self.ui.wait_for_response(timeout=600)\n\n if response[\"type\"] == \"reprompt\":\n new_hints = response[\"payload\"].get(\"hints\", [])\n # Replace with the new list from UI, or extend if needed\n step_hint = list(new_hints) if isinstance(new_hints, list) else step_hint\n candidates = self.subagent.get_candidate_generations(\n obs, hint=step_hint if step_hint else None, n_candidates=3\n )\n step_n_human_intervention_rounds += 1\n suggestions = [\n {\"action\": c[\"action\"], \"think\": c[\"agent_info\"].think} for c in candidates\n ]\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n\n elif response[\"type\"] == \"step\":\n selected_action = response[\"payload\"][\"action\"]\n choice_idx = None\n for i, candidate in enumerate(suggestions):\n if candidate[\"action\"] == selected_action:\n choice_idx = i\n break\n if choice_idx is None:\n # Defensive default: fall back to the first candidate if the UI\n # returns an action that matches none of the suggestions\n choice_idx = 0\n selected_candidate = candidates[choice_idx]\n self.subagent.update_agent_state_from_selected_candidate(selected_candidate)\n action = selected_candidate[\"action\"]\n agent_info = selected_candidate[\"agent_info\"]\n return action, agent_info\n\n except KeyboardInterrupt:\n print(\"User cancelled the operation\")\n if self.ui:\n self.ui.close()\n raise\n except playwright.sync_api.TimeoutError:\n # Handle timeout specifically: fall back to first candidate\n print(\"UI timeout; falling back to first candidate.\")\n selected_candidate = candidates[0]\n self.subagent.update_agent_state_from_selected_candidate(selected_candidate)\n action = selected_candidate[\"action\"]\n agent_info = selected_candidate[\"agent_info\"]\n return action, agent_info\n except Exception as e:\n print(f\"Error in human intervention UI: {e}\")\n if self.ui:\n self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n\n@dataclass\nclass HumanInTheLoopAgentArgs(AgentArgs):\n subagent_args: Optional[AgentArgs] = None # args for the underlying multiple proposal agent\n\n def make_agent(self):\n assert self.subagent_args is not None\n return HumanInTheLoopAgent(subagent_args=self.subagent_args)\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if self.subagent_args and self.subagent_args.agent_name:\n self.agent_name = \"HITL-\" + self.subagent_args.agent_name\n\n def set_benchmark(self, benchmark, demo_mode):\n \"\"\"Delegate set_benchmark to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_benchmark\"):\n self.subagent_args.set_benchmark(benchmark, 
demo_mode)","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hitl_agent.make_agent#L132-L134","kind":"function","name":"make_agent","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":132,"end_line":134,"context_start_line":112,"context_end_line":154,"code":" # Handle timeout specifically: fall back to first candidate\n print(\"UI timeout; falling back to first candidate.\")\n selected_candidate = candidates[0]\n self.subagent.update_agent_state_from_selected_candidate(selected_candidate)\n action = selected_candidate[\"action\"]\n agent_info = selected_candidate[\"agent_info\"]\n return action, agent_info\n except Exception as e:\n print(f\"Error in human intervention UI: {e}\")\n if self.ui:\n self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n\n@dataclass\nclass HumanInTheLoopAgentArgs(AgentArgs):\n subagent_args: Optional[AgentArgs] = None # args for the underlying multiple proposal agent\n\n def make_agent(self):\n assert self.subagent_args is not None\n return HumanInTheLoopAgent(subagent_args=self.subagent_args)\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if self.subagent_args and self.subagent_args.agent_name:\n self.agent_name = \"HITL-\" + self.subagent_args.agent_name\n\n def set_benchmark(self, benchmark, demo_mode):\n \"\"\"Delegate set_benchmark to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_benchmark\"):\n self.subagent_args.set_benchmark(benchmark, demo_mode)\n\n def set_reproducibility_mode(self):\n \"\"\"Delegate set_reproducibility_mode to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_reproducibility_mode\"):\n self.subagent_args.set_reproducibility_mode()\n\n\ndef get_base_human_in_the_loop_genericagent(llm_config):\n \"\"\"","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent.__post_init__","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hitl_agent.__post_init__#L136-L140","kind":"function","name":"__post_init__","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":136,"end_line":140,"context_start_line":116,"context_end_line":160,"code":" action = selected_candidate[\"action\"]\n agent_info = selected_candidate[\"agent_info\"]\n return action, agent_info\n except Exception as e:\n print(f\"Error in human intervention UI: {e}\")\n if self.ui:\n self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n\n@dataclass\nclass HumanInTheLoopAgentArgs(AgentArgs):\n subagent_args: Optional[AgentArgs] = None # args for the underlying multiple proposal agent\n\n def make_agent(self):\n assert self.subagent_args is not None\n return HumanInTheLoopAgent(subagent_args=self.subagent_args)\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if self.subagent_args and self.subagent_args.agent_name:\n self.agent_name = \"HITL-\" + self.subagent_args.agent_name\n\n def 
set_benchmark(self, benchmark, demo_mode):\n \"\"\"Delegate set_benchmark to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_benchmark\"):\n self.subagent_args.set_benchmark(benchmark, demo_mode)\n\n def set_reproducibility_mode(self):\n \"\"\"Delegate set_reproducibility_mode to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_reproducibility_mode\"):\n self.subagent_args.set_reproducibility_mode()\n\n\ndef get_base_human_in_the_loop_genericagent(llm_config):\n \"\"\"\n Create a base human-in-the-loop generic agent configuration using the key from CHAT_MODEL_ARGS_DICT.\n\n This function creates a HumanInTheLoopAgentArgs instance with a MultiCandidateGenericAgent\n as the subagent, configured with the specified LLM configuration and base flags.\n\n Args:","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent.set_benchmark","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hitl_agent.set_benchmark#L142-L145","kind":"function","name":"set_benchmark","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":142,"end_line":145,"context_start_line":122,"context_end_line":165,"code":" self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n\n@dataclass\nclass HumanInTheLoopAgentArgs(AgentArgs):\n subagent_args: Optional[AgentArgs] = None # args for the underlying multiple proposal agent\n\n def make_agent(self):\n assert self.subagent_args is not None\n return HumanInTheLoopAgent(subagent_args=self.subagent_args)\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if self.subagent_args and self.subagent_args.agent_name:\n self.agent_name = \"HITL-\" + self.subagent_args.agent_name\n\n def set_benchmark(self, benchmark, demo_mode):\n \"\"\"Delegate set_benchmark to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_benchmark\"):\n self.subagent_args.set_benchmark(benchmark, demo_mode)\n\n def set_reproducibility_mode(self):\n \"\"\"Delegate set_reproducibility_mode to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_reproducibility_mode\"):\n self.subagent_args.set_reproducibility_mode()\n\n\ndef get_base_human_in_the_loop_genericagent(llm_config):\n \"\"\"\n Create a base human-in-the-loop generic agent configuration using the key from CHAT_MODEL_ARGS_DICT.\n\n This function creates a HumanInTheLoopAgentArgs instance with a MultiCandidateGenericAgent\n as the subagent, configured with the specified LLM configuration and base flags.\n\n Args:\n llm_config (str): The LLM configuration key to use from CHAT_MODEL_ARGS_DICT.\n\n Returns:\n HumanInTheLoopAgentArgs: Configured human-in-the-loop agent arguments with\n a multi-candidate generic agent as the subagent.","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} 
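A minimal launch sketch for the wrapper above, mirroring the `__main__` block of `hitl_agent.py`; it assumes the `openai/gpt-5-mini-2025-08-07` key is registered in `CHAT_MODEL_ARGS_DICT` and that a MiniWoB setup is available:

```python
# Minimal sketch, assuming agentlab and bgym are installed and the LLM key
# below exists in CHAT_MODEL_ARGS_DICT (both taken from the module's __main__).
import logging

import bgym

from agentlab.agents.hitl_agent.hitl_agent import (
    get_base_human_in_the_loop_genericagent,
)
from agentlab.experiments.study import Study

agent_args = get_base_human_in_the_loop_genericagent("openai/gpt-5-mini-2025-08-07")
print(agent_args.agent_name)  # subagent name prefixed with "HITL-" by __post_init__

benchmark = bgym.DEFAULT_BENCHMARKS["miniwob"]()
benchmark = benchmark.subset_from_glob("task_name", "*book*")

# One human and one labeling UI: HITL studies must run sequentially.
Study([agent_args], benchmark, logging_level=logging.WARNING).run(
    n_jobs=1,
    parallel_backend="sequential",
    n_relaunch=1,
)
```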
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.hitl_agent.set_reproducibility_mode","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.hitl_agent.set_reproducibility_mode#L147-L150","kind":"function","name":"set_reproducibility_mode","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":147,"end_line":150,"context_start_line":127,"context_end_line":170,"code":"\n@dataclass\nclass HumanInTheLoopAgentArgs(AgentArgs):\n subagent_args: Optional[AgentArgs] = None # args for the underlying multiple proposal agent\n\n def make_agent(self):\n assert self.subagent_args is not None\n return HumanInTheLoopAgent(subagent_args=self.subagent_args)\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if self.subagent_args and self.subagent_args.agent_name:\n self.agent_name = \"HITL-\" + self.subagent_args.agent_name\n\n def set_benchmark(self, benchmark, demo_mode):\n \"\"\"Delegate set_benchmark to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_benchmark\"):\n self.subagent_args.set_benchmark(benchmark, demo_mode)\n\n def set_reproducibility_mode(self):\n \"\"\"Delegate set_reproducibility_mode to the subagent if it has the method.\"\"\"\n if hasattr(self.subagent_args, \"set_reproducibility_mode\"):\n self.subagent_args.set_reproducibility_mode()\n\n\ndef get_base_human_in_the_loop_genericagent(llm_config):\n \"\"\"\n Create a base human-in-the-loop generic agent configuration using the key from CHAT_MODEL_ARGS_DICT.\n\n This function creates a HumanInTheLoopAgentArgs instance with a MultiCandidateGenericAgent\n as the subagent, configured with the specified LLM configuration and base flags.\n\n Args:\n llm_config (str): The LLM configuration key to use from CHAT_MODEL_ARGS_DICT.\n\n Returns:\n HumanInTheLoopAgentArgs: Configured human-in-the-loop agent arguments with\n a multi-candidate generic agent as the subagent.\n \"\"\"\n from agentlab.agents.generic_agent.tmlr_config import BASE_FLAGS\n from agentlab.agents.hitl_agent.hitl_agent import HumanInTheLoopAgentArgs\n from agentlab.agents.hitl_agent.multi_candidate_generic_agent import (\n MultiCandidateGenericAgentArgs,","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent","uri":"program://AgentLab/module/src.agentlab.agents.hitl_agent.generic_human_guided_agent#L1-L362","kind":"module","name":"src.agentlab.agents.hitl_agent.generic_human_guided_agent","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":1,"end_line":362,"context_start_line":1,"context_end_line":362,"code":"import base64\nimport copy\nimport io\nimport re\nfrom dataclasses import Field, asdict, dataclass\nfrom typing import Dict, List\n\nimport bgym\nimport numpy as np\nfrom browsergym.experiments.agent import AgentInfo\nfrom PIL import Image\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_utils import overlay_action\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgent, GenericAgentArgs\nfrom agentlab.agents.generic_agent.generic_agent_prompt import MainPrompt\nfrom agentlab.agents.hitl_agent.hint_labelling import (\n HintLabeling,\n HintLabelingInputs,\n)\nfrom agentlab.llm.llm_utils import (\n Discussion,\n HumanMessage,\n SystemMessage,\n img_to_base_64,\n)\nfrom agentlab.llm.tracking 
import cost_tracker_decorator\n\n\nclass CandidatesGeneration(dp.PromptElement):\n # Ask for multiple alternatives; each candidate must contain <think> and <action>.\n def __init__(self, hint: list[str] | None = None, n_candidates=3) -> None:\n self.hint = hint\n self.n_candidates = n_candidates\n self.hint_prompt = \"\\n\".join(f\"{i}. {c}\" for i, c in enumerate(hint, 1)) if hint else \"\"\n super().__init__(True)\n self._prompt = [\n dict(\n type=\"text\",\n text=f\"\"\"\n You are a web agent. Propose {self.n_candidates} alternative next steps for the current page.\n {('Use the Hints:' + self.hint_prompt) if self.hint else \"\"}\\n\n Return EACH candidate wrapped as numbered tags:\n <candidate_generation_1>...</candidate_generation_1>\n <candidate_generation_2>...</candidate_generation_2>\n\n Inside every candidate you MUST include:\n <think>...why this action is appropriate now...</think>\n <action>...ONE atomic, executable action string...</action>\n\n Do not include any extra text outside the candidate tags.\n Use this format:\n <candidate_generation_1>\n <think>Explain why Candidate One is chosen</think>\n <action>Candidate One Action</action>\n </candidate_generation_1>\n\n <candidate_generation_2>\n <think>Explain why Candidate Two is chosen</think>\n <action>Candidate Two Action</action>\n </candidate_generation_2>\n # Example\n <candidate_generation_1>\n <think>The login button is visible and proceeding will reveal the auth form.</think>\n <action>click(role=\"button\", name=\"Log in\")</action>\n </candidate_generation_1>\n\n <candidate_generation_2>\n <think>User might need to enter email first; the email field is focused and visible.</think>\n <action>fill(bid=\"a112\", text=\"user@example.com\")</action>\n </candidate_generation_2>\n \"\"\",\n )\n ]\n\n # Regex patterns for numbered candidates only\n _NUM_BLOCK = re.compile(\n r\"<\\s*candidate[_ ]generation[_ ](?P<idx>[0-9]+)\\s*>(?P<body>.*?)<\\s*/\\s*candidate[_ ]generation[_ ](?P=idx)\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _THINK_PATTERN = re.compile(\n r\"<\\s*think\\s*>(?P<think>.*?)<\\s*/\\s*think\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _ACTION_PATTERN = re.compile(\n r\"<\\s*action\\s*>(?P<action>.*?)<\\s*/\\s*action\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n\n def _parse_answer(self, text_answer: str) -> Dict[str, Dict[str, str]]:\n \"\"\"Extract up to n_candidates candidates, using numbered tags only.\n\n Args:\n text_answer: The text response containing candidate generation tags.\n\n Returns:\n Dictionary mapping candidate names to their think and action content.\n Format: {\"candidate_generation_1\": {\"think\": \"...\", \"action\": \"...\"}, ...}\n \"\"\"\n result = {\n f\"candidate_generation_{i+1}\": {\"think\": \"\", \"action\": \"\"}\n for i in range(self.n_candidates)\n }\n\n if not isinstance(text_answer, str):\n return result\n\n matches: List[re.Match] = list(self._NUM_BLOCK.finditer(text_answer))\n # Sort by numeric index\n matches_sorted = sorted(matches, key=lambda m: int(m.group(\"idx\")))\n for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\n@dataclass\nclass MultipleProposalGenericAgentArgs(GenericAgentArgs):\n\n def make_agent(self):\n return MultipleProposalGenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"HITL-\" + self.agent_name\n\n\nclass MultipleProposalGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n 
super().__init__(chat_model_args, flags, max_retry)\n self.ui = None # Single HintLabeling instance\n\n def get_candidate_generation(\n self,\n sys_prompt: SystemMessage,\n human_prompt: HumanMessage,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> tuple[Dict[str, Dict[str, str]], Discussion]:\n\n cg = CandidatesGeneration(hint=hint, n_candidates=n_candidates)\n candidates_prompt = HumanMessage(cg.prompt)\n chat_messages = Discussion([sys_prompt, human_prompt, candidates_prompt])\n output = self.chat_llm(chat_messages)\n candidates = cg._parse_answer(output[\"content\"])\n self.step_n_human_intervention_rounds += 1\n msg_to_add_to_xray = Discussion([sys_prompt, human_prompt])\n\n return candidates, msg_to_add_to_xray\n\n @cost_tracker_decorator\n def get_action(self, obs):\n # reset vars\n step_hint = []\n self.step_n_human_intervention_rounds = 0\n self.obs_history.append(obs)\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n\n max_prompt_tokens, max_trunc_itr = self._get_maxes()\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n\n human_prompt = dp.fit_tokens(\n shrinkable=main_prompt,\n max_prompt_tokens=max_prompt_tokens,\n model_name=self.chat_model_args.model_name,\n max_iterations=max_trunc_itr,\n additional_prompts=system_prompt,\n )\n # Initialize UI once outside the loop\n if self.ui is None:\n self.ui = HintLabeling(headless=False)\n # Show initial waiting state\n initial_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=\"\",\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=[], # no overlay screenshots yet\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=[],\n suggestions=[], # no suggestions yet\n )\n self.ui.update_context(initial_inputs)\n\n # Generate first candidates\n candidates, chat_messages = self.get_candidate_generation(\n sys_prompt=system_prompt,\n human_prompt=human_prompt,\n hint=step_hint if step_hint else None,\n )\n suggestions = [\n {\n \"id\": key.split(\"_\")[-1],\n \"action\": candidate[\"action\"],\n \"think\": candidate[\"think\"],\n }\n for key, candidate in candidates.items()\n ]\n # List of Images as base64 - create overlay screenshots for each suggestion\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n\n while True:\n try:\n hint_labeling_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=obs.get(\"last_action_error\", \"\"),\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=screenshots, # list of overlay screenshots for hover\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=step_hint,\n suggestions=suggestions,\n )\n\n self.ui.update_context(hint_labeling_inputs)\n response = self.ui.wait_for_response(timeout=600)\n\n if response[\"type\"] == \"reprompt\":\n new_hints = response[\"payload\"].get(\"hints\", [])\n step_hint = list(new_hints) if isinstance(new_hints, list) else step_hint\n candidates, chat_messages = self.get_candidate_generation(\n sys_prompt=system_prompt,\n human_prompt=human_prompt,\n hint=step_hint if step_hint else None,\n )\n suggestions = [\n {\n \"id\": 
key.split(\"_\")[-1],\n \"action\": candidate[\"action\"],\n \"think\": candidate[\"think\"],\n }\n for key, candidate in candidates.items()\n ]\n # Regenerate screenshots for new suggestions\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n # Continue the loop to show new suggestions\n elif response[\"type\"] == \"step\":\n selected_action = response[\"payload\"][\"action\"]\n choice_idx = None\n for i, candidate in enumerate(suggestions, 1):\n if candidate[\"action\"] == selected_action:\n choice_idx = i\n break\n if choice_idx is None:\n choice_idx = 1\n ans_dict = candidates[f\"candidate_generation_{choice_idx}\"]\n break\n else:\n ans_dict = candidates[\"candidate_generation_1\"]\n break\n\n except KeyboardInterrupt:\n print(\"User cancelled the operation\")\n if self.ui:\n self.ui.close()\n raise\n except Exception as e:\n print(f\"Error in human intervention UI: {e}\")\n if self.ui:\n self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n # TODO: Refactor as discussed with ALAC.\n stats = self.chat_llm.get_stats()\n self.plan = ans_dict.get(\"plan\", self.plan)\n self.plan_step = ans_dict.get(\"step\", self.plan_step)\n self.actions.append(ans_dict[\"action\"])\n self.memories.append(ans_dict.get(\"memory\", None))\n self.thoughts.append(ans_dict.get(\"think\", None))\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\n \"chat_model_args\": asdict(self.chat_model_args),\n \"step_hints\": step_hint,\n \"n_human_intervention_rounds\": self.step_n_human_intervention_rounds,\n \"candidates\": candidates,\n \"suggestions\": suggestions,\n },\n )\n return ans_dict[\"action\"], agent_info\n\n\ndef get_base_agent(llm_config):\n \"\"\"Creates and returns a MultipleProposalGenericAgentArgs instance with\n specified LLM configuration from CHAT_MODEL_ARGS_DICT.\n\n Args:\n llm_config: The LLM configuration key to use from CHAT_MODEL_ARGS_DICT.\n\n Returns:\n MultipleProposalGenericAgentArgs: Configured agent arguments instance.\n \"\"\"\n\n from agentlab.agents.generic_agent.tmlr_config import BASE_FLAGS\n from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\n return MultipleProposalGenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=BASE_FLAGS,\n )\n\n\nHUMAN_GUIDED_GENERIC_AGENT = get_base_agent(\"openai/gpt-5-mini-2025-08-07\")\n\nif __name__ == \"__main__\":\n import logging\n\n from agentlab.agents.hitl_agent.generic_human_guided_agent import (\n HUMAN_GUIDED_GENERIC_AGENT,\n )\n from agentlab.experiments.study import Study\n\n agent_configs = [HUMAN_GUIDED_GENERIC_AGENT]\n benchmark = bgym.DEFAULT_BENCHMARKS[\"miniwob\"]()\n benchmark = benchmark.subset_from_glob(\"task_name\", \"*book*\")\n benchmark.env_args_list = benchmark.env_args_list[3:4]\n\n for env_args in benchmark.env_args_list:\n env_args.max_steps = 100 # max human steps\n env_args.headless = True\n\n Study(agent_configs, benchmark, logging_level=logging.WARNING).run(\n n_jobs=1,\n parallel_backend=\"sequential\",\n n_relaunch=1,\n )","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent.CandidatesGeneration","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.generic_human_guided_agent.CandidatesGeneration#L30-L120","kind":"class","name":"CandidatesGeneration","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":30,"end_line":120,"context_start_line":10,"context_end_line":140,"code":"from browsergym.experiments.agent import AgentInfo\nfrom PIL import Image\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_utils import overlay_action\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgent, GenericAgentArgs\nfrom agentlab.agents.generic_agent.generic_agent_prompt import MainPrompt\nfrom agentlab.agents.hitl_agent.hint_labelling import (\n HintLabeling,\n HintLabelingInputs,\n)\nfrom agentlab.llm.llm_utils import (\n Discussion,\n HumanMessage,\n SystemMessage,\n img_to_base_64,\n)\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\nclass CandidatesGeneration(dp.PromptElement):\n # Ask for multiple alternatives; each candidate must contain and .\n def __init__(self, hint: list[str] | None = None, n_candidates=3) -> None:\n self.hint = hint\n self.n_candidates = n_candidates\n self.hint_prompt = \"\\n\".join(f\"{i}. {c}\" for i, c in enumerate(hint, 1)) if hint else \"\"\n super().__init__(True)\n self._prompt = [\n dict(\n type=\"text\",\n text=f\"\"\"\n You are a web agent. Propose {self.n_candidates} alternative next steps for the current page.\n {('Use the Hints:' + self.hint_prompt) if self.hint else \"\"}\\n\n Return EACH candidate wrapped as numbered tags:\n ...\n ...\n\n Inside every candidate you MUST include:\n ...why this action is appropriate now...\n ...ONE atomic, executable action string...\n\n Do not include any extra text outside the candidate tags.\n Use this format:\n \n Explain why Candidate One is chosen\n Candidate One Action\n \n\n \n Explain why Candidate Two is chosen\n Candidate Two Action\n \n # Example \n \n The login button is visible and proceeding will reveal the auth form.\n click(role=\"button\", name=\"Log in\")\n \n\n \n User might need to enter email first; the email field is focused and visible.\n fill(bid=\"a112\", text=\"user@example.com\")\n \n \"\"\",\n )\n ]\n\n # Regex patterns for numbered candidates only\n _NUM_BLOCK = re.compile(\n r\"<\\s*candidate[_ ]generation[_ ](?P[0-9]+)\\s*>(?P.*?)<\\s*/\\s*candidate[_ ]generation[_ ](?P=idx)\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _THINK_PATTERN = re.compile(\n r\"<\\s*think\\s*>(?P.*?)<\\s*/\\s*think\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _ACTION_PATTERN = re.compile(\n r\"<\\s*action\\s*>(?P.*?)<\\s*/\\s*action\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n\n def _parse_answer(self, text_answer: str) -> Dict[str, Dict[str, str]]:\n \"\"\"Extract up to n_candidates candidates, using numbered tags only.\n\n Args:\n text_answer: The text response containing candidate generation tags.\n\n Returns:\n Dictionary mapping candidate names to their think and action content.\n Format: {\"candidate_generation_1\": {\"think\": \"...\", \"action\": \"...\"}, ...}\n \"\"\"\n result = {\n f\"candidate_generation_{i+1}\": {\"think\": \"\", \"action\": \"\"}\n for i in range(self.n_candidates)\n }\n\n if not isinstance(text_answer, str):\n return result\n\n matches: List[re.Match] = list(self._NUM_BLOCK.finditer(text_answer))\n # Sort by numeric index\n matches_sorted = 
sorted(matches, key=lambda m: int(m.group(\"idx\")))\n for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\n@dataclass\nclass MultipleProposalGenericAgentArgs(GenericAgentArgs):\n\n def make_agent(self):\n return MultipleProposalGenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"HITL-\" + self.agent_name\n\n\nclass MultipleProposalGenericAgent(GenericAgent):\n\n def __init__(","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent.MultipleProposalGenericAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.generic_human_guided_agent.MultipleProposalGenericAgentArgs#L124-L135","kind":"class","name":"MultipleProposalGenericAgentArgs","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":124,"end_line":135,"context_start_line":104,"context_end_line":155,"code":"\n if not isinstance(text_answer, str):\n return result\n\n matches: List[re.Match] = list(self._NUM_BLOCK.finditer(text_answer))\n # Sort by numeric index\n matches_sorted = sorted(matches, key=lambda m: int(m.group(\"idx\")))\n for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\n@dataclass\nclass MultipleProposalGenericAgentArgs(GenericAgentArgs):\n\n def make_agent(self):\n return MultipleProposalGenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"HITL-\" + self.agent_name\n\n\nclass MultipleProposalGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n self.ui = None # Single HintLabeling instance\n\n def get_candidate_generation(\n self,\n sys_prompt: SystemMessage,\n human_prompt: HumanMessage,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> tuple[Dict[str, Dict[str, str]], Discussion]:","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} 
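The hint list handed back by the UI is rendered into the candidates prompt by the one-liner in `CandidatesGeneration.__init__`; a quick sketch of that rendering with invented hints:

```python
# Sketch of the hint rendering used by CandidatesGeneration.__init__ above:
# a list of human hints becomes a numbered block injected into the prompt.
hints = ["Prefer the search box over pagination", "Save the form as the last step"]
hint_prompt = "\n".join(f"{i}. {c}" for i, c in enumerate(hints, 1))
print(hint_prompt)
# 1. Prefer the search box over pagination
# 2. Save the form as the last step
```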
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent.MultipleProposalGenericAgent","uri":"program://AgentLab/class/src.agentlab.agents.hitl_agent.generic_human_guided_agent.MultipleProposalGenericAgent#L138-L316","kind":"class","name":"MultipleProposalGenericAgent","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":138,"end_line":316,"context_start_line":118,"context_end_line":336,"code":" }\n\n return result\n\n\n@dataclass\nclass MultipleProposalGenericAgentArgs(GenericAgentArgs):\n\n def make_agent(self):\n return MultipleProposalGenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"HITL-\" + self.agent_name\n\n\nclass MultipleProposalGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n self.ui = None # Single HintLabeling instance\n\n def get_candidate_generation(\n self,\n sys_prompt: SystemMessage,\n human_prompt: HumanMessage,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> tuple[Dict[str, Dict[str, str]], Discussion]:\n\n cg = CandidatesGeneration(hint=hint, n_candidates=n_candidates)\n candidates_prompt = HumanMessage(cg.prompt)\n chat_messages = Discussion([sys_prompt, human_prompt, candidates_prompt])\n output = self.chat_llm(chat_messages)\n candidates = cg._parse_answer(output[\"content\"])\n self.step_n_human_intervention_rounds += 1\n msg_to_add_to_xray = Discussion([sys_prompt, human_prompt])\n\n return candidates, msg_to_add_to_xray\n\n @cost_tracker_decorator\n def get_action(self, obs):\n # reset vars\n step_hint = []\n self.step_n_human_intervention_rounds = 0\n self.obs_history.append(obs)\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n\n max_prompt_tokens, max_trunc_itr = self._get_maxes()\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n\n human_prompt = dp.fit_tokens(\n shrinkable=main_prompt,\n max_prompt_tokens=max_prompt_tokens,\n model_name=self.chat_model_args.model_name,\n max_iterations=max_trunc_itr,\n additional_prompts=system_prompt,\n )\n # Initialize UI once outside the loop\n if self.ui is None:\n self.ui = HintLabeling(headless=False)\n # Show initial waiting state\n initial_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=\"\",\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=[], # no overlay screenshots yet\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=[],\n suggestions=[], # no suggestions yet\n )\n self.ui.update_context(initial_inputs)\n\n # Generate first candidates\n candidates, chat_messages = self.get_candidate_generation(\n sys_prompt=system_prompt,\n human_prompt=human_prompt,\n hint=step_hint if step_hint else None,\n )\n suggestions = [\n {\n \"id\": key.split(\"_\")[-1],\n \"action\": candidate[\"action\"],\n \"think\": candidate[\"think\"],\n }\n for key, candidate in candidates.items()\n ]\n # List of Images as base64 - create overlay 
screenshots for each suggestion\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n\n while True:\n try:\n hint_labeling_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=obs.get(\"last_action_error\", \"\"),\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=screenshots, # list of overlay screenshots for hover\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=step_hint,\n suggestions=suggestions,\n )\n\n self.ui.update_context(hint_labeling_inputs)\n response = self.ui.wait_for_response(timeout=600)\n\n if response[\"type\"] == \"reprompt\":\n new_hints = response[\"payload\"].get(\"hints\", [])\n step_hint = list(new_hints) if isinstance(new_hints, list) else step_hint\n candidates, chat_messages = self.get_candidate_generation(\n sys_prompt=system_prompt,\n human_prompt=human_prompt,\n hint=step_hint if step_hint else None,\n )\n suggestions = [\n {\n \"id\": key.split(\"_\")[-1],\n \"action\": candidate[\"action\"],\n \"think\": candidate[\"think\"],\n }\n for key, candidate in candidates.items()\n ]\n # Regenerate screenshots for new suggestions\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n # Continue the loop to show new suggestions\n elif response[\"type\"] == \"step\":\n selected_action = response[\"payload\"][\"action\"]\n choice_idx = None\n for i, candidate in enumerate(suggestions, 1):\n if candidate[\"action\"] == selected_action:\n choice_idx = i\n break\n if choice_idx is None:\n choice_idx = 1\n ans_dict = candidates[f\"candidate_generation_{choice_idx}\"]\n break\n else:\n ans_dict = candidates[\"candidate_generation_1\"]\n break\n\n except KeyboardInterrupt:\n print(\"User cancelled the operation\")\n if self.ui:\n self.ui.close()\n raise\n except Exception as e:\n print(f\"Error in human intervention UI: {e}\")\n if self.ui:\n self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n # TODO: Refactor as discussed with ALAC.\n stats = self.chat_llm.get_stats()\n self.plan = ans_dict.get(\"plan\", self.plan)\n self.plan_step = ans_dict.get(\"step\", self.plan_step)\n self.actions.append(ans_dict[\"action\"])\n self.memories.append(ans_dict.get(\"memory\", None))\n self.thoughts.append(ans_dict.get(\"think\", None))\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\n \"chat_model_args\": asdict(self.chat_model_args),\n \"step_hints\": step_hint,\n \"n_human_intervention_rounds\": self.step_n_human_intervention_rounds,\n \"candidates\": candidates,\n \"suggestions\": suggestions,\n },\n )\n return ans_dict[\"action\"], agent_info\n\n\ndef get_base_agent(llm_config):\n \"\"\"Creates and returns a MultipleProposalGenericAgentArgs instance with\n specified LLM configuration from CHAT_MODEL_ARGS_DICT.\n\n Args:\n llm_config: The LLM configuration key to use from CHAT_MODEL_ARGS_DICT.\n\n Returns:\n MultipleProposalGenericAgentArgs: Configured agent arguments instance.\n \"\"\"\n\n from agentlab.agents.generic_agent.tmlr_config import BASE_FLAGS\n from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\n return MultipleProposalGenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=BASE_FLAGS,\n 
)","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent.get_base_agent","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.generic_human_guided_agent.get_base_agent#L319-L336","kind":"function","name":"get_base_agent","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":319,"end_line":336,"context_start_line":299,"context_end_line":356,"code":" self.plan = ans_dict.get(\"plan\", self.plan)\n self.plan_step = ans_dict.get(\"step\", self.plan_step)\n self.actions.append(ans_dict[\"action\"])\n self.memories.append(ans_dict.get(\"memory\", None))\n self.thoughts.append(ans_dict.get(\"think\", None))\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\n \"chat_model_args\": asdict(self.chat_model_args),\n \"step_hints\": step_hint,\n \"n_human_intervention_rounds\": self.step_n_human_intervention_rounds,\n \"candidates\": candidates,\n \"suggestions\": suggestions,\n },\n )\n return ans_dict[\"action\"], agent_info\n\n\ndef get_base_agent(llm_config):\n \"\"\"Creates and returns a MultipleProposalGenericAgentArgs instance with\n specified LLM configuration from CHAT_MODEL_ARGS_DICT.\n\n Args:\n llm_config: The LLM configuration key to use from CHAT_MODEL_ARGS_DICT.\n\n Returns:\n MultipleProposalGenericAgentArgs: Configured agent arguments instance.\n \"\"\"\n\n from agentlab.agents.generic_agent.tmlr_config import BASE_FLAGS\n from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\n return MultipleProposalGenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=BASE_FLAGS,\n )\n\n\nHUMAN_GUIDED_GENERIC_AGENT = get_base_agent(\"openai/gpt-5-mini-2025-08-07\")\n\nif __name__ == \"__main__\":\n import logging\n\n from agentlab.agents.hitl_agent.generic_human_guided_agent import (\n HUMAN_GUIDED_GENERIC_AGENT,\n )\n from agentlab.experiments.study import Study\n\n agent_configs = [HUMAN_GUIDED_GENERIC_AGENT]\n benchmark = bgym.DEFAULT_BENCHMARKS[\"miniwob\"]()\n benchmark = benchmark.subset_from_glob(\"task_name\", \"*book*\")\n benchmark.env_args_list = benchmark.env_args_list[3:4]\n\n for env_args in benchmark.env_args_list:\n env_args.max_steps = 100 # max human steps\n env_args.headless = True","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.generic_human_guided_agent.__init__#L140-L147","kind":"function","name":"__init__","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":140,"end_line":147,"context_start_line":120,"context_end_line":167,"code":" return result\n\n\n@dataclass\nclass MultipleProposalGenericAgentArgs(GenericAgentArgs):\n\n def make_agent(self):\n return MultipleProposalGenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"HITL-\" + self.agent_name\n\n\nclass MultipleProposalGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n 
):\n super().__init__(chat_model_args, flags, max_retry)\n self.ui = None # Single HintLabeling instance\n\n def get_candidate_generation(\n self,\n sys_prompt: SystemMessage,\n human_prompt: HumanMessage,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> tuple[Dict[str, Dict[str, str]], Discussion]:\n\n cg = CandidatesGeneration(hint=hint, n_candidates=n_candidates)\n candidates_prompt = HumanMessage(cg.prompt)\n chat_messages = Discussion([sys_prompt, human_prompt, candidates_prompt])\n output = self.chat_llm(chat_messages)\n candidates = cg._parse_answer(output[\"content\"])\n self.step_n_human_intervention_rounds += 1\n msg_to_add_to_xray = Discussion([sys_prompt, human_prompt])\n\n return candidates, msg_to_add_to_xray\n\n @cost_tracker_decorator","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent._parse_answer","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.generic_human_guided_agent._parse_answer#L90-L120","kind":"function","name":"_parse_answer","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":90,"end_line":120,"context_start_line":70,"context_end_line":140,"code":" fill(bid=\"a112\", text=\"user@example.com\")\n \n \"\"\",\n )\n ]\n\n # Regex patterns for numbered candidates only\n _NUM_BLOCK = re.compile(\n r\"<\\s*candidate[_ ]generation[_ ](?P[0-9]+)\\s*>(?P.*?)<\\s*/\\s*candidate[_ ]generation[_ ](?P=idx)\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _THINK_PATTERN = re.compile(\n r\"<\\s*think\\s*>(?P.*?)<\\s*/\\s*think\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n _ACTION_PATTERN = re.compile(\n r\"<\\s*action\\s*>(?P.*?)<\\s*/\\s*action\\s*>\",\n flags=re.IGNORECASE | re.DOTALL,\n )\n\n def _parse_answer(self, text_answer: str) -> Dict[str, Dict[str, str]]:\n \"\"\"Extract up to n_candidates candidates, using numbered tags only.\n\n Args:\n text_answer: The text response containing candidate generation tags.\n\n Returns:\n Dictionary mapping candidate names to their think and action content.\n Format: {\"candidate_generation_1\": {\"think\": \"...\", \"action\": \"...\"}, ...}\n \"\"\"\n result = {\n f\"candidate_generation_{i+1}\": {\"think\": \"\", \"action\": \"\"}\n for i in range(self.n_candidates)\n }\n\n if not isinstance(text_answer, str):\n return result\n\n matches: List[re.Match] = list(self._NUM_BLOCK.finditer(text_answer))\n # Sort by numeric index\n matches_sorted = sorted(matches, key=lambda m: int(m.group(\"idx\")))\n for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\n@dataclass\nclass MultipleProposalGenericAgentArgs(GenericAgentArgs):\n\n def make_agent(self):\n return MultipleProposalGenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"HITL-\" + self.agent_name\n\n\nclass MultipleProposalGenericAgent(GenericAgent):\n\n def 
__init__(","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.generic_human_guided_agent.make_agent#L126-L129","kind":"function","name":"make_agent","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":126,"end_line":129,"context_start_line":106,"context_end_line":149,"code":" return result\n\n matches: List[re.Match] = list(self._NUM_BLOCK.finditer(text_answer))\n # Sort by numeric index\n matches_sorted = sorted(matches, key=lambda m: int(m.group(\"idx\")))\n for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\n@dataclass\nclass MultipleProposalGenericAgentArgs(GenericAgentArgs):\n\n def make_agent(self):\n return MultipleProposalGenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"HITL-\" + self.agent_name\n\n\nclass MultipleProposalGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n self.ui = None # Single HintLabeling instance\n\n def get_candidate_generation(","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent.__post_init__","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.generic_human_guided_agent.__post_init__#L131-L135","kind":"function","name":"__post_init__","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":131,"end_line":135,"context_start_line":111,"context_end_line":155,"code":" for i, m in enumerate(matches_sorted[: self.n_candidates]):\n body = m.group(\"body\").strip()\n think_m = self._THINK_PATTERN.search(body)\n action_m = self._ACTION_PATTERN.search(body)\n result[f\"candidate_generation_{i+1}\"] = {\n \"think\": (think_m.group(\"think\").strip() if think_m else \"\"),\n \"action\": (action_m.group(\"action\").strip() if action_m else \"\"),\n }\n\n return result\n\n\n@dataclass\nclass MultipleProposalGenericAgentArgs(GenericAgentArgs):\n\n def make_agent(self):\n return MultipleProposalGenericAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"HITL-\" + self.agent_name\n\n\nclass MultipleProposalGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n self.ui = None # Single HintLabeling instance\n\n def get_candidate_generation(\n self,\n sys_prompt: SystemMessage,\n human_prompt: 
HumanMessage,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> tuple[Dict[str, Dict[str, str]], Discussion]:","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent.get_candidate_generation","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.generic_human_guided_agent.get_candidate_generation#L149-L165","kind":"function","name":"get_candidate_generation","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":149,"end_line":165,"context_start_line":129,"context_end_line":185,"code":" )\n\n def __post_init__(self):\n \"\"\"Prefix subagent name with 'HITL-'.\"\"\"\n super().__post_init__()\n if hasattr(self, \"agent_name\") and self.agent_name:\n self.agent_name = \"HITL-\" + self.agent_name\n\n\nclass MultipleProposalGenericAgent(GenericAgent):\n\n def __init__(\n self,\n chat_model_args,\n flags,\n max_retry: int = 4,\n ):\n super().__init__(chat_model_args, flags, max_retry)\n self.ui = None # Single HintLabeling instance\n\n def get_candidate_generation(\n self,\n sys_prompt: SystemMessage,\n human_prompt: HumanMessage,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> tuple[Dict[str, Dict[str, str]], Discussion]:\n\n cg = CandidatesGeneration(hint=hint, n_candidates=n_candidates)\n candidates_prompt = HumanMessage(cg.prompt)\n chat_messages = Discussion([sys_prompt, human_prompt, candidates_prompt])\n output = self.chat_llm(chat_messages)\n candidates = cg._parse_answer(output[\"content\"])\n self.step_n_human_intervention_rounds += 1\n msg_to_add_to_xray = Discussion([sys_prompt, human_prompt])\n\n return candidates, msg_to_add_to_xray\n\n @cost_tracker_decorator\n def get_action(self, obs):\n # reset vars\n step_hint = []\n self.step_n_human_intervention_rounds = 0\n self.obs_history.append(obs)\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n\n max_prompt_tokens, max_trunc_itr = self._get_maxes()\n","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.hitl_agent.generic_human_guided_agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.hitl_agent.generic_human_guided_agent.get_action#L168-L316","kind":"function","name":"get_action","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":168,"end_line":316,"context_start_line":148,"context_end_line":336,"code":"\n def get_candidate_generation(\n self,\n sys_prompt: SystemMessage,\n human_prompt: HumanMessage,\n hint: list[str] | None = None,\n n_candidates=3,\n ) -> tuple[Dict[str, Dict[str, str]], Discussion]:\n\n cg = CandidatesGeneration(hint=hint, n_candidates=n_candidates)\n candidates_prompt = HumanMessage(cg.prompt)\n chat_messages = Discussion([sys_prompt, human_prompt, candidates_prompt])\n output = self.chat_llm(chat_messages)\n candidates = cg._parse_answer(output[\"content\"])\n self.step_n_human_intervention_rounds += 1\n msg_to_add_to_xray = Discussion([sys_prompt, human_prompt])\n\n return candidates, msg_to_add_to_xray\n\n @cost_tracker_decorator\n def get_action(self, obs):\n # reset vars\n step_hint = []\n self.step_n_human_intervention_rounds 
= 0\n self.obs_history.append(obs)\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs_history=self.obs_history,\n actions=self.actions,\n memories=self.memories,\n thoughts=self.thoughts,\n previous_plan=self.plan,\n step=self.plan_step,\n flags=self.flags,\n )\n\n max_prompt_tokens, max_trunc_itr = self._get_maxes()\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n\n human_prompt = dp.fit_tokens(\n shrinkable=main_prompt,\n max_prompt_tokens=max_prompt_tokens,\n model_name=self.chat_model_args.model_name,\n max_iterations=max_trunc_itr,\n additional_prompts=system_prompt,\n )\n # Initialize UI once outside the loop\n if self.ui is None:\n self.ui = HintLabeling(headless=False)\n # Show initial waiting state\n initial_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=\"\",\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=[], # no overlay screenshots yet\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=[],\n suggestions=[], # no suggestions yet\n )\n self.ui.update_context(initial_inputs)\n\n # Generate first candidates\n candidates, chat_messages = self.get_candidate_generation(\n sys_prompt=system_prompt,\n human_prompt=human_prompt,\n hint=step_hint if step_hint else None,\n )\n suggestions = [\n {\n \"id\": key.split(\"_\")[-1],\n \"action\": candidate[\"action\"],\n \"think\": candidate[\"think\"],\n }\n for key, candidate in candidates.items()\n ]\n # List of Images as base64 - create overlay screenshots for each suggestion\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n\n while True:\n try:\n hint_labeling_inputs = HintLabelingInputs(\n goal=(\n obs.get(\"goal_object\", [{}])[0].get(\"text\", \"\")\n if obs.get(\"goal_object\")\n else \"\"\n ),\n error_feedback=obs.get(\"last_action_error\", \"\"),\n screenshot=(img_to_base_64(obs[\"screenshot\"]) if \"screenshot\" in obs else \"\"),\n screenshots=screenshots, # list of overlay screenshots for hover\n axtree=obs.get(\"axtree_txt\", \"\"),\n hints=step_hint,\n suggestions=suggestions,\n )\n\n self.ui.update_context(hint_labeling_inputs)\n response = self.ui.wait_for_response(timeout=600)\n\n if response[\"type\"] == \"reprompt\":\n new_hints = response[\"payload\"].get(\"hints\", [])\n step_hint = list(new_hints) if isinstance(new_hints, list) else step_hint\n candidates, chat_messages = self.get_candidate_generation(\n sys_prompt=system_prompt,\n human_prompt=human_prompt,\n hint=step_hint if step_hint else None,\n )\n suggestions = [\n {\n \"id\": key.split(\"_\")[-1],\n \"action\": candidate[\"action\"],\n \"think\": candidate[\"think\"],\n }\n for key, candidate in candidates.items()\n ]\n # Regenerate screenshots for new suggestions\n screenshots = [overlay_action(obs, choice[\"action\"]) for choice in suggestions]\n # Continue the loop to show new suggestions\n elif response[\"type\"] == \"step\":\n selected_action = response[\"payload\"][\"action\"]\n choice_idx = None\n for i, candidate in enumerate(suggestions, 1):\n if candidate[\"action\"] == selected_action:\n choice_idx = i\n break\n if choice_idx is None:\n choice_idx = 1\n ans_dict = candidates[f\"candidate_generation_{choice_idx}\"]\n break\n else:\n ans_dict = candidates[\"candidate_generation_1\"]\n break\n\n except KeyboardInterrupt:\n print(\"User cancelled the operation\")\n if self.ui:\n self.ui.close()\n raise\n except Exception as e:\n 
print(f\"Error in human intervention UI: {e}\")\n if self.ui:\n self.ui.close()\n self.ui = None\n # Raise exception instead of falling back to console input\n raise RuntimeError(f\"Human intervention UI failed: {e}\") from e\n\n # TODO: Refactor as discussed with ALAC.\n stats = self.chat_llm.get_stats()\n self.plan = ans_dict.get(\"plan\", self.plan)\n self.plan_step = ans_dict.get(\"step\", self.plan_step)\n self.actions.append(ans_dict[\"action\"])\n self.memories.append(ans_dict.get(\"memory\", None))\n self.thoughts.append(ans_dict.get(\"think\", None))\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\n \"chat_model_args\": asdict(self.chat_model_args),\n \"step_hints\": step_hint,\n \"n_human_intervention_rounds\": self.step_n_human_intervention_rounds,\n \"candidates\": candidates,\n \"suggestions\": suggestions,\n },\n )\n return ans_dict[\"action\"], agent_info\n\n\ndef get_base_agent(llm_config):\n \"\"\"Creates and returns a MultipleProposalGenericAgentArgs instance with\n specified LLM configuration from CHAT_MODEL_ARGS_DICT.\n\n Args:\n llm_config: The LLM configuration key to use from CHAT_MODEL_ARGS_DICT.\n\n Returns:\n MultipleProposalGenericAgentArgs: Configured agent arguments instance.\n \"\"\"\n\n from agentlab.agents.generic_agent.tmlr_config import BASE_FLAGS\n from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\n return MultipleProposalGenericAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[llm_config],\n flags=BASE_FLAGS,\n )","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent","uri":"program://AgentLab/module/src.agentlab.agents.tool_use_agent.tool_use_agent#L1-L843","kind":"module","name":"src.agentlab.agents.tool_use_agent.tool_use_agent","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":1,"end_line":843,"context_start_line":1,"context_end_line":843,"code":"import fnmatch\nimport json\nimport logging\nimport os\nimport random\nimport time\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom copy import copy\nfrom dataclasses import asdict, dataclass, field\nfrom pathlib import Path\nfrom typing import Any, Literal\n\nimport bgym\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bgym import Benchmark as BgymBenchmark\nfrom browsergym.core.observation import extract_screenshot\nfrom browsergym.utils.obs import (\n flatten_axtree_to_str,\n flatten_dom_to_str,\n overlay_som,\n prune_html,\n)\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.benchmarks.abstract_env import AbstractBenchmark as AgentLabBenchmark\nfrom agentlab.benchmarks.osworld import OSWorldActionSet\nfrom agentlab.llm.base_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import image_to_png_base64_url\nfrom agentlab.llm.response_api import (\n APIPayload,\n ClaudeResponseModelArgs,\n LLMOutput,\n MessageBuilder,\n OpenAIChatModelArgs,\n OpenAIResponseModelArgs,\n OpenRouterModelArgs,\n ToolCalls,\n)\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass Block(ABC):\n def _init(self):\n \"\"\"Initialize the block.\"\"\"\n pass\n\n def make(self) -> \"Block\":\n \"\"\"Returns a copy so the init can start adding some stuff to `self` without changing the\n original datatclass that should only contain a config.\n 
The aim is avoid having 2 calss definition for each block, e.g. Block and BlockArgs.\n\n Returns:\n Block: A copy of the current block instance with initialization applied.\n \"\"\"\n block = self.__class__(**asdict(self))\n block._init()\n return block\n\n @abstractmethod\n def apply(self, llm, messages: list[MessageBuilder], **kwargs):\n pass\n\n\n@dataclass\nclass MsgGroup:\n name: str = None\n messages: list[MessageBuilder] = field(default_factory=list)\n summary: MessageBuilder = None\n\n\nclass StructuredDiscussion:\n \"\"\"\n A structured discussion that groups messages into named groups with a potential summary for each group.\n\n When the discussion is flattened, only the last `keep_last_n_obs` groups are kept in the final list,\n the other groups are replaced by their summaries if they have one.\n \"\"\"\n\n def __init__(self, keep_last_n_obs=None):\n self.groups: list[MsgGroup] = []\n self.keep_last_n_obs: int | None = keep_last_n_obs\n\n def append(self, message: MessageBuilder):\n \"\"\"Append a message to the last group.\"\"\"\n self.groups[-1].messages.append(message)\n\n def new_group(self, name: str = None):\n \"\"\"Start a new group of messages.\"\"\"\n if name is None:\n name = f\"group_{len(self.groups)}\"\n self.groups.append(MsgGroup(name))\n\n def flatten(self) -> list[MessageBuilder]:\n \"\"\"Flatten the groups into a single list of messages.\"\"\"\n\n keep_last_n_obs = self.keep_last_n_obs or len(self.groups)\n messages = []\n for i, group in enumerate(self.groups):\n is_tail = i >= len(self.groups) - keep_last_n_obs\n\n if not is_tail and group.summary is not None:\n messages.append(group.summary)\n else:\n messages.extend(group.messages)\n # Mark all summarized messages for caching\n if i == len(self.groups) - keep_last_n_obs:\n for msg in messages: # unset previous cache breakpoints\n msg._cache_breakpoint = False\n # set new cache breakpoint\n messages[i].mark_all_previous_msg_for_caching()\n return messages\n\n def set_last_summary(self, summary: MessageBuilder):\n # append None to summaries until we reach the current group index\n self.groups[-1].summary = summary\n\n def get_last_summary(self) -> MessageBuilder | None:\n \"\"\"Get the last summary message.\"\"\"\n if len(self.groups) == 0:\n return None\n return self.groups[-1].summary\n\n def is_goal_set(self) -> bool:\n \"\"\"Check if the goal is set in the first group.\"\"\"\n return len(self.groups) > 0\n\n\nSYS_MSG = \"\"\"You are a web agent. Based on the observation, you will decide which action to take to accomplish your goal. \nYou strive for excellence and need to be as meticulous as possible. Make sure to explore when not sure.\n\"\"\"\n\n\n@dataclass\nclass Goal(Block):\n \"\"\"Block to add the goal to the messages.\"\"\"\n\n goal_as_system_msg: bool = True\n\n def apply(\n self, llm, discussion: StructuredDiscussion, obs: dict, sys_msg: str = SYS_MSG\n ) -> dict:\n system_message = llm.msg.system().add_text(sys_msg)\n discussion.append(system_message)\n\n if self.goal_as_system_msg:\n goal_message = llm.msg.system()\n else:\n goal_message = llm.msg.user()\n\n goal_message.add_text(\"# Goal:\\n\")\n for content in obs[\"goal_object\"]:\n if content[\"type\"] == \"text\":\n goal_message.add_text(content[\"text\"])\n elif content[\"type\"] == \"image_url\":\n goal_message.add_image(content[\"image_url\"])\n discussion.append(goal_message)\n\n\nAXTREE_NOTE = \"\"\"\nAXTree extracts most of the interactive elements of the DOM in a tree structure. 
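StructuredDiscussion, quoted in full above, collapses older groups to their summaries when flattened. A minimal sketch of that behavior, assuming the import path matches the records above; StubMsg is a hypothetical stand-in for MessageBuilder that provides only the two members flatten() touches:

from agentlab.agents.tool_use_agent.tool_use_agent import StructuredDiscussion


class StubMsg:
    # Hypothetical stand-in for MessageBuilder: flatten() only touches
    # _cache_breakpoint and mark_all_previous_msg_for_caching().
    def __init__(self, text):
        self.text = text
        self._cache_breakpoint = False

    def mark_all_previous_msg_for_caching(self):
        self._cache_breakpoint = True

    def __repr__(self):
        return f"StubMsg({self.text!r})"


disc = StructuredDiscussion(keep_last_n_obs=1)
for step in range(3):
    disc.new_group()
    disc.append(StubMsg(f"obs {step}"))
    disc.set_last_summary(StubMsg(f"summary {step}"))

# Older groups collapse to their summaries; only the last group stays verbatim:
# [StubMsg('summary 0'), StubMsg('summary 1'), StubMsg('obs 2')]
print(disc.flatten())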
It may also contain information that is not visible in the screenshot.\nA line starting with [bid] is a node in the AXTree. It is a unique alpha-numeric identifier to be used when calling tools, e.g, click(bid=\"a253\"). Make sure to include letters and numbers in the bid.\n\"\"\"\n\n\n@dataclass\nclass Obs(Block):\n \"\"\"Block to add the observation to the messages.\"\"\"\n\n use_last_error: bool = True\n use_screenshot: bool = True\n use_axtree: bool = False\n use_dom: bool = False\n use_som: bool = False\n use_tabs: bool = False\n # add_mouse_pointer: bool = False\n use_zoomed_webpage: bool = False\n skip_preprocessing: bool = False\n\n def apply(\n self, llm, discussion: StructuredDiscussion, obs: dict, last_llm_output: LLMOutput\n ) -> dict:\n obs_msg = llm.msg.user()\n tool_calls = last_llm_output.tool_calls\n if self.use_last_error:\n if obs[\"last_action_error\"] != \"\":\n obs_msg.add_text(f\"Last action error:\\n{obs['last_action_error']}\")\n\n if self.use_screenshot:\n if self.use_som:\n screenshot = obs[\"screenshot_som\"]\n else:\n screenshot = obs[\"screenshot\"]\n\n # if self.add_mouse_pointer:\n # screenshot = np.array(\n # agent_utils.add_mouse_pointer_from_action(\n # Image.fromarray(obs[\"screenshot\"]), obs[\"last_action\"]\n # )\n # )\n\n obs_msg.add_image(image_to_png_base64_url(screenshot))\n if self.use_axtree:\n obs_msg.add_text(f\"AXTree:\\n{AXTREE_NOTE}\\n{obs['axtree_txt']}\")\n if self.use_dom:\n obs_msg.add_text(f\"DOM:\\n{obs['pruned_html']}\")\n if self.use_tabs:\n obs_msg.add_text(_format_tabs(obs))\n\n discussion.append(obs_msg)\n\n if tool_calls:\n for call in tool_calls:\n call.response_text(\"See Observation\")\n tool_response = llm.msg.add_responded_tool_calls(tool_calls)\n discussion.append(tool_response)\n\n return obs_msg\n\n\ndef _format_tabs(obs):\n \"\"\"Format the open tabs in a llm-readable way.\"\"\"\n prompt_pieces = [\"Currently open tabs:\"]\n for page_index, (page_url, page_title) in enumerate(\n zip(obs[\"open_pages_urls\"], obs[\"open_pages_titles\"])\n ):\n active_or_not = \" (active tab)\" if page_index == obs[\"active_page_index\"] else \"\"\n prompt_piece = f\"\"\"\\\nTab {page_index}{active_or_not}:\n Title: {page_title}\n URL: {page_url}\n\"\"\"\n prompt_pieces.append(prompt_piece)\n return \"\\n\".join(prompt_pieces)\n\n\n@dataclass\nclass GeneralHints(Block):\n use_hints: bool = True\n\n def apply(self, llm, discussion: StructuredDiscussion) -> dict:\n if not self.use_hints:\n return\n\n hints = []\n\n hints.append(\n \"\"\"Use ControlOrMeta instead of Control and Meta for keyboard shortcuts, to be cross-platform compatible. E.g. 
use ControlOrMeta for mutliple selection in lists.\\n\"\"\"\n )\n\n discussion.append(llm.msg.user().add_text(\"\\n\".join(hints)))\n\n\n@dataclass\nclass Summarizer(Block):\n \"\"\"Block to summarize the last action and the current state of the environment.\"\"\"\n\n do_summary: bool = False\n high_details: bool = True\n\n def apply(self, llm, discussion: StructuredDiscussion) -> dict:\n if not self.do_summary:\n return\n\n msg = llm.msg.user().add_text(\"\"\"Summarize\\n\"\"\")\n\n discussion.append(msg)\n\n summary_response = llm(APIPayload(messages=discussion.flatten()))\n\n summary_msg = llm.msg.assistant().add_text(summary_response.think)\n discussion.append(summary_msg)\n discussion.set_last_summary(summary_msg)\n return summary_msg\n\n def apply_init(self, llm, discussion: StructuredDiscussion) -> dict:\n \"\"\"Initialize the summarizer block.\"\"\"\n if not self.do_summary:\n return\n\n system_msg = llm.msg.system()\n if self.high_details:\n # Add a system message to the LLM to indicate that it should summarize\n system_msg.add_text(\n \"\"\"# Summarizer instructions:\\nWhen asked to summarize, do the following:\n1) Summarize the effect of the last action, with attention to details.\n2) Give a semantic description of the current state of the environment, with attention to details. If there was a repeating mistake, mention the cause of it.\n3) Reason about the overall task at a high level.\n4) What hint can be relevant for the next action? Only chose from the hints provided in the task description. Or select none.\n5) Reason about the next action to take, based on the current state and the goal.\n\"\"\"\n )\n else:\n system_msg.add_text(\n \"\"\"When asked to summarize, give a semantic description of the current state of the environment.\"\"\"\n )\n discussion.append(system_msg)\n\n\n@dataclass\nclass TaskHint(Block):\n use_task_hint: bool = True\n hint_db_rel_path: str = \"hint_db.csv\"\n hint_retrieval_mode: Literal[\"direct\", \"llm\", \"emb\"] = \"direct\"\n top_n: int = 4 # Number of top hints to return when using embedding retrieval\n embedder_model: str = \"Qwen/Qwen3-Embedding-0.6B\" # Model for embedding hints\n embedder_server: str = \"http://localhost:5000\"\n llm_prompt: str = \"\"\"We're choosing hints to help solve the following task:\\n{goal}.\\n\nYou need to choose the most relevant hints topic from the following list:\\n\\nHint topics:\\n{topics}\\n\nChoose hint topic for the task and return only its number, e.g. 1. 
If you don't know the answer, return -1.\"\"\"\n\n def _init(self):\n \"\"\"Initialize the block.\"\"\"\n if Path(self.hint_db_rel_path).is_absolute():\n hint_db_path = Path(self.hint_db_rel_path)\n else:\n hint_db_path = Path(__file__).parent / self.hint_db_rel_path\n self.hint_db = pd.read_csv(hint_db_path, header=0, index_col=None, dtype=str)\n if self.hint_retrieval_mode == \"emb\":\n self.encode_hints()\n\n def oai_embed(self, text: str):\n response = self._oai_emb.create(input=text, model=\"text-embedding-3-small\")\n return response.data[0].embedding\n\n def encode_hints(self):\n self.uniq_hints = self.hint_db.drop_duplicates(subset=[\"hint\"], keep=\"first\")\n logger.info(\n f\"Encoding {len(self.uniq_hints)} unique hints with semantic keys using {self.embedder_model} model.\"\n )\n hints = self.uniq_hints[\"hint\"].tolist()\n semantic_keys = self.uniq_hints[\"semantic_keys\"].tolist()\n lines = [f\"{k}: {h}\" for h, k in zip(hints, semantic_keys)]\n emb_path = f\"{self.hint_db_rel_path}.embs.npy\"\n assert os.path.exists(emb_path), f\"Embedding file not found: {emb_path}\"\n logger.info(f\"Loading hint embeddings from: {emb_path}\")\n emb_dict = np.load(emb_path, allow_pickle=True).item()\n self.hint_embeddings = np.array([emb_dict[k] for k in lines])\n logger.info(f\"Loaded hint embeddings shape: {self.hint_embeddings.shape}\")\n\n def apply(self, llm, discussion: StructuredDiscussion, task_name: str) -> dict:\n if not self.use_task_hint:\n return {}\n\n goal = \"\\n\".join([c.get(\"text\", \"\") for c in discussion.groups[0].messages[1].content])\n task_hints = self.choose_hints(llm, task_name, goal)\n\n hints = []\n for hint in task_hints:\n hint = hint.strip()\n if hint:\n hints.append(f\"- {hint}\")\n\n if len(hints) > 0:\n hints_str = (\n \"# Hints:\\nHere are some hints for the task you are working on:\\n\"\n + \"\\n\".join(hints)\n )\n msg = llm.msg.user().add_text(hints_str)\n\n discussion.append(msg)\n\n def choose_hints(self, llm, task_name: str, goal: str) -> list[str]:\n \"\"\"Choose hints based on the task name.\"\"\"\n if self.hint_retrieval_mode == \"llm\":\n return self.choose_hints_llm(llm, goal)\n elif self.hint_retrieval_mode == \"direct\":\n return self.choose_hints_direct(task_name)\n elif self.hint_retrieval_mode == \"emb\":\n return self.choose_hints_emb(goal)\n else:\n raise ValueError(f\"Unknown hint retrieval mode: {self.hint_retrieval_mode}\")\n\n def choose_hints_llm(self, llm, goal: str) -> list[str]:\n \"\"\"Choose hints using LLM to filter the hints.\"\"\"\n topic_to_hints = defaultdict(list)\n for i, row in self.hint_db.iterrows():\n topic_to_hints[row[\"semantic_keys\"]].append(i)\n hint_topics = list(topic_to_hints.keys())\n topics = \"\\n\".join([f\"{i}. 
{h}\" for i, h in enumerate(hint_topics)])\n prompt = self.llm_prompt.format(goal=goal, topics=topics)\n response = llm(APIPayload(messages=[llm.msg.user().add_text(prompt)]))\n try:\n hint_topic_idx = json.loads(response.think)\n if hint_topic_idx < 0 or hint_topic_idx >= len(hint_topics):\n logger.error(f\"Wrong LLM hint id response: {response.think}, no hints\")\n return []\n hint_topic = hint_topics[hint_topic_idx]\n hint_indices = topic_to_hints[hint_topic]\n df = self.hint_db.iloc[hint_indices].copy()\n df = df.drop_duplicates(subset=[\"hint\"], keep=\"first\") # leave only unique hints\n hints = df[\"hint\"].tolist()\n logger.debug(f\"LLM hint topic {hint_topic_idx}, chosen hints: {df['hint'].tolist()}\")\n except json.JSONDecodeError:\n logger.error(f\"Failed to parse LLM hint id response: {response.think}, no hints\")\n hints = []\n return hints\n\n def choose_hints_emb(self, goal: str) -> list[str]:\n \"\"\"Choose hints using embeddings to filter the hints.\"\"\"\n goal_embeddings = self._encode([goal], prompt=\"task description\")\n similarities = self._similarity(goal_embeddings.tolist(), self.hint_embeddings.tolist())\n top_indices = similarities.argsort()[0][-self.top_n :].tolist()\n logger.info(f\"Top hint indices based on embedding similarity: {top_indices}\")\n hints = self.uniq_hints.iloc[top_indices]\n logger.info(f\"Embedding-based hints chosen: {hints}\")\n return hints[\"hint\"].tolist()\n\n def _encode(self, texts: list[str], prompt: str = \"\", timeout: int = 10, max_retries: int = 5):\n \"\"\"Call the encode API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/encode\",\n json={\"texts\": texts, \"prompt\": prompt},\n timeout=timeout,\n )\n embs = response.json()[\"embeddings\"]\n return np.asarray(embs)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n\n def _similarity(\n self, texts1: list[str], texts2: list[str], timeout: int = 2, max_retries: int = 5\n ):\n \"\"\"Call the similarity API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/similarity\",\n json={\"texts1\": texts1, \"texts2\": texts2},\n timeout=timeout,\n )\n similarities = response.json()[\"similarities\"]\n return np.asarray(similarities)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n\n def choose_hints_direct(self, task_name: str) -> list[str]:\n hints = self.hint_db[\n self.hint_db[\"task_name\"].apply(lambda x: fnmatch.fnmatch(x, task_name))\n ]\n return hints[\"hint\"].tolist()\n\n\n@dataclass\nclass PromptConfig:\n tag_screenshot: bool = True # Whether to tag the screenshot with the last action.\n goal: Goal = None\n obs: Obs = None\n summarizer: Summarizer = None\n general_hints: GeneralHints = None\n task_hint: TaskHint = None\n keep_last_n_obs: int = 1\n multiaction: bool = False\n action_subsets: tuple[str] = None\n\n\n@dataclass\nclass ToolUseAgentArgs(AgentArgs):\n model_args: BaseModelArgs = None\n config: PromptConfig = None\n use_raw_page_output: bool = False # This attribute is used in loop.py to setup the env.\n action_set: bgym.AbstractActionSet | None = None\n\n def __post_init__(self):\n try:\n self.agent_name = 
f\"ToolUse-{self.model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def make_agent(self) -> bgym.Agent:\n if self.config is None:\n self.config = DEFAULT_PROMPT_CONFIG\n return ToolUseAgent(\n model_args=self.model_args, # type: ignore\n config=self.config,\n action_set=self.action_set,\n )\n\n def prepare(self):\n return self.model_args.prepare_server()\n\n def close(self):\n return self.model_args.close_server()\n\n def set_benchmark(self, benchmark: AgentLabBenchmark | BgymBenchmark, demo_mode: bool):\n \"\"\"Set benchmark specific flags.\"\"\"\n benchmark_name = benchmark.name\n if benchmark_name == \"osworld\":\n self.config.obs.skip_preprocessing = True\n\n\nclass ToolUseAgent(bgym.Agent):\n def __init__(\n self,\n model_args: OpenAIResponseModelArgs,\n config: PromptConfig = None,\n action_set: bgym.AbstractActionSet | None = None,\n ):\n self.model_args = model_args\n self.config = config\n self.action_set: bgym.AbstractActionSet = action_set or bgym.HighLevelActionSet(\n self.config.action_subsets,\n multiaction=self.config.multiaction, # type: ignore\n )\n self.tools = self.action_set.to_tool_description(api=model_args.api)\n\n self.call_ids = []\n\n self.llm = model_args.make_model()\n self.msg_builder = model_args.get_message_builder()\n self.llm.msg = self.msg_builder\n\n self.task_hint = self.config.task_hint.make()\n self.obs_block = self.config.obs.make()\n\n self.discussion = StructuredDiscussion(self.config.keep_last_n_obs)\n self.last_response: LLMOutput = LLMOutput()\n self._responses: list[LLMOutput] = []\n\n def o\n# ... truncated ...","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":true} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.Block","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.Block#L48-L67","kind":"class","name":"Block","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":48,"end_line":67,"context_start_line":28,"context_end_line":87,"code":"from agentlab.benchmarks.abstract_env import AbstractBenchmark as AgentLabBenchmark\nfrom agentlab.benchmarks.osworld import OSWorldActionSet\nfrom agentlab.llm.base_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import image_to_png_base64_url\nfrom agentlab.llm.response_api import (\n APIPayload,\n ClaudeResponseModelArgs,\n LLMOutput,\n MessageBuilder,\n OpenAIChatModelArgs,\n OpenAIResponseModelArgs,\n OpenRouterModelArgs,\n ToolCalls,\n)\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass Block(ABC):\n def _init(self):\n \"\"\"Initialize the block.\"\"\"\n pass\n\n def make(self) -> \"Block\":\n \"\"\"Returns a copy so the init can start adding some stuff to `self` without changing the\n original datatclass that should only contain a config.\n The aim is avoid having 2 calss definition for each block, e.g. 
Block and BlockArgs.\n\n Returns:\n Block: A copy of the current block instance with initialization applied.\n \"\"\"\n block = self.__class__(**asdict(self))\n block._init()\n return block\n\n @abstractmethod\n def apply(self, llm, messages: list[MessageBuilder], **kwargs):\n pass\n\n\n@dataclass\nclass MsgGroup:\n name: str = None\n messages: list[MessageBuilder] = field(default_factory=list)\n summary: MessageBuilder = None\n\n\nclass StructuredDiscussion:\n \"\"\"\n A structured discussion that groups messages into named groups with a potential summary for each group.\n\n When the discussion is flattened, only the last `keep_last_n_obs` groups are kept in the final list,\n the other groups are replaced by their summaries if they have one.\n \"\"\"\n\n def __init__(self, keep_last_n_obs=None):\n self.groups: list[MsgGroup] = []\n self.keep_last_n_obs: int | None = keep_last_n_obs","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.MsgGroup","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.MsgGroup#L71-L74","kind":"class","name":"MsgGroup","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":71,"end_line":74,"context_start_line":51,"context_end_line":94,"code":" pass\n\n def make(self) -> \"Block\":\n \"\"\"Returns a copy so the init can start adding some stuff to `self` without changing the\n original dataclass that should only contain a config.\n The aim is to avoid having two class definitions for each block, e.g. Block and BlockArgs.\n\n Returns:\n Block: A copy of the current block instance with initialization applied.\n \"\"\"\n block = self.__class__(**asdict(self))\n block._init()\n return block\n\n @abstractmethod\n def apply(self, llm, messages: list[MessageBuilder], **kwargs):\n pass\n\n\n@dataclass\nclass MsgGroup:\n name: str = None\n messages: list[MessageBuilder] = field(default_factory=list)\n summary: MessageBuilder = None\n\n\nclass StructuredDiscussion:\n \"\"\"\n A structured discussion that groups messages into named groups with a potential summary for each group.\n\n When the discussion is flattened, only the last `keep_last_n_obs` groups are kept in the final list,\n the other groups are replaced by their summaries if they have one.\n \"\"\"\n\n def __init__(self, keep_last_n_obs=None):\n self.groups: list[MsgGroup] = []\n self.keep_last_n_obs: int | None = keep_last_n_obs\n\n def append(self, message: MessageBuilder):\n \"\"\"Append a message to the last group.\"\"\"\n self.groups[-1].messages.append(message)\n\n def new_group(self, name: str = None):\n \"\"\"Start a new group of messages.\"\"\"","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.StructuredDiscussion","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.StructuredDiscussion#L77-L131","kind":"class","name":"StructuredDiscussion","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":77,"end_line":131,"context_start_line":57,"context_end_line":151,"code":"\n Returns:\n Block: A copy of the current block instance with initialization applied.\n \"\"\"\n block = self.__class__(**asdict(self))\n block._init()\n return block\n\n @abstractmethod\n def apply(self, llm, messages: list[MessageBuilder], 
**kwargs):\n pass\n\n\n@dataclass\nclass MsgGroup:\n name: str = None\n messages: list[MessageBuilder] = field(default_factory=list)\n summary: MessageBuilder = None\n\n\nclass StructuredDiscussion:\n \"\"\"\n A structured discussion that groups messages into named groups with a potential summary for each group.\n\n When the discussion is flattened, only the last `keep_last_n_obs` groups are kept in the final list,\n the other groups are replaced by their summaries if they have one.\n \"\"\"\n\n def __init__(self, keep_last_n_obs=None):\n self.groups: list[MsgGroup] = []\n self.keep_last_n_obs: int | None = keep_last_n_obs\n\n def append(self, message: MessageBuilder):\n \"\"\"Append a message to the last group.\"\"\"\n self.groups[-1].messages.append(message)\n\n def new_group(self, name: str = None):\n \"\"\"Start a new group of messages.\"\"\"\n if name is None:\n name = f\"group_{len(self.groups)}\"\n self.groups.append(MsgGroup(name))\n\n def flatten(self) -> list[MessageBuilder]:\n \"\"\"Flatten the groups into a single list of messages.\"\"\"\n\n keep_last_n_obs = self.keep_last_n_obs or len(self.groups)\n messages = []\n for i, group in enumerate(self.groups):\n is_tail = i >= len(self.groups) - keep_last_n_obs\n\n if not is_tail and group.summary is not None:\n messages.append(group.summary)\n else:\n messages.extend(group.messages)\n # Mark all summarized messages for caching\n if i == len(self.groups) - keep_last_n_obs:\n for msg in messages: # unset previous cache breakpoints\n msg._cache_breakpoint = False\n # set new cache breakpoint\n messages[i].mark_all_previous_msg_for_caching()\n return messages\n\n def set_last_summary(self, summary: MessageBuilder):\n # append None to summaries until we reach the current group index\n self.groups[-1].summary = summary\n\n def get_last_summary(self) -> MessageBuilder | None:\n \"\"\"Get the last summary message.\"\"\"\n if len(self.groups) == 0:\n return None\n return self.groups[-1].summary\n\n def is_goal_set(self) -> bool:\n \"\"\"Check if the goal is set in the first group.\"\"\"\n return len(self.groups) > 0\n\n\nSYS_MSG = \"\"\"You are a web agent. Based on the observation, you will decide which action to take to accomplish your goal. \nYou strive for excellence and need to be as meticulous as possible. 
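The make()/_init() split above keeps each Block dataclass a pure config and puts runtime state on a copy. A rough sketch of defining a new block under that pattern; CountingBlock and its behavior are illustrative, not part of the repo:

from dataclasses import dataclass

from agentlab.agents.tool_use_agent.tool_use_agent import Block


@dataclass
class CountingBlock(Block):
    # Illustrative only: the dataclass fields hold pure config.
    prefix: str = "step"

    def _init(self):
        self.counter = 0  # runtime state lives on the copy returned by make()

    def apply(self, llm, messages, **kwargs):
        self.counter += 1
        return f"{self.prefix} {self.counter}"


config = CountingBlock(prefix="turn")
block = config.make()  # copy + _init(); `config` itself stays a plain config
assert not hasattr(config, "counter") and block.counter == 0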
Make sure to explore when not sure.\n\"\"\"\n\n\n@dataclass\nclass Goal(Block):\n \"\"\"Block to add the goal to the messages.\"\"\"\n\n goal_as_system_msg: bool = True\n\n def apply(\n self, llm, discussion: StructuredDiscussion, obs: dict, sys_msg: str = SYS_MSG\n ) -> dict:\n system_message = llm.msg.system().add_text(sys_msg)\n discussion.append(system_message)\n\n if self.goal_as_system_msg:","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.Goal","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.Goal#L140-L162","kind":"class","name":"Goal","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":140,"end_line":162,"context_start_line":120,"context_end_line":182,"code":" # append None to summaries until we reach the current group index\n self.groups[-1].summary = summary\n\n def get_last_summary(self) -> MessageBuilder | None:\n \"\"\"Get the last summary message.\"\"\"\n if len(self.groups) == 0:\n return None\n return self.groups[-1].summary\n\n def is_goal_set(self) -> bool:\n \"\"\"Check if the goal is set in the first group.\"\"\"\n return len(self.groups) > 0\n\n\nSYS_MSG = \"\"\"You are a web agent. Based on the observation, you will decide which action to take to accomplish your goal. \nYou strive for excellence and need to be as meticulous as possible. Make sure to explore when not sure.\n\"\"\"\n\n\n@dataclass\nclass Goal(Block):\n \"\"\"Block to add the goal to the messages.\"\"\"\n\n goal_as_system_msg: bool = True\n\n def apply(\n self, llm, discussion: StructuredDiscussion, obs: dict, sys_msg: str = SYS_MSG\n ) -> dict:\n system_message = llm.msg.system().add_text(sys_msg)\n discussion.append(system_message)\n\n if self.goal_as_system_msg:\n goal_message = llm.msg.system()\n else:\n goal_message = llm.msg.user()\n\n goal_message.add_text(\"# Goal:\\n\")\n for content in obs[\"goal_object\"]:\n if content[\"type\"] == \"text\":\n goal_message.add_text(content[\"text\"])\n elif content[\"type\"] == \"image_url\":\n goal_message.add_image(content[\"image_url\"])\n discussion.append(goal_message)\n\n\nAXTREE_NOTE = \"\"\"\nAXTree extracts most of the interactive elements of the DOM in a tree structure. It may also contain information that is not visible in the screenshot.\nA line starting with [bid] is a node in the AXTree. It is a unique alpha-numeric identifier to be used when calling tools, e.g, click(bid=\"a253\"). 
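The tab listing that the Obs block can attach is produced by _format_tabs, which reads only three obs keys. A small usage sketch with made-up pages, assuming the module import path matches the records above:

from agentlab.agents.tool_use_agent.tool_use_agent import _format_tabs

obs = {
    "open_pages_urls": ["https://example.com/a", "https://example.com/b"],
    "open_pages_titles": ["Page A", "Page B"],
    "active_page_index": 1,  # a plain int works here; real obs may use numpy scalars
}
# Prints "Currently open tabs:" followed by one "Tab i" entry per page,
# tagging tab 1 with "(active tab)".
print(_format_tabs(obs))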
Make sure to include letters and numbers in the bid.\n\"\"\"\n\n\n@dataclass\nclass Obs(Block):\n \"\"\"Block to add the observation to the messages.\"\"\"\n\n use_last_error: bool = True\n use_screenshot: bool = True\n use_axtree: bool = False\n use_dom: bool = False\n use_som: bool = False\n use_tabs: bool = False\n # add_mouse_pointer: bool = False\n use_zoomed_webpage: bool = False","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.Obs","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.Obs#L172-L223","kind":"class","name":"Obs","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":172,"end_line":223,"context_start_line":152,"context_end_line":243,"code":" goal_message = llm.msg.system()\n else:\n goal_message = llm.msg.user()\n\n goal_message.add_text(\"# Goal:\\n\")\n for content in obs[\"goal_object\"]:\n if content[\"type\"] == \"text\":\n goal_message.add_text(content[\"text\"])\n elif content[\"type\"] == \"image_url\":\n goal_message.add_image(content[\"image_url\"])\n discussion.append(goal_message)\n\n\nAXTREE_NOTE = \"\"\"\nAXTree extracts most of the interactive elements of the DOM in a tree structure. It may also contain information that is not visible in the screenshot.\nA line starting with [bid] is a node in the AXTree. It is a unique alpha-numeric identifier to be used when calling tools, e.g, click(bid=\"a253\"). Make sure to include letters and numbers in the bid.\n\"\"\"\n\n\n@dataclass\nclass Obs(Block):\n \"\"\"Block to add the observation to the messages.\"\"\"\n\n use_last_error: bool = True\n use_screenshot: bool = True\n use_axtree: bool = False\n use_dom: bool = False\n use_som: bool = False\n use_tabs: bool = False\n # add_mouse_pointer: bool = False\n use_zoomed_webpage: bool = False\n skip_preprocessing: bool = False\n\n def apply(\n self, llm, discussion: StructuredDiscussion, obs: dict, last_llm_output: LLMOutput\n ) -> dict:\n obs_msg = llm.msg.user()\n tool_calls = last_llm_output.tool_calls\n if self.use_last_error:\n if obs[\"last_action_error\"] != \"\":\n obs_msg.add_text(f\"Last action error:\\n{obs['last_action_error']}\")\n\n if self.use_screenshot:\n if self.use_som:\n screenshot = obs[\"screenshot_som\"]\n else:\n screenshot = obs[\"screenshot\"]\n\n # if self.add_mouse_pointer:\n # screenshot = np.array(\n # agent_utils.add_mouse_pointer_from_action(\n # Image.fromarray(obs[\"screenshot\"]), obs[\"last_action\"]\n # )\n # )\n\n obs_msg.add_image(image_to_png_base64_url(screenshot))\n if self.use_axtree:\n obs_msg.add_text(f\"AXTree:\\n{AXTREE_NOTE}\\n{obs['axtree_txt']}\")\n if self.use_dom:\n obs_msg.add_text(f\"DOM:\\n{obs['pruned_html']}\")\n if self.use_tabs:\n obs_msg.add_text(_format_tabs(obs))\n\n discussion.append(obs_msg)\n\n if tool_calls:\n for call in tool_calls:\n call.response_text(\"See Observation\")\n tool_response = llm.msg.add_responded_tool_calls(tool_calls)\n discussion.append(tool_response)\n\n return obs_msg\n\n\ndef _format_tabs(obs):\n \"\"\"Format the open tabs in a llm-readable way.\"\"\"\n prompt_pieces = [\"Currently open tabs:\"]\n for page_index, (page_url, page_title) in enumerate(\n zip(obs[\"open_pages_urls\"], obs[\"open_pages_titles\"])\n ):\n active_or_not = \" (active tab)\" if page_index == obs[\"active_page_index\"] else \"\"\n prompt_piece = f\"\"\"\\\nTab {page_index}{active_or_not}:\n Title: 
{page_title}\n URL: {page_url}\n\"\"\"\n prompt_pieces.append(prompt_piece)\n return \"\\n\".join(prompt_pieces)\n\n\n@dataclass\nclass GeneralHints(Block):","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent._format_tabs","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent._format_tabs#L226-L239","kind":"function","name":"_format_tabs","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":226,"end_line":239,"context_start_line":206,"context_end_line":259,"code":"\n obs_msg.add_image(image_to_png_base64_url(screenshot))\n if self.use_axtree:\n obs_msg.add_text(f\"AXTree:\\n{AXTREE_NOTE}\\n{obs['axtree_txt']}\")\n if self.use_dom:\n obs_msg.add_text(f\"DOM:\\n{obs['pruned_html']}\")\n if self.use_tabs:\n obs_msg.add_text(_format_tabs(obs))\n\n discussion.append(obs_msg)\n\n if tool_calls:\n for call in tool_calls:\n call.response_text(\"See Observation\")\n tool_response = llm.msg.add_responded_tool_calls(tool_calls)\n discussion.append(tool_response)\n\n return obs_msg\n\n\ndef _format_tabs(obs):\n \"\"\"Format the open tabs in a llm-readable way.\"\"\"\n prompt_pieces = [\"Currently open tabs:\"]\n for page_index, (page_url, page_title) in enumerate(\n zip(obs[\"open_pages_urls\"], obs[\"open_pages_titles\"])\n ):\n active_or_not = \" (active tab)\" if page_index == obs[\"active_page_index\"] else \"\"\n prompt_piece = f\"\"\"\\\nTab {page_index}{active_or_not}:\n Title: {page_title}\n URL: {page_url}\n\"\"\"\n prompt_pieces.append(prompt_piece)\n return \"\\n\".join(prompt_pieces)\n\n\n@dataclass\nclass GeneralHints(Block):\n use_hints: bool = True\n\n def apply(self, llm, discussion: StructuredDiscussion) -> dict:\n if not self.use_hints:\n return\n\n hints = []\n\n hints.append(\n \"\"\"Use ControlOrMeta instead of Control and Meta for keyboard shortcuts, to be cross-platform compatible. E.g. 
use ControlOrMeta for multiple selection in lists.\\n\"\"\"\n )\n\n discussion.append(llm.msg.user().add_text(\"\\n\".join(hints)))\n\n\n@dataclass","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.GeneralHints","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.GeneralHints#L243-L256","kind":"class","name":"GeneralHints","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":243,"end_line":256,"context_start_line":223,"context_end_line":276,"code":" return obs_msg\n\n\ndef _format_tabs(obs):\n \"\"\"Format the open tabs in an LLM-readable way.\"\"\"\n prompt_pieces = [\"Currently open tabs:\"]\n for page_index, (page_url, page_title) in enumerate(\n zip(obs[\"open_pages_urls\"], obs[\"open_pages_titles\"])\n ):\n active_or_not = \" (active tab)\" if page_index == obs[\"active_page_index\"] else \"\"\n prompt_piece = f\"\"\"\\\nTab {page_index}{active_or_not}:\n Title: {page_title}\n URL: {page_url}\n\"\"\"\n prompt_pieces.append(prompt_piece)\n return \"\\n\".join(prompt_pieces)\n\n\n@dataclass\nclass GeneralHints(Block):\n use_hints: bool = True\n\n def apply(self, llm, discussion: StructuredDiscussion) -> dict:\n if not self.use_hints:\n return\n\n hints = []\n\n hints.append(\n \"\"\"Use ControlOrMeta instead of Control and Meta for keyboard shortcuts, to be cross-platform compatible. E.g. use ControlOrMeta for multiple selection in lists.\\n\"\"\"\n )\n\n discussion.append(llm.msg.user().add_text(\"\\n\".join(hints)))\n\n\n@dataclass\nclass Summarizer(Block):\n \"\"\"Block to summarize the last action and the current state of the environment.\"\"\"\n\n do_summary: bool = False\n high_details: bool = True\n\n def apply(self, llm, discussion: StructuredDiscussion) -> dict:\n if not self.do_summary:\n return\n\n msg = llm.msg.user().add_text(\"\"\"Summarize\\n\"\"\")\n\n discussion.append(msg)\n\n summary_response = llm(APIPayload(messages=discussion.flatten()))\n\n summary_msg = llm.msg.assistant().add_text(summary_response.think)","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.Summarizer","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.Summarizer#L260-L302","kind":"class","name":"Summarizer","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":260,"end_line":302,"context_start_line":240,"context_end_line":322,"code":"\n\n@dataclass\nclass GeneralHints(Block):\n use_hints: bool = True\n\n def apply(self, llm, discussion: StructuredDiscussion) -> dict:\n if not self.use_hints:\n return\n\n hints = []\n\n hints.append(\n \"\"\"Use ControlOrMeta instead of Control and Meta for keyboard shortcuts, to be cross-platform compatible. E.g. 
use ControlOrMeta for mutliple selection in lists.\\n\"\"\"\n )\n\n discussion.append(llm.msg.user().add_text(\"\\n\".join(hints)))\n\n\n@dataclass\nclass Summarizer(Block):\n \"\"\"Block to summarize the last action and the current state of the environment.\"\"\"\n\n do_summary: bool = False\n high_details: bool = True\n\n def apply(self, llm, discussion: StructuredDiscussion) -> dict:\n if not self.do_summary:\n return\n\n msg = llm.msg.user().add_text(\"\"\"Summarize\\n\"\"\")\n\n discussion.append(msg)\n\n summary_response = llm(APIPayload(messages=discussion.flatten()))\n\n summary_msg = llm.msg.assistant().add_text(summary_response.think)\n discussion.append(summary_msg)\n discussion.set_last_summary(summary_msg)\n return summary_msg\n\n def apply_init(self, llm, discussion: StructuredDiscussion) -> dict:\n \"\"\"Initialize the summarizer block.\"\"\"\n if not self.do_summary:\n return\n\n system_msg = llm.msg.system()\n if self.high_details:\n # Add a system message to the LLM to indicate that it should summarize\n system_msg.add_text(\n \"\"\"# Summarizer instructions:\\nWhen asked to summarize, do the following:\n1) Summarize the effect of the last action, with attention to details.\n2) Give a semantic description of the current state of the environment, with attention to details. If there was a repeating mistake, mention the cause of it.\n3) Reason about the overall task at a high level.\n4) What hint can be relevant for the next action? Only chose from the hints provided in the task description. Or select none.\n5) Reason about the next action to take, based on the current state and the goal.\n\"\"\"\n )\n else:\n system_msg.add_text(\n \"\"\"When asked to summarize, give a semantic description of the current state of the environment.\"\"\"\n )\n discussion.append(system_msg)\n\n\n@dataclass\nclass TaskHint(Block):\n use_task_hint: bool = True\n hint_db_rel_path: str = \"hint_db.csv\"\n hint_retrieval_mode: Literal[\"direct\", \"llm\", \"emb\"] = \"direct\"\n top_n: int = 4 # Number of top hints to return when using embedding retrieval\n embedder_model: str = \"Qwen/Qwen3-Embedding-0.6B\" # Model for embedding hints\n embedder_server: str = \"http://localhost:5000\"\n llm_prompt: str = \"\"\"We're choosing hints to help solve the following task:\\n{goal}.\\n\nYou need to choose the most relevant hints topic from the following list:\\n\\nHint topics:\\n{topics}\\n\nChoose hint topic for the task and return only its number, e.g. 1. 
If you don't know the answer, return -1.\"\"\"\n\n def _init(self):\n \"\"\"Initialize the block.\"\"\"\n if Path(self.hint_db_rel_path).is_absolute():\n hint_db_path = Path(self.hint_db_rel_path)\n else:\n hint_db_path = Path(__file__).parent / self.hint_db_rel_path","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.TaskHint","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.TaskHint#L306-L454","kind":"class","name":"TaskHint","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":306,"end_line":454,"context_start_line":286,"context_end_line":474,"code":" system_msg = llm.msg.system()\n if self.high_details:\n # Add a system message to the LLM to indicate that it should summarize\n system_msg.add_text(\n \"\"\"# Summarizer instructions:\\nWhen asked to summarize, do the following:\n1) Summarize the effect of the last action, with attention to details.\n2) Give a semantic description of the current state of the environment, with attention to details. If there was a repeating mistake, mention the cause of it.\n3) Reason about the overall task at a high level.\n4) What hint can be relevant for the next action? Only chose from the hints provided in the task description. Or select none.\n5) Reason about the next action to take, based on the current state and the goal.\n\"\"\"\n )\n else:\n system_msg.add_text(\n \"\"\"When asked to summarize, give a semantic description of the current state of the environment.\"\"\"\n )\n discussion.append(system_msg)\n\n\n@dataclass\nclass TaskHint(Block):\n use_task_hint: bool = True\n hint_db_rel_path: str = \"hint_db.csv\"\n hint_retrieval_mode: Literal[\"direct\", \"llm\", \"emb\"] = \"direct\"\n top_n: int = 4 # Number of top hints to return when using embedding retrieval\n embedder_model: str = \"Qwen/Qwen3-Embedding-0.6B\" # Model for embedding hints\n embedder_server: str = \"http://localhost:5000\"\n llm_prompt: str = \"\"\"We're choosing hints to help solve the following task:\\n{goal}.\\n\nYou need to choose the most relevant hints topic from the following list:\\n\\nHint topics:\\n{topics}\\n\nChoose hint topic for the task and return only its number, e.g. 1. 
If you don't know the answer, return -1.\"\"\"\n\n def _init(self):\n \"\"\"Initialize the block.\"\"\"\n if Path(self.hint_db_rel_path).is_absolute():\n hint_db_path = Path(self.hint_db_rel_path)\n else:\n hint_db_path = Path(__file__).parent / self.hint_db_rel_path\n self.hint_db = pd.read_csv(hint_db_path, header=0, index_col=None, dtype=str)\n if self.hint_retrieval_mode == \"emb\":\n self.encode_hints()\n\n def oai_embed(self, text: str):\n response = self._oai_emb.create(input=text, model=\"text-embedding-3-small\")\n return response.data[0].embedding\n\n def encode_hints(self):\n self.uniq_hints = self.hint_db.drop_duplicates(subset=[\"hint\"], keep=\"first\")\n logger.info(\n f\"Encoding {len(self.uniq_hints)} unique hints with semantic keys using {self.embedder_model} model.\"\n )\n hints = self.uniq_hints[\"hint\"].tolist()\n semantic_keys = self.uniq_hints[\"semantic_keys\"].tolist()\n lines = [f\"{k}: {h}\" for h, k in zip(hints, semantic_keys)]\n emb_path = f\"{self.hint_db_rel_path}.embs.npy\"\n assert os.path.exists(emb_path), f\"Embedding file not found: {emb_path}\"\n logger.info(f\"Loading hint embeddings from: {emb_path}\")\n emb_dict = np.load(emb_path, allow_pickle=True).item()\n self.hint_embeddings = np.array([emb_dict[k] for k in lines])\n logger.info(f\"Loaded hint embeddings shape: {self.hint_embeddings.shape}\")\n\n def apply(self, llm, discussion: StructuredDiscussion, task_name: str) -> dict:\n if not self.use_task_hint:\n return {}\n\n goal = \"\\n\".join([c.get(\"text\", \"\") for c in discussion.groups[0].messages[1].content])\n task_hints = self.choose_hints(llm, task_name, goal)\n\n hints = []\n for hint in task_hints:\n hint = hint.strip()\n if hint:\n hints.append(f\"- {hint}\")\n\n if len(hints) > 0:\n hints_str = (\n \"# Hints:\\nHere are some hints for the task you are working on:\\n\"\n + \"\\n\".join(hints)\n )\n msg = llm.msg.user().add_text(hints_str)\n\n discussion.append(msg)\n\n def choose_hints(self, llm, task_name: str, goal: str) -> list[str]:\n \"\"\"Choose hints based on the task name.\"\"\"\n if self.hint_retrieval_mode == \"llm\":\n return self.choose_hints_llm(llm, goal)\n elif self.hint_retrieval_mode == \"direct\":\n return self.choose_hints_direct(task_name)\n elif self.hint_retrieval_mode == \"emb\":\n return self.choose_hints_emb(goal)\n else:\n raise ValueError(f\"Unknown hint retrieval mode: {self.hint_retrieval_mode}\")\n\n def choose_hints_llm(self, llm, goal: str) -> list[str]:\n \"\"\"Choose hints using LLM to filter the hints.\"\"\"\n topic_to_hints = defaultdict(list)\n for i, row in self.hint_db.iterrows():\n topic_to_hints[row[\"semantic_keys\"]].append(i)\n hint_topics = list(topic_to_hints.keys())\n topics = \"\\n\".join([f\"{i}. 
{h}\" for i, h in enumerate(hint_topics)])\n prompt = self.llm_prompt.format(goal=goal, topics=topics)\n response = llm(APIPayload(messages=[llm.msg.user().add_text(prompt)]))\n try:\n hint_topic_idx = json.loads(response.think)\n if hint_topic_idx < 0 or hint_topic_idx >= len(hint_topics):\n logger.error(f\"Wrong LLM hint id response: {response.think}, no hints\")\n return []\n hint_topic = hint_topics[hint_topic_idx]\n hint_indices = topic_to_hints[hint_topic]\n df = self.hint_db.iloc[hint_indices].copy()\n df = df.drop_duplicates(subset=[\"hint\"], keep=\"first\") # leave only unique hints\n hints = df[\"hint\"].tolist()\n logger.debug(f\"LLM hint topic {hint_topic_idx}, chosen hints: {df['hint'].tolist()}\")\n except json.JSONDecodeError:\n logger.error(f\"Failed to parse LLM hint id response: {response.think}, no hints\")\n hints = []\n return hints\n\n def choose_hints_emb(self, goal: str) -> list[str]:\n \"\"\"Choose hints using embeddings to filter the hints.\"\"\"\n goal_embeddings = self._encode([goal], prompt=\"task description\")\n similarities = self._similarity(goal_embeddings.tolist(), self.hint_embeddings.tolist())\n top_indices = similarities.argsort()[0][-self.top_n :].tolist()\n logger.info(f\"Top hint indices based on embedding similarity: {top_indices}\")\n hints = self.uniq_hints.iloc[top_indices]\n logger.info(f\"Embedding-based hints chosen: {hints}\")\n return hints[\"hint\"].tolist()\n\n def _encode(self, texts: list[str], prompt: str = \"\", timeout: int = 10, max_retries: int = 5):\n \"\"\"Call the encode API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/encode\",\n json={\"texts\": texts, \"prompt\": prompt},\n timeout=timeout,\n )\n embs = response.json()[\"embeddings\"]\n return np.asarray(embs)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n\n def _similarity(\n self, texts1: list[str], texts2: list[str], timeout: int = 2, max_retries: int = 5\n ):\n \"\"\"Call the similarity API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/similarity\",\n json={\"texts1\": texts1, \"texts2\": texts2},\n timeout=timeout,\n )\n similarities = response.json()[\"similarities\"]\n return np.asarray(similarities)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n\n def choose_hints_direct(self, task_name: str) -> list[str]:\n hints = self.hint_db[\n self.hint_db[\"task_name\"].apply(lambda x: fnmatch.fnmatch(x, task_name))\n ]\n return hints[\"hint\"].tolist()\n\n\n@dataclass\nclass PromptConfig:\n tag_screenshot: bool = True # Whether to tag the screenshot with the last action.\n goal: Goal = None\n obs: Obs = None\n summarizer: Summarizer = None\n general_hints: GeneralHints = None\n task_hint: TaskHint = None\n keep_last_n_obs: int = 1\n multiaction: bool = False\n action_subsets: tuple[str] = None\n\n\n@dataclass\nclass ToolUseAgentArgs(AgentArgs):\n model_args: BaseModelArgs = None\n config: PromptConfig = None\n use_raw_page_output: bool = False # This attribute is used in loop.py to setup the env.","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} 
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.PromptConfig","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.PromptConfig#L458-L467","kind":"class","name":"PromptConfig","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":458,"end_line":467,"context_start_line":438,"context_end_line":487,"code":" f\"{self.embedder_server}/similarity\",\n json={\"texts1\": texts1, \"texts2\": texts2},\n timeout=timeout,\n )\n similarities = response.json()[\"similarities\"]\n return np.asarray(similarities)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n\n def choose_hints_direct(self, task_name: str) -> list[str]:\n hints = self.hint_db[\n self.hint_db[\"task_name\"].apply(lambda x: fnmatch.fnmatch(x, task_name))\n ]\n return hints[\"hint\"].tolist()\n\n\n@dataclass\nclass PromptConfig:\n tag_screenshot: bool = True # Whether to tag the screenshot with the last action.\n goal: Goal = None\n obs: Obs = None\n summarizer: Summarizer = None\n general_hints: GeneralHints = None\n task_hint: TaskHint = None\n keep_last_n_obs: int = 1\n multiaction: bool = False\n action_subsets: tuple[str] = None\n\n\n@dataclass\nclass ToolUseAgentArgs(AgentArgs):\n model_args: BaseModelArgs = None\n config: PromptConfig = None\n use_raw_page_output: bool = False # This attribute is used in loop.py to setup the env.\n action_set: bgym.AbstractActionSet | None = None\n\n def __post_init__(self):\n try:\n self.agent_name = f\"ToolUse-{self.model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def make_agent(self) -> bgym.Agent:\n if self.config is None:\n self.config = DEFAULT_PROMPT_CONFIG\n return ToolUseAgent(\n model_args=self.model_args, # type: ignore","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.ToolUseAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.ToolUseAgentArgs#L471-L502","kind":"class","name":"ToolUseAgentArgs","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":471,"end_line":502,"context_start_line":451,"context_end_line":522,"code":" hints = self.hint_db[\n self.hint_db[\"task_name\"].apply(lambda x: fnmatch.fnmatch(x, task_name))\n ]\n return hints[\"hint\"].tolist()\n\n\n@dataclass\nclass PromptConfig:\n tag_screenshot: bool = True # Whether to tag the screenshot with the last action.\n goal: Goal = None\n obs: Obs = None\n summarizer: Summarizer = None\n general_hints: GeneralHints = None\n task_hint: TaskHint = None\n keep_last_n_obs: int = 1\n multiaction: bool = False\n action_subsets: tuple[str] = None\n\n\n@dataclass\nclass ToolUseAgentArgs(AgentArgs):\n model_args: BaseModelArgs = None\n config: PromptConfig = None\n use_raw_page_output: bool = False # This attribute is used in loop.py to setup the env.\n action_set: bgym.AbstractActionSet | None = None\n\n def __post_init__(self):\n try:\n self.agent_name = f\"ToolUse-{self.model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def make_agent(self) -> bgym.Agent:\n if self.config is None:\n self.config = DEFAULT_PROMPT_CONFIG\n return ToolUseAgent(\n model_args=self.model_args, # type: ignore\n config=self.config,\n 
action_set=self.action_set,\n )\n\n def prepare(self):\n return self.model_args.prepare_server()\n\n def close(self):\n return self.model_args.close_server()\n\n def set_benchmark(self, benchmark: AgentLabBenchmark | BgymBenchmark, demo_mode: bool):\n \"\"\"Set benchmark specific flags.\"\"\"\n benchmark_name = benchmark.name\n if benchmark_name == \"osworld\":\n self.config.obs.skip_preprocessing = True\n\n\nclass ToolUseAgent(bgym.Agent):\n def __init__(\n self,\n model_args: OpenAIResponseModelArgs,\n config: PromptConfig = None,\n action_set: bgym.AbstractActionSet | None = None,\n ):\n self.model_args = model_args\n self.config = config\n self.action_set: bgym.AbstractActionSet = action_set or bgym.HighLevelActionSet(\n self.config.action_subsets,\n multiaction=self.config.multiaction, # type: ignore\n )\n self.tools = self.action_set.to_tool_description(api=model_args.api)\n\n self.call_ids = []\n\n self.llm = model_args.make_model()","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.ToolUseAgent","uri":"program://AgentLab/class/src.agentlab.agents.tool_use_agent.tool_use_agent.ToolUseAgent#L505-L628","kind":"class","name":"ToolUseAgent","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":505,"end_line":628,"context_start_line":485,"context_end_line":648,"code":" self.config = DEFAULT_PROMPT_CONFIG\n return ToolUseAgent(\n model_args=self.model_args, # type: ignore\n config=self.config,\n action_set=self.action_set,\n )\n\n def prepare(self):\n return self.model_args.prepare_server()\n\n def close(self):\n return self.model_args.close_server()\n\n def set_benchmark(self, benchmark: AgentLabBenchmark | BgymBenchmark, demo_mode: bool):\n \"\"\"Set benchmark specific flags.\"\"\"\n benchmark_name = benchmark.name\n if benchmark_name == \"osworld\":\n self.config.obs.skip_preprocessing = True\n\n\nclass ToolUseAgent(bgym.Agent):\n def __init__(\n self,\n model_args: OpenAIResponseModelArgs,\n config: PromptConfig = None,\n action_set: bgym.AbstractActionSet | None = None,\n ):\n self.model_args = model_args\n self.config = config\n self.action_set: bgym.AbstractActionSet = action_set or bgym.HighLevelActionSet(\n self.config.action_subsets,\n multiaction=self.config.multiaction, # type: ignore\n )\n self.tools = self.action_set.to_tool_description(api=model_args.api)\n\n self.call_ids = []\n\n self.llm = model_args.make_model()\n self.msg_builder = model_args.get_message_builder()\n self.llm.msg = self.msg_builder\n\n self.task_hint = self.config.task_hint.make()\n self.obs_block = self.config.obs.make()\n\n self.discussion = StructuredDiscussion(self.config.keep_last_n_obs)\n self.last_response: LLMOutput = LLMOutput()\n self._responses: list[LLMOutput] = []\n\n def obs_preprocessor(self, obs):\n obs = copy(obs)\n if self.config.obs.skip_preprocessing:\n return obs\n page = obs.pop(\"page\", None)\n if page is not None:\n obs[\"screenshot\"] = extract_screenshot(page)\n else:\n if self.config.obs.use_dom:\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n )\n obs[\"pruned_html\"] = prune_html(obs[\"dom_txt\"])\n\n if self.config.obs.use_axtree:\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n )\n\n if self.config.obs.use_som:\n obs[\"screenshot_som\"] = 
overlay_som(\n                    obs[\"screenshot\"], extra_properties=obs[\"extra_element_properties\"]\n                )\n            if self.config.obs.use_zoomed_webpage:\n                pass\n\n        return obs\n\n    def set_task_name(self, task_name: str):\n        \"\"\"Cheater function that is supposed to be called by loop.py before calling get_action\"\"\"\n        self.task_name = task_name\n\n    @cost_tracker_decorator\n    def get_action(self, obs: Any) -> tuple[str, bgym.AgentInfo]:\n        self.llm.reset_stats()\n        if not self.discussion.is_goal_set():\n            self.discussion.new_group(\"goal\")\n\n            if self.config.multiaction:\n                sys_msg = SYS_MSG + \"\\nYou can take multiple actions in a single step, if needed.\"\n            else:\n                sys_msg = SYS_MSG + \"\\nYou can only take one action at a time.\"\n            self.config.goal.apply(self.llm, self.discussion, obs, sys_msg)\n\n            self.config.summarizer.apply_init(self.llm, self.discussion)\n            self.config.general_hints.apply(self.llm, self.discussion)\n            self.task_hint.apply(self.llm, self.discussion, self.task_name)\n\n        self.discussion.new_group()\n\n        self.obs_block.apply(self.llm, self.discussion, obs, last_llm_output=self.last_response)\n\n        self.config.summarizer.apply(self.llm, self.discussion)\n\n        messages = self.discussion.flatten()\n        response: LLMOutput = self.llm(\n            APIPayload(\n                messages=messages,\n                tools=self.tools,  # You can update the available tools now.\n                tool_choice=\"any\",\n                cache_tool_definition=True,\n                cache_complete_prompt=False,\n                use_cache_breakpoints=True,\n            )\n        )\n        action = response.action\n        think = response.think\n        last_summary = self.discussion.get_last_summary()\n        if last_summary is not None:\n            think = last_summary.content[0][\"text\"] + \"\\n\" + think\n\n        self.discussion.new_group()\n        # self.discussion.append(response.tool_calls)  # No need to append tool calls anymore.\n\n        self.last_response = response\n        self._responses.append(response)  # may be useful for debugging\n        # self.messages.append(response.assistant_message) # this is tool call\n\n        tools_str = json.dumps(self.tools, indent=2)\n        tools_msg = MessageBuilder(\"tool_description\").add_text(tools_str)\n\n        # Adding these extra messages to visualize in gradio\n        messages.insert(0, tools_msg)  # insert at the beginning of the message list\n        # This avoids the assertion error with self.llm.user().add_responded_tool_calls(tool_calls)\n        msg = self.llm.msg(\"tool\")\n        msg.responded_tool_calls = response.tool_calls\n        messages.append(msg)\n\n        agent_info = bgym.AgentInfo(\n            think=think,\n            chat_messages=messages,\n            stats=self.llm.stats.stats_dict,\n        )\n        return action, agent_info\n\n\nGPT_4_1 = OpenAIResponseModelArgs(\n    model_name=\"gpt-4.1\",\n    max_total_tokens=200_000,\n    max_input_tokens=200_000,\n    max_new_tokens=2_000,\n    temperature=0.1,\n    vision_support=True,\n)\n\nGPT_4_1_CC_API = OpenAIChatModelArgs(\n    model_name=\"gpt-4.1\",\n    max_total_tokens=200_000,\n    max_input_tokens=200_000,\n    max_new_tokens=2_000,\n    temperature=0.1,\n    vision_support=True,\n)\n","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent._init","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent._init#L317-L325","kind":"function","name":"_init","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":317,"end_line":325,"context_start_line":297,"context_end_line":345,"code":"            )\n        else:\n            system_msg.add_text(\n                \"\"\"When asked to summarize, give a semantic description of the current state of the environment.\"\"\"\n            )\n        
discussion.append(system_msg)\n\n\n@dataclass\nclass TaskHint(Block):\n    use_task_hint: bool = True\n    hint_db_rel_path: str = \"hint_db.csv\"\n    hint_retrieval_mode: Literal[\"direct\", \"llm\", \"emb\"] = \"direct\"\n    top_n: int = 4 # Number of top hints to return when using embedding retrieval\n    embedder_model: str = \"Qwen/Qwen3-Embedding-0.6B\" # Model for embedding hints\n    embedder_server: str = \"http://localhost:5000\"\n    llm_prompt: str = \"\"\"We're choosing hints to help solve the following task:\\n{goal}.\\n\nYou need to choose the most relevant hints topic from the following list:\\n\\nHint topics:\\n{topics}\\n\nChoose hint topic for the task and return only its number, e.g. 1. If you don't know the answer, return -1.\"\"\"\n\n    def _init(self):\n        \"\"\"Initialize the block.\"\"\"\n        if Path(self.hint_db_rel_path).is_absolute():\n            hint_db_path = Path(self.hint_db_rel_path)\n        else:\n            hint_db_path = Path(__file__).parent / self.hint_db_rel_path\n        self.hint_db = pd.read_csv(hint_db_path, header=0, index_col=None, dtype=str)\n        if self.hint_retrieval_mode == \"emb\":\n            self.encode_hints()\n\n    def oai_embed(self, text: str):\n        response = self._oai_emb.create(input=text, model=\"text-embedding-3-small\")\n        return response.data[0].embedding\n\n    def encode_hints(self):\n        self.uniq_hints = self.hint_db.drop_duplicates(subset=[\"hint\"], keep=\"first\")\n        logger.info(\n            f\"Encoding {len(self.uniq_hints)} unique hints with semantic keys using {self.embedder_model} model.\"\n        )\n        hints = self.uniq_hints[\"hint\"].tolist()\n        semantic_keys = self.uniq_hints[\"semantic_keys\"].tolist()\n        lines = [f\"{k}: {h}\" for h, k in zip(hints, semantic_keys)]\n        emb_path = f\"{self.hint_db_rel_path}.embs.npy\"\n        assert os.path.exists(emb_path), f\"Embedding file not found: {emb_path}\"\n        logger.info(f\"Loading hint embeddings from: {emb_path}\")\n        emb_dict = np.load(emb_path, allow_pickle=True).item()\n        self.hint_embeddings = np.array([emb_dict[k] for k in lines])\n        logger.info(f\"Loaded hint embeddings shape: {self.hint_embeddings.shape}\")\n","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.make","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.make#L53-L63","kind":"function","name":"make","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":53,"end_line":63,"context_start_line":33,"context_end_line":83,"code":"    APIPayload,\n    ClaudeResponseModelArgs,\n    LLMOutput,\n    MessageBuilder,\n    OpenAIChatModelArgs,\n    OpenAIResponseModelArgs,\n    OpenRouterModelArgs,\n    ToolCalls,\n)\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nlogger = logging.getLogger(__name__)\n\n\n@dataclass\nclass Block(ABC):\n    def _init(self):\n        \"\"\"Initialize the block.\"\"\"\n        pass\n\n    def make(self) -> \"Block\":\n        \"\"\"Returns a copy so the init can start adding some stuff to `self` without changing the\n        original dataclass that should only contain a config.\n        The aim is to avoid having two class definitions for each block, e.g. 
Block and BlockArgs.\n\n Returns:\n Block: A copy of the current block instance with initialization applied.\n \"\"\"\n block = self.__class__(**asdict(self))\n block._init()\n return block\n\n @abstractmethod\n def apply(self, llm, messages: list[MessageBuilder], **kwargs):\n pass\n\n\n@dataclass\nclass MsgGroup:\n name: str = None\n messages: list[MessageBuilder] = field(default_factory=list)\n summary: MessageBuilder = None\n\n\nclass StructuredDiscussion:\n \"\"\"\n A structured discussion that groups messages into named groups with a potential summary for each group.\n\n When the discussion is flattened, only the last `keep_last_n_obs` groups are kept in the final list,\n the other groups are replaced by their summaries if they have one.\n \"\"\"","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.apply","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.apply#L346-L366","kind":"function","name":"apply","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":346,"end_line":366,"context_start_line":326,"context_end_line":386,"code":"\n def oai_embed(self, text: str):\n response = self._oai_emb.create(input=text, model=\"text-embedding-3-small\")\n return response.data[0].embedding\n\n def encode_hints(self):\n self.uniq_hints = self.hint_db.drop_duplicates(subset=[\"hint\"], keep=\"first\")\n logger.info(\n f\"Encoding {len(self.uniq_hints)} unique hints with semantic keys using {self.embedder_model} model.\"\n )\n hints = self.uniq_hints[\"hint\"].tolist()\n semantic_keys = self.uniq_hints[\"semantic_keys\"].tolist()\n lines = [f\"{k}: {h}\" for h, k in zip(hints, semantic_keys)]\n emb_path = f\"{self.hint_db_rel_path}.embs.npy\"\n assert os.path.exists(emb_path), f\"Embedding file not found: {emb_path}\"\n logger.info(f\"Loading hint embeddings from: {emb_path}\")\n emb_dict = np.load(emb_path, allow_pickle=True).item()\n self.hint_embeddings = np.array([emb_dict[k] for k in lines])\n logger.info(f\"Loaded hint embeddings shape: {self.hint_embeddings.shape}\")\n\n def apply(self, llm, discussion: StructuredDiscussion, task_name: str) -> dict:\n if not self.use_task_hint:\n return {}\n\n goal = \"\\n\".join([c.get(\"text\", \"\") for c in discussion.groups[0].messages[1].content])\n task_hints = self.choose_hints(llm, task_name, goal)\n\n hints = []\n for hint in task_hints:\n hint = hint.strip()\n if hint:\n hints.append(f\"- {hint}\")\n\n if len(hints) > 0:\n hints_str = (\n \"# Hints:\\nHere are some hints for the task you are working on:\\n\"\n + \"\\n\".join(hints)\n )\n msg = llm.msg.user().add_text(hints_str)\n\n discussion.append(msg)\n\n def choose_hints(self, llm, task_name: str, goal: str) -> list[str]:\n \"\"\"Choose hints based on the task name.\"\"\"\n if self.hint_retrieval_mode == \"llm\":\n return self.choose_hints_llm(llm, goal)\n elif self.hint_retrieval_mode == \"direct\":\n return self.choose_hints_direct(task_name)\n elif self.hint_retrieval_mode == \"emb\":\n return self.choose_hints_emb(goal)\n else:\n raise ValueError(f\"Unknown hint retrieval mode: {self.hint_retrieval_mode}\")\n\n def choose_hints_llm(self, llm, goal: str) -> list[str]:\n \"\"\"Choose hints using LLM to filter the hints.\"\"\"\n topic_to_hints = defaultdict(list)\n for i, row in self.hint_db.iterrows():\n topic_to_hints[row[\"semantic_keys\"]].append(i)\n hint_topics = 
list(topic_to_hints.keys())\n topics = \"\\n\".join([f\"{i}. {h}\" for i, h in enumerate(hint_topics)])\n prompt = self.llm_prompt.format(goal=goal, topics=topics)","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.__init__#L506-L531","kind":"function","name":"__init__","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":506,"end_line":531,"context_start_line":486,"context_end_line":551,"code":" return ToolUseAgent(\n model_args=self.model_args, # type: ignore\n config=self.config,\n action_set=self.action_set,\n )\n\n def prepare(self):\n return self.model_args.prepare_server()\n\n def close(self):\n return self.model_args.close_server()\n\n def set_benchmark(self, benchmark: AgentLabBenchmark | BgymBenchmark, demo_mode: bool):\n \"\"\"Set benchmark specific flags.\"\"\"\n benchmark_name = benchmark.name\n if benchmark_name == \"osworld\":\n self.config.obs.skip_preprocessing = True\n\n\nclass ToolUseAgent(bgym.Agent):\n def __init__(\n self,\n model_args: OpenAIResponseModelArgs,\n config: PromptConfig = None,\n action_set: bgym.AbstractActionSet | None = None,\n ):\n self.model_args = model_args\n self.config = config\n self.action_set: bgym.AbstractActionSet = action_set or bgym.HighLevelActionSet(\n self.config.action_subsets,\n multiaction=self.config.multiaction, # type: ignore\n )\n self.tools = self.action_set.to_tool_description(api=model_args.api)\n\n self.call_ids = []\n\n self.llm = model_args.make_model()\n self.msg_builder = model_args.get_message_builder()\n self.llm.msg = self.msg_builder\n\n self.task_hint = self.config.task_hint.make()\n self.obs_block = self.config.obs.make()\n\n self.discussion = StructuredDiscussion(self.config.keep_last_n_obs)\n self.last_response: LLMOutput = LLMOutput()\n self._responses: list[LLMOutput] = []\n\n def obs_preprocessor(self, obs):\n obs = copy(obs)\n if self.config.obs.skip_preprocessing:\n return obs\n page = obs.pop(\"page\", None)\n if page is not None:\n obs[\"screenshot\"] = extract_screenshot(page)\n else:\n if self.config.obs.use_dom:\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n )\n obs[\"pruned_html\"] = prune_html(obs[\"dom_txt\"])\n\n if self.config.obs.use_axtree:\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.append","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.append#L89-L91","kind":"function","name":"append","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":89,"end_line":91,"context_start_line":69,"context_end_line":111,"code":"\n@dataclass\nclass MsgGroup:\n name: str = None\n messages: list[MessageBuilder] = field(default_factory=list)\n summary: MessageBuilder = None\n\n\nclass StructuredDiscussion:\n \"\"\"\n A structured discussion that groups messages into named groups with a potential summary for each group.\n\n When the discussion is flattened, only the last `keep_last_n_obs` groups are kept in the final list,\n the 
other groups are replaced by their summaries if they have one.\n \"\"\"\n\n def __init__(self, keep_last_n_obs=None):\n self.groups: list[MsgGroup] = []\n self.keep_last_n_obs: int | None = keep_last_n_obs\n\n def append(self, message: MessageBuilder):\n \"\"\"Append a message to the last group.\"\"\"\n self.groups[-1].messages.append(message)\n\n def new_group(self, name: str = None):\n \"\"\"Start a new group of messages.\"\"\"\n if name is None:\n name = f\"group_{len(self.groups)}\"\n self.groups.append(MsgGroup(name))\n\n def flatten(self) -> list[MessageBuilder]:\n \"\"\"Flatten the groups into a single list of messages.\"\"\"\n\n keep_last_n_obs = self.keep_last_n_obs or len(self.groups)\n messages = []\n for i, group in enumerate(self.groups):\n is_tail = i >= len(self.groups) - keep_last_n_obs\n\n if not is_tail and group.summary is not None:\n messages.append(group.summary)\n else:\n messages.extend(group.messages)\n # Mark all summarized messages for caching","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.new_group","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.new_group#L93-L97","kind":"function","name":"new_group","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":93,"end_line":97,"context_start_line":73,"context_end_line":117,"code":" messages: list[MessageBuilder] = field(default_factory=list)\n summary: MessageBuilder = None\n\n\nclass StructuredDiscussion:\n \"\"\"\n A structured discussion that groups messages into named groups with a potential summary for each group.\n\n When the discussion is flattened, only the last `keep_last_n_obs` groups are kept in the final list,\n the other groups are replaced by their summaries if they have one.\n \"\"\"\n\n def __init__(self, keep_last_n_obs=None):\n self.groups: list[MsgGroup] = []\n self.keep_last_n_obs: int | None = keep_last_n_obs\n\n def append(self, message: MessageBuilder):\n \"\"\"Append a message to the last group.\"\"\"\n self.groups[-1].messages.append(message)\n\n def new_group(self, name: str = None):\n \"\"\"Start a new group of messages.\"\"\"\n if name is None:\n name = f\"group_{len(self.groups)}\"\n self.groups.append(MsgGroup(name))\n\n def flatten(self) -> list[MessageBuilder]:\n \"\"\"Flatten the groups into a single list of messages.\"\"\"\n\n keep_last_n_obs = self.keep_last_n_obs or len(self.groups)\n messages = []\n for i, group in enumerate(self.groups):\n is_tail = i >= len(self.groups) - keep_last_n_obs\n\n if not is_tail and group.summary is not None:\n messages.append(group.summary)\n else:\n messages.extend(group.messages)\n # Mark all summarized messages for caching\n if i == len(self.groups) - keep_last_n_obs:\n for msg in messages: # unset previous cache breakpoints\n msg._cache_breakpoint = False\n # set new cache breakpoint\n messages[i].mark_all_previous_msg_for_caching()\n return messages","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} 
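StructuredDiscussion.flatten is the heart of the context-compaction scheme: every group older than the last `keep_last_n_obs` is collapsed to its summary message when one was recorded, while the newest groups are kept verbatim. An illustrative miniature of that rule, with plain strings standing in for `MessageBuilder` objects and the cache-breakpoint bookkeeping omitted:

# Each dict mirrors MsgGroup: a message list plus an optional summary.
groups = [
    {"messages": ["obs 1", "action 1"], "summary": "summary of step 1"},
    {"messages": ["obs 2", "action 2"], "summary": "summary of step 2"},
    {"messages": ["obs 3"], "summary": None},
]
keep_last_n_obs = 1  # keep only the newest group verbatim

flat = []
for i, group in enumerate(groups):
    is_tail = i >= len(groups) - keep_last_n_obs
    if not is_tail and group["summary"] is not None:
        flat.append(group["summary"])  # old group: its summary stands in for the messages
    else:
        flat.extend(group["messages"])  # tail group (or no summary yet): keep as-is

print(flat)  # ['summary of step 1', 'summary of step 2', 'obs 3']

With the constructor default `keep_last_n_obs=None`, every group counts as part of the tail, so flattening degrades to the full message history.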
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.flatten","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.flatten#L99-L117","kind":"function","name":"flatten","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":99,"end_line":117,"context_start_line":79,"context_end_line":137,"code":" A structured discussion that groups messages into named groups with a potential summary for each group.\n\n When the discussion is flattened, only the last `keep_last_n_obs` groups are kept in the final list,\n the other groups are replaced by their summaries if they have one.\n \"\"\"\n\n def __init__(self, keep_last_n_obs=None):\n self.groups: list[MsgGroup] = []\n self.keep_last_n_obs: int | None = keep_last_n_obs\n\n def append(self, message: MessageBuilder):\n \"\"\"Append a message to the last group.\"\"\"\n self.groups[-1].messages.append(message)\n\n def new_group(self, name: str = None):\n \"\"\"Start a new group of messages.\"\"\"\n if name is None:\n name = f\"group_{len(self.groups)}\"\n self.groups.append(MsgGroup(name))\n\n def flatten(self) -> list[MessageBuilder]:\n \"\"\"Flatten the groups into a single list of messages.\"\"\"\n\n keep_last_n_obs = self.keep_last_n_obs or len(self.groups)\n messages = []\n for i, group in enumerate(self.groups):\n is_tail = i >= len(self.groups) - keep_last_n_obs\n\n if not is_tail and group.summary is not None:\n messages.append(group.summary)\n else:\n messages.extend(group.messages)\n # Mark all summarized messages for caching\n if i == len(self.groups) - keep_last_n_obs:\n for msg in messages: # unset previous cache breakpoints\n msg._cache_breakpoint = False\n # set new cache breakpoint\n messages[i].mark_all_previous_msg_for_caching()\n return messages\n\n def set_last_summary(self, summary: MessageBuilder):\n # append None to summaries until we reach the current group index\n self.groups[-1].summary = summary\n\n def get_last_summary(self) -> MessageBuilder | None:\n \"\"\"Get the last summary message.\"\"\"\n if len(self.groups) == 0:\n return None\n return self.groups[-1].summary\n\n def is_goal_set(self) -> bool:\n \"\"\"Check if the goal is set in the first group.\"\"\"\n return len(self.groups) > 0\n\n\nSYS_MSG = \"\"\"You are a web agent. Based on the observation, you will decide which action to take to accomplish your goal. \nYou strive for excellence and need to be as meticulous as possible. 
Make sure to explore when not sure.\n\"\"\"\n","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.set_last_summary","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.set_last_summary#L119-L121","kind":"function","name":"set_last_summary","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":119,"end_line":121,"context_start_line":99,"context_end_line":141,"code":"    def flatten(self) -> list[MessageBuilder]:\n        \"\"\"Flatten the groups into a single list of messages.\"\"\"\n\n        keep_last_n_obs = self.keep_last_n_obs or len(self.groups)\n        messages = []\n        for i, group in enumerate(self.groups):\n            is_tail = i >= len(self.groups) - keep_last_n_obs\n\n            if not is_tail and group.summary is not None:\n                messages.append(group.summary)\n            else:\n                messages.extend(group.messages)\n            # Mark all summarized messages for caching\n            if i == len(self.groups) - keep_last_n_obs:\n                for msg in messages:  # unset previous cache breakpoints\n                    msg._cache_breakpoint = False\n                # set new cache breakpoint\n                messages[i].mark_all_previous_msg_for_caching()\n        return messages\n\n    def set_last_summary(self, summary: MessageBuilder):\n        # set the summary on the current (last) group\n        self.groups[-1].summary = summary\n\n    def get_last_summary(self) -> MessageBuilder | None:\n        \"\"\"Get the last summary message.\"\"\"\n        if len(self.groups) == 0:\n            return None\n        return self.groups[-1].summary\n\n    def is_goal_set(self) -> bool:\n        \"\"\"Check if the goal is set in the first group.\"\"\"\n        return len(self.groups) > 0\n\n\nSYS_MSG = \"\"\"You are a web agent. Based on the observation, you will decide which action to take to accomplish your goal. \nYou strive for excellence and need to be as meticulous as possible. 
Make sure to explore when not sure.\n\"\"\"\n\n\n@dataclass\nclass Goal(Block):\n    \"\"\"Block to add the goal to the messages.\"\"\"","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.get_last_summary","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.get_last_summary#L123-L127","kind":"function","name":"get_last_summary","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":123,"end_line":127,"context_start_line":103,"context_end_line":147,"code":"        messages = []\n        for i, group in enumerate(self.groups):\n            is_tail = i >= len(self.groups) - keep_last_n_obs\n\n            if not is_tail and group.summary is not None:\n                messages.append(group.summary)\n            else:\n                messages.extend(group.messages)\n            # Mark all summarized messages for caching\n            if i == len(self.groups) - keep_last_n_obs:\n                for msg in messages:  # unset previous cache breakpoints\n                    msg._cache_breakpoint = False\n                # set new cache breakpoint\n                messages[i].mark_all_previous_msg_for_caching()\n        return messages\n\n    def set_last_summary(self, summary: MessageBuilder):\n        # set the summary on the current (last) group\n        self.groups[-1].summary = summary\n\n    def get_last_summary(self) -> MessageBuilder | None:\n        \"\"\"Get the last summary message.\"\"\"\n        if len(self.groups) == 0:\n            return None\n        return self.groups[-1].summary\n\n    def is_goal_set(self) -> bool:\n        \"\"\"Check if the goal is set in the first group.\"\"\"\n        return len(self.groups) > 0\n\n\nSYS_MSG = \"\"\"You are a web agent. Based on the observation, you will decide which action to take to accomplish your goal. \nYou strive for excellence and need to be as meticulous as possible. Make sure to explore when not sure.\n\"\"\"\n\n\n@dataclass\nclass Goal(Block):\n    \"\"\"Block to add the goal to the messages.\"\"\"\n\n    goal_as_system_msg: bool = True\n\n    def apply(\n        self, llm, discussion: StructuredDiscussion, obs: dict, sys_msg: str = SYS_MSG\n    ) -> dict:","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.is_goal_set","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.is_goal_set#L129-L131","kind":"function","name":"is_goal_set","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":129,"end_line":131,"context_start_line":109,"context_end_line":151,"code":"            else:\n                messages.extend(group.messages)\n            # Mark all summarized messages for caching\n            if i == len(self.groups) - keep_last_n_obs:\n                for msg in messages:  # unset previous cache breakpoints\n                    msg._cache_breakpoint = False\n                # set new cache breakpoint\n                messages[i].mark_all_previous_msg_for_caching()\n        return messages\n\n    def set_last_summary(self, summary: MessageBuilder):\n        # set the summary on the current (last) group\n        self.groups[-1].summary = summary\n\n    def get_last_summary(self) -> MessageBuilder | None:\n        \"\"\"Get the last summary message.\"\"\"\n        if len(self.groups) == 0:\n            return None\n        return self.groups[-1].summary\n\n    def is_goal_set(self) -> bool:\n        \"\"\"Check if the goal is set in the first group.\"\"\"\n        return len(self.groups) > 0\n\n\nSYS_MSG = \"\"\"You are a web agent. Based on the observation, you will decide which action to take to accomplish your goal. 
\nYou strive for excellence and need to be as meticulous as possible. Make sure to explore when not sure.\n\"\"\"\n\n\n@dataclass\nclass Goal(Block):\n    \"\"\"Block to add the goal to the messages.\"\"\"\n\n    goal_as_system_msg: bool = True\n\n    def apply(\n        self, llm, discussion: StructuredDiscussion, obs: dict, sys_msg: str = SYS_MSG\n    ) -> dict:\n        system_message = llm.msg.system().add_text(sys_msg)\n        discussion.append(system_message)\n\n        if self.goal_as_system_msg:","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.apply_init","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.apply_init#L281-L302","kind":"function","name":"apply_init","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":281,"end_line":302,"context_start_line":261,"context_end_line":322,"code":"    \"\"\"Block to summarize the last action and the current state of the environment.\"\"\"\n\n    do_summary: bool = False\n    high_details: bool = True\n\n    def apply(self, llm, discussion: StructuredDiscussion) -> dict:\n        if not self.do_summary:\n            return\n\n        msg = llm.msg.user().add_text(\"\"\"Summarize\\n\"\"\")\n\n        discussion.append(msg)\n\n        summary_response = llm(APIPayload(messages=discussion.flatten()))\n\n        summary_msg = llm.msg.assistant().add_text(summary_response.think)\n        discussion.append(summary_msg)\n        discussion.set_last_summary(summary_msg)\n        return summary_msg\n\n    def apply_init(self, llm, discussion: StructuredDiscussion) -> dict:\n        \"\"\"Initialize the summarizer block.\"\"\"\n        if not self.do_summary:\n            return\n\n        system_msg = llm.msg.system()\n        if self.high_details:\n            # Add a system message to the LLM to indicate that it should summarize\n            system_msg.add_text(\n                \"\"\"# Summarizer instructions:\\nWhen asked to summarize, do the following:\n1) Summarize the effect of the last action, with attention to details.\n2) Give a semantic description of the current state of the environment, with attention to details. If there was a repeating mistake, mention the cause of it.\n3) Reason about the overall task at a high level.\n4) What hint can be relevant for the next action? Only choose from the hints provided in the task description, or select none.\n5) Reason about the next action to take, based on the current state and the goal.\n\"\"\"\n            )\n        else:\n            system_msg.add_text(\n                \"\"\"When asked to summarize, give a semantic description of the current state of the environment.\"\"\"\n            )\n        discussion.append(system_msg)\n\n\n@dataclass\nclass TaskHint(Block):\n    use_task_hint: bool = True\n    hint_db_rel_path: str = \"hint_db.csv\"\n    hint_retrieval_mode: Literal[\"direct\", \"llm\", \"emb\"] = \"direct\"\n    top_n: int = 4 # Number of top hints to return when using embedding retrieval\n    embedder_model: str = \"Qwen/Qwen3-Embedding-0.6B\" # Model for embedding hints\n    embedder_server: str = \"http://localhost:5000\"\n    llm_prompt: str = \"\"\"We're choosing hints to help solve the following task:\\n{goal}.\\n\nYou need to choose the most relevant hints topic from the following list:\\n\\nHint topics:\\n{topics}\\n\nChoose hint topic for the task and return only its number, e.g. 1. 
If you don't know the answer, return -1.\"\"\"\n\n def _init(self):\n \"\"\"Initialize the block.\"\"\"\n if Path(self.hint_db_rel_path).is_absolute():\n hint_db_path = Path(self.hint_db_rel_path)\n else:\n hint_db_path = Path(__file__).parent / self.hint_db_rel_path","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.oai_embed","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.oai_embed#L327-L329","kind":"function","name":"oai_embed","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":327,"end_line":329,"context_start_line":307,"context_end_line":349,"code":" use_task_hint: bool = True\n hint_db_rel_path: str = \"hint_db.csv\"\n hint_retrieval_mode: Literal[\"direct\", \"llm\", \"emb\"] = \"direct\"\n top_n: int = 4 # Number of top hints to return when using embedding retrieval\n embedder_model: str = \"Qwen/Qwen3-Embedding-0.6B\" # Model for embedding hints\n embedder_server: str = \"http://localhost:5000\"\n llm_prompt: str = \"\"\"We're choosing hints to help solve the following task:\\n{goal}.\\n\nYou need to choose the most relevant hints topic from the following list:\\n\\nHint topics:\\n{topics}\\n\nChoose hint topic for the task and return only its number, e.g. 1. If you don't know the answer, return -1.\"\"\"\n\n def _init(self):\n \"\"\"Initialize the block.\"\"\"\n if Path(self.hint_db_rel_path).is_absolute():\n hint_db_path = Path(self.hint_db_rel_path)\n else:\n hint_db_path = Path(__file__).parent / self.hint_db_rel_path\n self.hint_db = pd.read_csv(hint_db_path, header=0, index_col=None, dtype=str)\n if self.hint_retrieval_mode == \"emb\":\n self.encode_hints()\n\n def oai_embed(self, text: str):\n response = self._oai_emb.create(input=text, model=\"text-embedding-3-small\")\n return response.data[0].embedding\n\n def encode_hints(self):\n self.uniq_hints = self.hint_db.drop_duplicates(subset=[\"hint\"], keep=\"first\")\n logger.info(\n f\"Encoding {len(self.uniq_hints)} unique hints with semantic keys using {self.embedder_model} model.\"\n )\n hints = self.uniq_hints[\"hint\"].tolist()\n semantic_keys = self.uniq_hints[\"semantic_keys\"].tolist()\n lines = [f\"{k}: {h}\" for h, k in zip(hints, semantic_keys)]\n emb_path = f\"{self.hint_db_rel_path}.embs.npy\"\n assert os.path.exists(emb_path), f\"Embedding file not found: {emb_path}\"\n logger.info(f\"Loading hint embeddings from: {emb_path}\")\n emb_dict = np.load(emb_path, allow_pickle=True).item()\n self.hint_embeddings = np.array([emb_dict[k] for k in lines])\n logger.info(f\"Loaded hint embeddings shape: {self.hint_embeddings.shape}\")\n\n def apply(self, llm, discussion: StructuredDiscussion, task_name: str) -> dict:\n if not self.use_task_hint:\n return {}\n","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.encode_hints","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.encode_hints#L331-L344","kind":"function","name":"encode_hints","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":331,"end_line":344,"context_start_line":311,"context_end_line":364,"code":" embedder_model: str = \"Qwen/Qwen3-Embedding-0.6B\" # Model for embedding hints\n embedder_server: str = \"http://localhost:5000\"\n 
llm_prompt: str = \"\"\"We're choosing hints to help solve the following task:\\n{goal}.\\n\nYou need to choose the most relevant hints topic from the following list:\\n\\nHint topics:\\n{topics}\\n\nChoose hint topic for the task and return only its number, e.g. 1. If you don't know the answer, return -1.\"\"\"\n\n def _init(self):\n \"\"\"Initialize the block.\"\"\"\n if Path(self.hint_db_rel_path).is_absolute():\n hint_db_path = Path(self.hint_db_rel_path)\n else:\n hint_db_path = Path(__file__).parent / self.hint_db_rel_path\n self.hint_db = pd.read_csv(hint_db_path, header=0, index_col=None, dtype=str)\n if self.hint_retrieval_mode == \"emb\":\n self.encode_hints()\n\n def oai_embed(self, text: str):\n response = self._oai_emb.create(input=text, model=\"text-embedding-3-small\")\n return response.data[0].embedding\n\n def encode_hints(self):\n self.uniq_hints = self.hint_db.drop_duplicates(subset=[\"hint\"], keep=\"first\")\n logger.info(\n f\"Encoding {len(self.uniq_hints)} unique hints with semantic keys using {self.embedder_model} model.\"\n )\n hints = self.uniq_hints[\"hint\"].tolist()\n semantic_keys = self.uniq_hints[\"semantic_keys\"].tolist()\n lines = [f\"{k}: {h}\" for h, k in zip(hints, semantic_keys)]\n emb_path = f\"{self.hint_db_rel_path}.embs.npy\"\n assert os.path.exists(emb_path), f\"Embedding file not found: {emb_path}\"\n logger.info(f\"Loading hint embeddings from: {emb_path}\")\n emb_dict = np.load(emb_path, allow_pickle=True).item()\n self.hint_embeddings = np.array([emb_dict[k] for k in lines])\n logger.info(f\"Loaded hint embeddings shape: {self.hint_embeddings.shape}\")\n\n def apply(self, llm, discussion: StructuredDiscussion, task_name: str) -> dict:\n if not self.use_task_hint:\n return {}\n\n goal = \"\\n\".join([c.get(\"text\", \"\") for c in discussion.groups[0].messages[1].content])\n task_hints = self.choose_hints(llm, task_name, goal)\n\n hints = []\n for hint in task_hints:\n hint = hint.strip()\n if hint:\n hints.append(f\"- {hint}\")\n\n if len(hints) > 0:\n hints_str = (\n \"# Hints:\\nHere are some hints for the task you are working on:\\n\"\n + \"\\n\".join(hints)\n )\n msg = llm.msg.user().add_text(hints_str)","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.choose_hints","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.choose_hints#L368-L377","kind":"function","name":"choose_hints","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":368,"end_line":377,"context_start_line":348,"context_end_line":397,"code":" return {}\n\n goal = \"\\n\".join([c.get(\"text\", \"\") for c in discussion.groups[0].messages[1].content])\n task_hints = self.choose_hints(llm, task_name, goal)\n\n hints = []\n for hint in task_hints:\n hint = hint.strip()\n if hint:\n hints.append(f\"- {hint}\")\n\n if len(hints) > 0:\n hints_str = (\n \"# Hints:\\nHere are some hints for the task you are working on:\\n\"\n + \"\\n\".join(hints)\n )\n msg = llm.msg.user().add_text(hints_str)\n\n discussion.append(msg)\n\n def choose_hints(self, llm, task_name: str, goal: str) -> list[str]:\n \"\"\"Choose hints based on the task name.\"\"\"\n if self.hint_retrieval_mode == \"llm\":\n return self.choose_hints_llm(llm, goal)\n elif self.hint_retrieval_mode == \"direct\":\n return self.choose_hints_direct(task_name)\n elif self.hint_retrieval_mode == \"emb\":\n return 
self.choose_hints_emb(goal)\n else:\n raise ValueError(f\"Unknown hint retrieval mode: {self.hint_retrieval_mode}\")\n\n def choose_hints_llm(self, llm, goal: str) -> list[str]:\n \"\"\"Choose hints using LLM to filter the hints.\"\"\"\n topic_to_hints = defaultdict(list)\n for i, row in self.hint_db.iterrows():\n topic_to_hints[row[\"semantic_keys\"]].append(i)\n hint_topics = list(topic_to_hints.keys())\n topics = \"\\n\".join([f\"{i}. {h}\" for i, h in enumerate(hint_topics)])\n prompt = self.llm_prompt.format(goal=goal, topics=topics)\n response = llm(APIPayload(messages=[llm.msg.user().add_text(prompt)]))\n try:\n hint_topic_idx = json.loads(response.think)\n if hint_topic_idx < 0 or hint_topic_idx >= len(hint_topics):\n logger.error(f\"Wrong LLM hint id response: {response.think}, no hints\")\n return []\n hint_topic = hint_topics[hint_topic_idx]\n hint_indices = topic_to_hints[hint_topic]\n df = self.hint_db.iloc[hint_indices].copy()\n df = df.drop_duplicates(subset=[\"hint\"], keep=\"first\") # leave only unique hints\n hints = df[\"hint\"].tolist()","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.choose_hints_llm","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.choose_hints_llm#L379-L402","kind":"function","name":"choose_hints_llm","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":379,"end_line":402,"context_start_line":359,"context_end_line":422,"code":" if len(hints) > 0:\n hints_str = (\n \"# Hints:\\nHere are some hints for the task you are working on:\\n\"\n + \"\\n\".join(hints)\n )\n msg = llm.msg.user().add_text(hints_str)\n\n discussion.append(msg)\n\n def choose_hints(self, llm, task_name: str, goal: str) -> list[str]:\n \"\"\"Choose hints based on the task name.\"\"\"\n if self.hint_retrieval_mode == \"llm\":\n return self.choose_hints_llm(llm, goal)\n elif self.hint_retrieval_mode == \"direct\":\n return self.choose_hints_direct(task_name)\n elif self.hint_retrieval_mode == \"emb\":\n return self.choose_hints_emb(goal)\n else:\n raise ValueError(f\"Unknown hint retrieval mode: {self.hint_retrieval_mode}\")\n\n def choose_hints_llm(self, llm, goal: str) -> list[str]:\n \"\"\"Choose hints using LLM to filter the hints.\"\"\"\n topic_to_hints = defaultdict(list)\n for i, row in self.hint_db.iterrows():\n topic_to_hints[row[\"semantic_keys\"]].append(i)\n hint_topics = list(topic_to_hints.keys())\n topics = \"\\n\".join([f\"{i}. 
{h}\" for i, h in enumerate(hint_topics)])\n prompt = self.llm_prompt.format(goal=goal, topics=topics)\n response = llm(APIPayload(messages=[llm.msg.user().add_text(prompt)]))\n try:\n hint_topic_idx = json.loads(response.think)\n if hint_topic_idx < 0 or hint_topic_idx >= len(hint_topics):\n logger.error(f\"Wrong LLM hint id response: {response.think}, no hints\")\n return []\n hint_topic = hint_topics[hint_topic_idx]\n hint_indices = topic_to_hints[hint_topic]\n df = self.hint_db.iloc[hint_indices].copy()\n df = df.drop_duplicates(subset=[\"hint\"], keep=\"first\") # leave only unique hints\n hints = df[\"hint\"].tolist()\n logger.debug(f\"LLM hint topic {hint_topic_idx}, chosen hints: {df['hint'].tolist()}\")\n except json.JSONDecodeError:\n logger.error(f\"Failed to parse LLM hint id response: {response.think}, no hints\")\n hints = []\n return hints\n\n def choose_hints_emb(self, goal: str) -> list[str]:\n \"\"\"Choose hints using embeddings to filter the hints.\"\"\"\n goal_embeddings = self._encode([goal], prompt=\"task description\")\n similarities = self._similarity(goal_embeddings.tolist(), self.hint_embeddings.tolist())\n top_indices = similarities.argsort()[0][-self.top_n :].tolist()\n logger.info(f\"Top hint indices based on embedding similarity: {top_indices}\")\n hints = self.uniq_hints.iloc[top_indices]\n logger.info(f\"Embedding-based hints chosen: {hints}\")\n return hints[\"hint\"].tolist()\n\n def _encode(self, texts: list[str], prompt: str = \"\", timeout: int = 10, max_retries: int = 5):\n \"\"\"Call the encode API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/encode\",\n json={\"texts\": texts, \"prompt\": prompt},\n timeout=timeout,\n )","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.choose_hints_emb","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.choose_hints_emb#L404-L412","kind":"function","name":"choose_hints_emb","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":404,"end_line":412,"context_start_line":384,"context_end_line":432,"code":" hint_topics = list(topic_to_hints.keys())\n topics = \"\\n\".join([f\"{i}. 
{h}\" for i, h in enumerate(hint_topics)])\n prompt = self.llm_prompt.format(goal=goal, topics=topics)\n response = llm(APIPayload(messages=[llm.msg.user().add_text(prompt)]))\n try:\n hint_topic_idx = json.loads(response.think)\n if hint_topic_idx < 0 or hint_topic_idx >= len(hint_topics):\n logger.error(f\"Wrong LLM hint id response: {response.think}, no hints\")\n return []\n hint_topic = hint_topics[hint_topic_idx]\n hint_indices = topic_to_hints[hint_topic]\n df = self.hint_db.iloc[hint_indices].copy()\n df = df.drop_duplicates(subset=[\"hint\"], keep=\"first\") # leave only unique hints\n hints = df[\"hint\"].tolist()\n logger.debug(f\"LLM hint topic {hint_topic_idx}, chosen hints: {df['hint'].tolist()}\")\n except json.JSONDecodeError:\n logger.error(f\"Failed to parse LLM hint id response: {response.think}, no hints\")\n hints = []\n return hints\n\n def choose_hints_emb(self, goal: str) -> list[str]:\n \"\"\"Choose hints using embeddings to filter the hints.\"\"\"\n goal_embeddings = self._encode([goal], prompt=\"task description\")\n similarities = self._similarity(goal_embeddings.tolist(), self.hint_embeddings.tolist())\n top_indices = similarities.argsort()[0][-self.top_n :].tolist()\n logger.info(f\"Top hint indices based on embedding similarity: {top_indices}\")\n hints = self.uniq_hints.iloc[top_indices]\n logger.info(f\"Embedding-based hints chosen: {hints}\")\n return hints[\"hint\"].tolist()\n\n def _encode(self, texts: list[str], prompt: str = \"\", timeout: int = 10, max_retries: int = 5):\n \"\"\"Call the encode API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/encode\",\n json={\"texts\": texts, \"prompt\": prompt},\n timeout=timeout,\n )\n embs = response.json()[\"embeddings\"]\n return np.asarray(embs)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n\n def _similarity(\n self, texts1: list[str], texts2: list[str], timeout: int = 2, max_retries: int = 5","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent._encode","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent._encode#L414-L429","kind":"function","name":"_encode","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":414,"end_line":429,"context_start_line":394,"context_end_line":449,"code":" hint_indices = topic_to_hints[hint_topic]\n df = self.hint_db.iloc[hint_indices].copy()\n df = df.drop_duplicates(subset=[\"hint\"], keep=\"first\") # leave only unique hints\n hints = df[\"hint\"].tolist()\n logger.debug(f\"LLM hint topic {hint_topic_idx}, chosen hints: {df['hint'].tolist()}\")\n except json.JSONDecodeError:\n logger.error(f\"Failed to parse LLM hint id response: {response.think}, no hints\")\n hints = []\n return hints\n\n def choose_hints_emb(self, goal: str) -> list[str]:\n \"\"\"Choose hints using embeddings to filter the hints.\"\"\"\n goal_embeddings = self._encode([goal], prompt=\"task description\")\n similarities = self._similarity(goal_embeddings.tolist(), self.hint_embeddings.tolist())\n top_indices = similarities.argsort()[0][-self.top_n :].tolist()\n logger.info(f\"Top hint indices based on embedding similarity: {top_indices}\")\n hints = 
self.uniq_hints.iloc[top_indices]\n logger.info(f\"Embedding-based hints chosen: {hints}\")\n return hints[\"hint\"].tolist()\n\n def _encode(self, texts: list[str], prompt: str = \"\", timeout: int = 10, max_retries: int = 5):\n \"\"\"Call the encode API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/encode\",\n json={\"texts\": texts, \"prompt\": prompt},\n timeout=timeout,\n )\n embs = response.json()[\"embeddings\"]\n return np.asarray(embs)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n\n def _similarity(\n self, texts1: list[str], texts2: list[str], timeout: int = 2, max_retries: int = 5\n ):\n \"\"\"Call the similarity API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/similarity\",\n json={\"texts1\": texts1, \"texts2\": texts2},\n timeout=timeout,\n )\n similarities = response.json()[\"similarities\"]\n return np.asarray(similarities)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent._similarity","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent._similarity#L431-L448","kind":"function","name":"_similarity","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":431,"end_line":448,"context_start_line":411,"context_end_line":468,"code":" logger.info(f\"Embedding-based hints chosen: {hints}\")\n return hints[\"hint\"].tolist()\n\n def _encode(self, texts: list[str], prompt: str = \"\", timeout: int = 10, max_retries: int = 5):\n \"\"\"Call the encode API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/encode\",\n json={\"texts\": texts, \"prompt\": prompt},\n timeout=timeout,\n )\n embs = response.json()[\"embeddings\"]\n return np.asarray(embs)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n\n def _similarity(\n self, texts1: list[str], texts2: list[str], timeout: int = 2, max_retries: int = 5\n ):\n \"\"\"Call the similarity API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/similarity\",\n json={\"texts1\": texts1, \"texts2\": texts2},\n timeout=timeout,\n )\n similarities = response.json()[\"similarities\"]\n return np.asarray(similarities)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n\n def choose_hints_direct(self, task_name: str) -> list[str]:\n hints = self.hint_db[\n self.hint_db[\"task_name\"].apply(lambda x: fnmatch.fnmatch(x, task_name))\n ]\n return hints[\"hint\"].tolist()\n\n\n@dataclass\nclass PromptConfig:\n tag_screenshot: bool = True # Whether to tag the screenshot with the last action.\n goal: Goal = None\n obs: Obs = 
None\n summarizer: Summarizer = None\n general_hints: GeneralHints = None\n task_hint: TaskHint = None\n keep_last_n_obs: int = 1\n multiaction: bool = False\n action_subsets: tuple[str] = None\n","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.choose_hints_direct","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.choose_hints_direct#L450-L454","kind":"function","name":"choose_hints_direct","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":450,"end_line":454,"context_start_line":430,"context_end_line":474,"code":"\n def _similarity(\n self, texts1: list[str], texts2: list[str], timeout: int = 2, max_retries: int = 5\n ):\n \"\"\"Call the similarity API endpoint with timeout and retries\"\"\"\n for attempt in range(max_retries):\n try:\n response = requests.post(\n f\"{self.embedder_server}/similarity\",\n json={\"texts1\": texts1, \"texts2\": texts2},\n timeout=timeout,\n )\n similarities = response.json()[\"similarities\"]\n return np.asarray(similarities)\n except (requests.exceptions.RequestException, requests.exceptions.Timeout) as e:\n if attempt == max_retries - 1:\n raise e\n time.sleep(random.uniform(1, timeout))\n continue\n\n def choose_hints_direct(self, task_name: str) -> list[str]:\n hints = self.hint_db[\n self.hint_db[\"task_name\"].apply(lambda x: fnmatch.fnmatch(x, task_name))\n ]\n return hints[\"hint\"].tolist()\n\n\n@dataclass\nclass PromptConfig:\n tag_screenshot: bool = True # Whether to tag the screenshot with the last action.\n goal: Goal = None\n obs: Obs = None\n summarizer: Summarizer = None\n general_hints: GeneralHints = None\n task_hint: TaskHint = None\n keep_last_n_obs: int = 1\n multiaction: bool = False\n action_subsets: tuple[str] = None\n\n\n@dataclass\nclass ToolUseAgentArgs(AgentArgs):\n model_args: BaseModelArgs = None\n config: PromptConfig = None\n use_raw_page_output: bool = False # This attribute is used in loop.py to setup the env.","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.__post_init__","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.__post_init__#L477-L481","kind":"function","name":"__post_init__","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":477,"end_line":481,"context_start_line":457,"context_end_line":501,"code":"@dataclass\nclass PromptConfig:\n tag_screenshot: bool = True # Whether to tag the screenshot with the last action.\n goal: Goal = None\n obs: Obs = None\n summarizer: Summarizer = None\n general_hints: GeneralHints = None\n task_hint: TaskHint = None\n keep_last_n_obs: int = 1\n multiaction: bool = False\n action_subsets: tuple[str] = None\n\n\n@dataclass\nclass ToolUseAgentArgs(AgentArgs):\n model_args: BaseModelArgs = None\n config: PromptConfig = None\n use_raw_page_output: bool = False # This attribute is used in loop.py to setup the env.\n action_set: bgym.AbstractActionSet | None = None\n\n def __post_init__(self):\n try:\n self.agent_name = f\"ToolUse-{self.model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def make_agent(self) -> bgym.Agent:\n if self.config is None:\n self.config = DEFAULT_PROMPT_CONFIG\n return ToolUseAgent(\n 
model_args=self.model_args, # type: ignore\n config=self.config,\n action_set=self.action_set,\n )\n\n def prepare(self):\n return self.model_args.prepare_server()\n\n def close(self):\n return self.model_args.close_server()\n\n def set_benchmark(self, benchmark: AgentLabBenchmark | BgymBenchmark, demo_mode: bool):\n \"\"\"Set benchmark specific flags.\"\"\"\n benchmark_name = benchmark.name\n if benchmark_name == \"osworld\":","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.make_agent#L483-L490","kind":"function","name":"make_agent","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":483,"end_line":490,"context_start_line":463,"context_end_line":510,"code":" general_hints: GeneralHints = None\n task_hint: TaskHint = None\n keep_last_n_obs: int = 1\n multiaction: bool = False\n action_subsets: tuple[str] = None\n\n\n@dataclass\nclass ToolUseAgentArgs(AgentArgs):\n model_args: BaseModelArgs = None\n config: PromptConfig = None\n use_raw_page_output: bool = False # This attribute is used in loop.py to setup the env.\n action_set: bgym.AbstractActionSet | None = None\n\n def __post_init__(self):\n try:\n self.agent_name = f\"ToolUse-{self.model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def make_agent(self) -> bgym.Agent:\n if self.config is None:\n self.config = DEFAULT_PROMPT_CONFIG\n return ToolUseAgent(\n model_args=self.model_args, # type: ignore\n config=self.config,\n action_set=self.action_set,\n )\n\n def prepare(self):\n return self.model_args.prepare_server()\n\n def close(self):\n return self.model_args.close_server()\n\n def set_benchmark(self, benchmark: AgentLabBenchmark | BgymBenchmark, demo_mode: bool):\n \"\"\"Set benchmark specific flags.\"\"\"\n benchmark_name = benchmark.name\n if benchmark_name == \"osworld\":\n self.config.obs.skip_preprocessing = True\n\n\nclass ToolUseAgent(bgym.Agent):\n def __init__(\n self,\n model_args: OpenAIResponseModelArgs,\n config: PromptConfig = None,\n action_set: bgym.AbstractActionSet | None = None,","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.prepare","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.prepare#L492-L493","kind":"function","name":"prepare","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":492,"end_line":493,"context_start_line":472,"context_end_line":513,"code":" model_args: BaseModelArgs = None\n config: PromptConfig = None\n use_raw_page_output: bool = False # This attribute is used in loop.py to setup the env.\n action_set: bgym.AbstractActionSet | None = None\n\n def __post_init__(self):\n try:\n self.agent_name = f\"ToolUse-{self.model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def make_agent(self) -> bgym.Agent:\n if self.config is None:\n self.config = DEFAULT_PROMPT_CONFIG\n return ToolUseAgent(\n model_args=self.model_args, # type: ignore\n config=self.config,\n action_set=self.action_set,\n )\n\n def prepare(self):\n return self.model_args.prepare_server()\n\n def close(self):\n return self.model_args.close_server()\n\n def set_benchmark(self, 
benchmark: AgentLabBenchmark | BgymBenchmark, demo_mode: bool):\n \"\"\"Set benchmark specific flags.\"\"\"\n benchmark_name = benchmark.name\n if benchmark_name == \"osworld\":\n self.config.obs.skip_preprocessing = True\n\n\nclass ToolUseAgent(bgym.Agent):\n def __init__(\n self,\n model_args: OpenAIResponseModelArgs,\n config: PromptConfig = None,\n action_set: bgym.AbstractActionSet | None = None,\n ):\n self.model_args = model_args\n self.config = config","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.close","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.close#L495-L496","kind":"function","name":"close","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":495,"end_line":496,"context_start_line":475,"context_end_line":516,"code":" action_set: bgym.AbstractActionSet | None = None\n\n def __post_init__(self):\n try:\n self.agent_name = f\"ToolUse-{self.model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def make_agent(self) -> bgym.Agent:\n if self.config is None:\n self.config = DEFAULT_PROMPT_CONFIG\n return ToolUseAgent(\n model_args=self.model_args, # type: ignore\n config=self.config,\n action_set=self.action_set,\n )\n\n def prepare(self):\n return self.model_args.prepare_server()\n\n def close(self):\n return self.model_args.close_server()\n\n def set_benchmark(self, benchmark: AgentLabBenchmark | BgymBenchmark, demo_mode: bool):\n \"\"\"Set benchmark specific flags.\"\"\"\n benchmark_name = benchmark.name\n if benchmark_name == \"osworld\":\n self.config.obs.skip_preprocessing = True\n\n\nclass ToolUseAgent(bgym.Agent):\n def __init__(\n self,\n model_args: OpenAIResponseModelArgs,\n config: PromptConfig = None,\n action_set: bgym.AbstractActionSet | None = None,\n ):\n self.model_args = model_args\n self.config = config\n self.action_set: bgym.AbstractActionSet = action_set or bgym.HighLevelActionSet(\n self.config.action_subsets,\n multiaction=self.config.multiaction, # type: ignore","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.set_benchmark","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.set_benchmark#L498-L502","kind":"function","name":"set_benchmark","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":498,"end_line":502,"context_start_line":478,"context_end_line":522,"code":" try:\n self.agent_name = f\"ToolUse-{self.model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def make_agent(self) -> bgym.Agent:\n if self.config is None:\n self.config = DEFAULT_PROMPT_CONFIG\n return ToolUseAgent(\n model_args=self.model_args, # type: ignore\n config=self.config,\n action_set=self.action_set,\n )\n\n def prepare(self):\n return self.model_args.prepare_server()\n\n def close(self):\n return self.model_args.close_server()\n\n def set_benchmark(self, benchmark: AgentLabBenchmark | BgymBenchmark, demo_mode: bool):\n \"\"\"Set benchmark specific flags.\"\"\"\n benchmark_name = benchmark.name\n if benchmark_name == \"osworld\":\n self.config.obs.skip_preprocessing = True\n\n\nclass ToolUseAgent(bgym.Agent):\n def __init__(\n self,\n model_args: OpenAIResponseModelArgs,\n config: PromptConfig 
= None,\n action_set: bgym.AbstractActionSet | None = None,\n ):\n self.model_args = model_args\n self.config = config\n self.action_set: bgym.AbstractActionSet = action_set or bgym.HighLevelActionSet(\n self.config.action_subsets,\n multiaction=self.config.multiaction, # type: ignore\n )\n self.tools = self.action_set.to_tool_description(api=model_args.api)\n\n self.call_ids = []\n\n self.llm = model_args.make_model()","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.obs_preprocessor","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.obs_preprocessor#L533-L561","kind":"function","name":"obs_preprocessor","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":533,"end_line":561,"context_start_line":513,"context_end_line":581,"code":" self.config = config\n self.action_set: bgym.AbstractActionSet = action_set or bgym.HighLevelActionSet(\n self.config.action_subsets,\n multiaction=self.config.multiaction, # type: ignore\n )\n self.tools = self.action_set.to_tool_description(api=model_args.api)\n\n self.call_ids = []\n\n self.llm = model_args.make_model()\n self.msg_builder = model_args.get_message_builder()\n self.llm.msg = self.msg_builder\n\n self.task_hint = self.config.task_hint.make()\n self.obs_block = self.config.obs.make()\n\n self.discussion = StructuredDiscussion(self.config.keep_last_n_obs)\n self.last_response: LLMOutput = LLMOutput()\n self._responses: list[LLMOutput] = []\n\n def obs_preprocessor(self, obs):\n obs = copy(obs)\n if self.config.obs.skip_preprocessing:\n return obs\n page = obs.pop(\"page\", None)\n if page is not None:\n obs[\"screenshot\"] = extract_screenshot(page)\n else:\n if self.config.obs.use_dom:\n obs[\"dom_txt\"] = flatten_dom_to_str(\n obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n )\n obs[\"pruned_html\"] = prune_html(obs[\"dom_txt\"])\n\n if self.config.obs.use_axtree:\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n )\n\n if self.config.obs.use_som:\n obs[\"screenshot_som\"] = overlay_som(\n obs[\"screenshot\"], extra_properties=obs[\"extra_element_properties\"]\n )\n if self.config.obs.use_zoomed_webpage:\n pass\n\n return obs\n\n def set_task_name(self, task_name: str):\n \"\"\"Cheater function that is supposed to be called by loop.py before calling get_action\"\"\"\n self.task_name = task_name\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, bgym.AgentInfo]:\n self.llm.reset_stats()\n if not self.discussion.is_goal_set():\n self.discussion.new_group(\"goal\")\n\n if self.config.multiaction:\n sys_msg = SYS_MSG + \"\\nYou can take multiple actions in a single step, if needed.\"\n else:\n sys_msg = SYS_MSG + \"\\nYou can only take one action at a time.\"\n self.config.goal.apply(self.llm, self.discussion, obs, sys_msg)\n\n self.config.summarizer.apply_init(self.llm, self.discussion)\n self.config.general_hints.apply(self.llm, self.discussion)\n self.task_hint.apply(self.llm, self.discussion, self.task_name)","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.set_task_name","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.set_task_name#L563-L565","kind":"function","name":"set_task_name","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":563,"end_line":565,"context_start_line":543,"context_end_line":585,"code":" obs[\"dom_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n )\n obs[\"pruned_html\"] = prune_html(obs[\"dom_txt\"])\n\n if self.config.obs.use_axtree:\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n )\n\n if self.config.obs.use_som:\n obs[\"screenshot_som\"] = overlay_som(\n obs[\"screenshot\"], extra_properties=obs[\"extra_element_properties\"]\n )\n if self.config.obs.use_zoomed_webpage:\n pass\n\n return obs\n\n def set_task_name(self, task_name: str):\n \"\"\"Cheater function that is supposed to be called by loop.py before callling get_action\"\"\"\n self.task_name = task_name\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> float:\n self.llm.reset_stats()\n if not self.discussion.is_goal_set():\n self.discussion.new_group(\"goal\")\n\n if self.config.multiaction:\n sys_msg = SYS_MSG + \"\\nYou can take multiple actions in a single step, if needed.\"\n else:\n sys_msg = SYS_MSG + \"\\nYou can only take one action at a time.\"\n self.config.goal.apply(self.llm, self.discussion, obs, sys_msg)\n\n self.config.summarizer.apply_init(self.llm, self.discussion)\n self.config.general_hints.apply(self.llm, self.discussion)\n self.task_hint.apply(self.llm, self.discussion, self.task_name)\n\n self.discussion.new_group()\n\n self.obs_block.apply(self.llm, self.discussion, obs, last_llm_output=self.last_response)","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tool_use_agent.tool_use_agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.tool_use_agent.tool_use_agent.get_action#L568-L628","kind":"function","name":"get_action","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":568,"end_line":628,"context_start_line":548,"context_end_line":648,"code":" if self.config.obs.use_axtree:\n obs[\"axtree_txt\"] = flatten_axtree_to_str(\n obs[\"axtree_object\"],\n extra_properties=obs[\"extra_element_properties\"],\n )\n\n if self.config.obs.use_som:\n obs[\"screenshot_som\"] = overlay_som(\n obs[\"screenshot\"], extra_properties=obs[\"extra_element_properties\"]\n )\n if self.config.obs.use_zoomed_webpage:\n pass\n\n return obs\n\n def set_task_name(self, task_name: str):\n \"\"\"Cheater function that is supposed to be called by loop.py before callling get_action\"\"\"\n self.task_name = task_name\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> float:\n self.llm.reset_stats()\n if not self.discussion.is_goal_set():\n self.discussion.new_group(\"goal\")\n\n if self.config.multiaction:\n sys_msg = SYS_MSG + \"\\nYou can take multiple actions in a single step, if needed.\"\n else:\n sys_msg = SYS_MSG + \"\\nYou can only take one action at a time.\"\n self.config.goal.apply(self.llm, self.discussion, obs, sys_msg)\n\n self.config.summarizer.apply_init(self.llm, self.discussion)\n self.config.general_hints.apply(self.llm, self.discussion)\n self.task_hint.apply(self.llm, self.discussion, 
self.task_name)\n\n self.discussion.new_group()\n\n self.obs_block.apply(self.llm, self.discussion, obs, last_llm_output=self.last_response)\n\n self.config.summarizer.apply(self.llm, self.discussion)\n\n messages = self.discussion.flatten()\n response: LLMOutput = self.llm(\n APIPayload(\n messages=messages,\n tools=self.tools, # You can update the available tools now.\n tool_choice=\"any\",\n cache_tool_definition=True,\n cache_complete_prompt=False,\n use_cache_breakpoints=True,\n )\n )\n action = response.action\n think = response.think\n last_summary = self.discussion.get_last_summary()\n if last_summary is not None:\n think = last_summary.content[0][\"text\"] + \"\\n\" + think\n\n self.discussion.new_group()\n # self.discussion.append(response.tool_calls) # No need to append tool calls anymore.\n\n self.last_response = response\n self._responses.append(response) # may be useful for debugging\n # self.messages.append(response.assistant_message) # this is tool call\n\n tools_str = json.dumps(self.tools, indent=2)\n tools_msg = MessageBuilder(\"tool_description\").add_text(tools_str)\n\n # Adding these extra messages to visualize in gradio\n messages.insert(0, tools_msg) # insert at the beginning of the message\n # This avoids the assertion error with self.llm.user().add_responded_tool_calls(tool_calls)\n msg = self.llm.msg(\"tool\")\n msg.responded_tool_calls = response.tool_calls\n messages.append(msg)\n\n agent_info = bgym.AgentInfo(\n think=think,\n chat_messages=messages,\n stats=self.llm.stats.stats_dict,\n )\n return action, agent_info\n\n\nGPT_4_1 = OpenAIResponseModelArgs(\n model_name=\"gpt-4.1\",\n max_total_tokens=200_000,\n max_input_tokens=200_000,\n max_new_tokens=2_000,\n temperature=0.1,\n vision_support=True,\n)\n\nGPT_4_1_CC_API = OpenAIChatModelArgs(\n model_name=\"gpt-4.1\",\n max_total_tokens=200_000,\n max_input_tokens=200_000,\n max_new_tokens=2_000,\n temperature=0.1,\n vision_support=True,\n)\n","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent","uri":"program://AgentLab/module/src.agentlab.agents.visualwebarena.agent#L1-L317","kind":"module","name":"src.agentlab.agents.visualwebarena.agent","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":1,"end_line":317,"context_start_line":1,"context_end_line":317,"code":"import base64\nimport importlib.resources\nimport io\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Literal\n\nimport numpy as np\nimport PIL.Image\nfrom browsergym.core.action.highlevel import HighLevelActionSet\nfrom browsergym.experiments import Agent, AgentInfo\nfrom browsergym.experiments.benchmark import Benchmark, HighLevelActionSetArgs\nfrom browsergym.utils.obs import overlay_som\n\nfrom agentlab.llm.base_api import AbstractChatModel\nfrom agentlab.llm.chat_api import BaseModelArgs, make_system_message, make_user_message\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.llm_utils import ParseError, extract_code_blocks, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nfrom ..agent_args import AgentArgs\nfrom . 
import few_shots\nfrom .prompts import TEMPLATES\n\nFEW_SHOT_FILES = importlib.resources.files(few_shots)\nVisualWebArenaObservationType = Literal[\"axtree\", \"axtree_som\", \"axtree_screenshot\"]\n\n\ndef image_data_to_uri(\n image_data: bytes | np.ndarray, output_format: Literal[\"png\", \"jpeg\"] = \"png\"\n) -> str:\n assert output_format in (\"png\", \"jpeg\")\n # load input image data (auto-detect input format)\n if isinstance(image_data, np.ndarray):\n image = PIL.Image.fromarray(image_data)\n else:\n image = PIL.Image.open(io.BytesIO(image_data))\n # TODO: is this necessary?\n if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n # convert image to desired output format\n with io.BytesIO() as image_buffer:\n image.save(image_buffer, format=output_format.upper())\n image_data = image_buffer.getvalue()\n # convert to base64 data/image URI\n image_b64 = base64.b64encode(image_data).decode(\"utf-8\")\n image_b64 = f\"data:image/{output_format};base64,\" + image_b64\n return image_b64\n\n\n@dataclass\nclass VisualWebArenaAgentArgs(AgentArgs):\n agent_name: str = \"VisualWebArenaAgent\"\n temperature: float = 0.1\n chat_model_args: BaseModelArgs = None\n action_set_args: HighLevelActionSetArgs = None\n observation_type: VisualWebArenaObservationType = \"axtree_som\"\n with_few_shot_examples: bool = True\n\n def __post_init__(self):\n self.agent_name = (\n f\"{self.agent_name}-{self.observation_type}-{self.chat_model_args.model_name}\".replace(\n \"/\", \"_\"\n )\n )\n\n def make_agent(self) -> Agent:\n return VisualWebArenaAgent(\n temperature=self.temperature,\n chat_model=self.chat_model_args.make_model(),\n action_set=self.action_set_args.make_action_set(),\n observation_type=self.observation_type,\n with_few_shot_examples=self.with_few_shot_examples,\n )\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n self.action_set_args = deepcopy(benchmark.high_level_action_set_args)\n\n def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\ndef parser(response: str) -> dict:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n\nclass VisualWebArenaAgent(Agent):\n def __init__(\n self,\n temperature: float,\n chat_model: AbstractChatModel,\n action_set: HighLevelActionSet,\n observation_type: VisualWebArenaObservationType,\n with_few_shot_examples: bool,\n ):\n self.temperature = temperature\n self.chat_model = chat_model\n self.action_set = action_set\n self.observation_type = observation_type\n self.with_few_shot_examples = with_few_shot_examples\n\n self.action_history = [\"None\"]\n\n self.intro_messages: list[dict] = []\n\n # pre-build the prompt's intro message\n self.intro_messages.append(\n {\n \"type\": \"text\",\n \"text\": TEMPLATES[observation_type][\"intro\"].format(\n action_space_description=self.action_set.describe(\n with_long_description=True, with_examples=False\n )\n ),\n }\n )\n\n self.few_shot_messages: list[dict] = []\n\n # pre-build the prompt's few-shot example messages\n if with_few_shot_examples:\n examples = TEMPLATES[observation_type][\"examples\"]\n for i, example in enumerate(examples):\n if len(example) == 2:\n # text-only example\n observation, action = example\n self.few_shot_messages.append(\n {\n \"type\": 
\"text\",\n \"text\": f\"\"\"\\\nExample {i + 1}/{len(examples)}:\n\n{observation}\nACTION: {action}\n\"\"\",\n }\n )\n elif len(example) == 3:\n # example with screenshot\n observation, action, screenshot_filename = example\n screenshot_data = FEW_SHOT_FILES.joinpath(screenshot_filename).read_bytes()\n self.few_shot_messages.extend(\n [\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nExample {i + 1}/{len(examples)}:\n\n{observation}\n\"\"\",\n },\n {\n \"type\": \"text\",\n \"text\": \"\"\"\\\nSCREENSHOT:\n\"\"\",\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": image_data_to_uri(screenshot_data)},\n },\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nACTION: {action}\n\"\"\",\n },\n ]\n )\n else:\n raise ValueError(\"Unexpected format for few-shot example.\")\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n \"\"\"\n Replica of VisualWebArena agent\n https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/prompt_constructor.py#L211\n https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/prompt_constructor.py#L272\n\n Args:\n obs (Any): Observation from the environment\n\n Returns:\n tuple[str, dict]: Action and AgentInfo\n \"\"\"\n user_messages = []\n\n # 1. add few-shot examples (if any)\n user_messages.extend(self.few_shot_messages)\n\n # 2. add the current observation to the user prompt\n active_tab = obs[\"active_page_index\"][0]\n open_tab_titles = obs[\"open_pages_titles\"]\n cur_tabs_txt = \" | \".join(\n f\"Tab {i}{' (current)' if i == active_tab else ''}: {title}\"\n for i, title in enumerate(open_tab_titles)\n )\n cur_axtree_txt = obs[\"axtree_txt\"]\n cur_url = obs[\"url\"]\n user_messages.append(\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nOBSERVATION:\n\n{cur_tabs_txt}\n\n{cur_axtree_txt}\n\nURL: {cur_url}\n\nPREVIOUS ACTION: {self.action_history[-1]}\n\"\"\",\n }\n )\n\n # if desired, add current page's screenshot\n if self.observation_type in (\"axtree_som\", \"axtree_screenshot\"):\n cur_screenshot = obs[\"screenshot\"]\n # if desired, overlay set-of-marks on the screenshot\n if self.observation_type == \"axtree_som\":\n cur_screenshot = overlay_som(cur_screenshot, obs[\"extra_element_properties\"])\n user_messages.extend(\n [\n {\n \"type\": \"text\",\n \"text\": \"\"\"\\\nSCREENSHOT:\n\"\"\",\n },\n {\"type\": \"image_url\", \"image_url\": {\"url\": image_data_to_uri(cur_screenshot)}},\n ]\n )\n\n # 3. 
add the objective (goal) to the user prompt\n user_messages.append(\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nOBJECTIVE:\n\"\"\",\n }\n )\n user_messages.extend(obs[\"goal_object\"])\n\n messages = [\n # intro prompt\n make_system_message(content=self.intro_messages),\n # few-shot examples + observation + goal\n make_user_message(content=user_messages),\n ]\n\n # finally, query the chat model\n answer: dict = retry(self.chat_model, messages, n_retry=3, parser=parser)\n\n action = answer.get(\"action\", None)\n thought = answer.get(\"think\", None)\n\n self.action_history.append(action)\n\n return (\n action,\n AgentInfo(\n think=thought,\n chat_messages=messages,\n ),\n )\n\n\n# A WebArena agent is a VisualWebArena agent with only axtree observation\nWebArenaAgent = partial(\n VisualWebArenaAgentArgs,\n agent_name=\"WebArenaAgent\",\n observation_type=\"axtree\",\n)\n\nWA_AGENT_4O_MINI = WebArenaAgent(\n temperature=0.1,\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"],\n)\n\nWA_AGENT_4O = WebArenaAgent(\n temperature=0.1,\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"],\n)\n\nWA_AGENT_SONNET = WebArenaAgent(\n temperature=0.1,\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/anthropic/claude-3.5-sonnet:beta\"],\n)\n\nVWA_AGENT_4O_MINI = VisualWebArenaAgentArgs(\n temperature=0.1,\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"],\n)\n\nVWA_AGENT_4O = VisualWebArenaAgentArgs(\n temperature=0.1,\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"azure/gpt-4o-2024-08-06\"],\n)\n\nVWA_AGENT_SONNET = VisualWebArenaAgentArgs(\n temperature=0.1,\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/anthropic/claude-3.5-sonnet:beta\"],\n)","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.image_data_to_uri","uri":"program://AgentLab/function/src.agentlab.agents.visualwebarena.agent.image_data_to_uri#L30-L49","kind":"function","name":"image_data_to_uri","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":30,"end_line":49,"context_start_line":10,"context_end_line":69,"code":"import PIL.Image\nfrom browsergym.core.action.highlevel import HighLevelActionSet\nfrom browsergym.experiments import Agent, AgentInfo\nfrom browsergym.experiments.benchmark import Benchmark, HighLevelActionSetArgs\nfrom browsergym.utils.obs import overlay_som\n\nfrom agentlab.llm.base_api import AbstractChatModel\nfrom agentlab.llm.chat_api import BaseModelArgs, make_system_message, make_user_message\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.llm_utils import ParseError, extract_code_blocks, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nfrom ..agent_args import AgentArgs\nfrom . 
import few_shots\nfrom .prompts import TEMPLATES\n\nFEW_SHOT_FILES = importlib.resources.files(few_shots)\nVisualWebArenaObservationType = Literal[\"axtree\", \"axtree_som\", \"axtree_screenshot\"]\n\n\ndef image_data_to_uri(\n image_data: bytes | np.ndarray, output_format: Literal[\"png\", \"jpeg\"] = \"png\"\n) -> str:\n assert output_format in (\"png\", \"jpeg\")\n # load input image data (auto-detect input format)\n if isinstance(image_data, np.ndarray):\n image = PIL.Image.fromarray(image_data)\n else:\n image = PIL.Image.open(io.BytesIO(image_data))\n # TODO: is this necessary?\n if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n # convert image to desired output format\n with io.BytesIO() as image_buffer:\n image.save(image_buffer, format=output_format.upper())\n image_data = image_buffer.getvalue()\n # convert to base64 data/image URI\n image_b64 = base64.b64encode(image_data).decode(\"utf-8\")\n image_b64 = f\"data:image/{output_format};base64,\" + image_b64\n return image_b64\n\n\n@dataclass\nclass VisualWebArenaAgentArgs(AgentArgs):\n agent_name: str = \"VisualWebArenaAgent\"\n temperature: float = 0.1\n chat_model_args: BaseModelArgs = None\n action_set_args: HighLevelActionSetArgs = None\n observation_type: VisualWebArenaObservationType = \"axtree_som\"\n with_few_shot_examples: bool = True\n\n def __post_init__(self):\n self.agent_name = (\n f\"{self.agent_name}-{self.observation_type}-{self.chat_model_args.model_name}\".replace(\n \"/\", \"_\"\n )\n )\n\n def make_agent(self) -> Agent:\n return VisualWebArenaAgent(","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.VisualWebArenaAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.visualwebarena.agent.VisualWebArenaAgentArgs#L53-L87","kind":"class","name":"VisualWebArenaAgentArgs","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":53,"end_line":87,"context_start_line":33,"context_end_line":107,"code":" assert output_format in (\"png\", \"jpeg\")\n # load input image data (auto-detect input format)\n if isinstance(image_data, np.ndarray):\n image = PIL.Image.fromarray(image_data)\n else:\n image = PIL.Image.open(io.BytesIO(image_data))\n # TODO: is this necessary?\n if image.mode in (\"RGBA\", \"LA\"):\n image = image.convert(\"RGB\")\n # convert image to desired output format\n with io.BytesIO() as image_buffer:\n image.save(image_buffer, format=output_format.upper())\n image_data = image_buffer.getvalue()\n # convert to base64 data/image URI\n image_b64 = base64.b64encode(image_data).decode(\"utf-8\")\n image_b64 = f\"data:image/{output_format};base64,\" + image_b64\n return image_b64\n\n\n@dataclass\nclass VisualWebArenaAgentArgs(AgentArgs):\n agent_name: str = \"VisualWebArenaAgent\"\n temperature: float = 0.1\n chat_model_args: BaseModelArgs = None\n action_set_args: HighLevelActionSetArgs = None\n observation_type: VisualWebArenaObservationType = \"axtree_som\"\n with_few_shot_examples: bool = True\n\n def __post_init__(self):\n self.agent_name = (\n f\"{self.agent_name}-{self.observation_type}-{self.chat_model_args.model_name}\".replace(\n \"/\", \"_\"\n )\n )\n\n def make_agent(self) -> Agent:\n return VisualWebArenaAgent(\n temperature=self.temperature,\n chat_model=self.chat_model_args.make_model(),\n action_set=self.action_set_args.make_action_set(),\n observation_type=self.observation_type,\n 
with_few_shot_examples=self.with_few_shot_examples,\n )\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n self.action_set_args = deepcopy(benchmark.high_level_action_set_args)\n\n def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\ndef parser(response: str) -> dict:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n\nclass VisualWebArenaAgent(Agent):\n def __init__(\n self,\n temperature: float,\n chat_model: AbstractChatModel,\n action_set: HighLevelActionSet,\n observation_type: VisualWebArenaObservationType,\n with_few_shot_examples: bool,\n ):","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.parser","uri":"program://AgentLab/function/src.agentlab.agents.visualwebarena.agent.parser#L90-L96","kind":"function","name":"parser","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":90,"end_line":96,"context_start_line":70,"context_end_line":116,"code":" temperature=self.temperature,\n chat_model=self.chat_model_args.make_model(),\n action_set=self.action_set_args.make_action_set(),\n observation_type=self.observation_type,\n with_few_shot_examples=self.with_few_shot_examples,\n )\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n self.action_set_args = deepcopy(benchmark.high_level_action_set_args)\n\n def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\ndef parser(response: str) -> dict:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n\nclass VisualWebArenaAgent(Agent):\n def __init__(\n self,\n temperature: float,\n chat_model: AbstractChatModel,\n action_set: HighLevelActionSet,\n observation_type: VisualWebArenaObservationType,\n with_few_shot_examples: bool,\n ):\n self.temperature = temperature\n self.chat_model = chat_model\n self.action_set = action_set\n self.observation_type = observation_type\n self.with_few_shot_examples = with_few_shot_examples\n\n self.action_history = [\"None\"]\n\n self.intro_messages: list[dict] = []","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.VisualWebArenaAgent","uri":"program://AgentLab/class/src.agentlab.agents.visualwebarena.agent.VisualWebArenaAgent#L99-L279","kind":"class","name":"VisualWebArenaAgent","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":99,"end_line":279,"context_start_line":79,"context_end_line":299,"code":"\n def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\ndef parser(response: str) -> dict:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code 
block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n\nclass VisualWebArenaAgent(Agent):\n def __init__(\n self,\n temperature: float,\n chat_model: AbstractChatModel,\n action_set: HighLevelActionSet,\n observation_type: VisualWebArenaObservationType,\n with_few_shot_examples: bool,\n ):\n self.temperature = temperature\n self.chat_model = chat_model\n self.action_set = action_set\n self.observation_type = observation_type\n self.with_few_shot_examples = with_few_shot_examples\n\n self.action_history = [\"None\"]\n\n self.intro_messages: list[dict] = []\n\n # pre-build the prompt's intro message\n self.intro_messages.append(\n {\n \"type\": \"text\",\n \"text\": TEMPLATES[observation_type][\"intro\"].format(\n action_space_description=self.action_set.describe(\n with_long_description=True, with_examples=False\n )\n ),\n }\n )\n\n self.few_shot_messages: list[dict] = []\n\n # pre-build the prompt's few-shot example messages\n if with_few_shot_examples:\n examples = TEMPLATES[observation_type][\"examples\"]\n for i, example in enumerate(examples):\n if len(example) == 2:\n # text-only example\n observation, action = example\n self.few_shot_messages.append(\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nExample {i + 1}/{len(examples)}:\n\n{observation}\nACTION: {action}\n\"\"\",\n }\n )\n elif len(example) == 3:\n # example with screenshot\n observation, action, screenshot_filename = example\n screenshot_data = FEW_SHOT_FILES.joinpath(screenshot_filename).read_bytes()\n self.few_shot_messages.extend(\n [\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nExample {i + 1}/{len(examples)}:\n\n{observation}\n\"\"\",\n },\n {\n \"type\": \"text\",\n \"text\": \"\"\"\\\nSCREENSHOT:\n\"\"\",\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": image_data_to_uri(screenshot_data)},\n },\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nACTION: {action}\n\"\"\",\n },\n ]\n )\n else:\n raise ValueError(\"Unexpected format for few-shot example.\")\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n \"\"\"\n Replica of VisualWebArena agent\n https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/prompt_constructor.py#L211\n https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/prompt_constructor.py#L272\n\n Args:\n obs (Any): Observation from the environment\n\n Returns:\n tuple[str, dict]: Action and AgentInfo\n \"\"\"\n user_messages = []\n\n # 1. add few-shot examples (if any)\n user_messages.extend(self.few_shot_messages)\n\n # 2. 
add the current observation to the user prompt\n active_tab = obs[\"active_page_index\"][0]\n open_tab_titles = obs[\"open_pages_titles\"]\n cur_tabs_txt = \" | \".join(\n f\"Tab {i}{' (current)' if i == active_tab else ''}: {title}\"\n for i, title in enumerate(open_tab_titles)\n )\n cur_axtree_txt = obs[\"axtree_txt\"]\n cur_url = obs[\"url\"]\n user_messages.append(\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nOBSERVATION:\n\n{cur_tabs_txt}\n\n{cur_axtree_txt}\n\nURL: {cur_url}\n\nPREVIOUS ACTION: {self.action_history[-1]}\n\"\"\",\n }\n )\n\n # if desired, add current page's screenshot\n if self.observation_type in (\"axtree_som\", \"axtree_screenshot\"):\n cur_screenshot = obs[\"screenshot\"]\n # if desired, overlay set-of-marks on the screenshot\n if self.observation_type == \"axtree_som\":\n cur_screenshot = overlay_som(cur_screenshot, obs[\"extra_element_properties\"])\n user_messages.extend(\n [\n {\n \"type\": \"text\",\n \"text\": \"\"\"\\\nSCREENSHOT:\n\"\"\",\n },\n {\"type\": \"image_url\", \"image_url\": {\"url\": image_data_to_uri(cur_screenshot)}},\n ]\n )\n\n # 3. add the objective (goal) to the user prompt\n user_messages.append(\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nOBJECTIVE:\n\"\"\",\n }\n )\n user_messages.extend(obs[\"goal_object\"])\n\n messages = [\n # intro prompt\n make_system_message(content=self.intro_messages),\n # few-shot examples + observation + goal\n make_user_message(content=user_messages),\n ]\n\n # finally, query the chat model\n answer: dict = retry(self.chat_model, messages, n_retry=3, parser=parser)\n\n action = answer.get(\"action\", None)\n thought = answer.get(\"think\", None)\n\n self.action_history.append(action)\n\n return (\n action,\n AgentInfo(\n think=thought,\n chat_messages=messages,\n ),\n )\n\n\n# A WebArena agent is a VisualWebArena agent with only axtree observation\nWebArenaAgent = partial(\n VisualWebArenaAgentArgs,\n agent_name=\"WebArenaAgent\",\n observation_type=\"axtree\",\n)\n\nWA_AGENT_4O_MINI = WebArenaAgent(\n temperature=0.1,\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"],\n)\n\nWA_AGENT_4O = WebArenaAgent(\n temperature=0.1,\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"],\n)\n\nWA_AGENT_SONNET = WebArenaAgent(","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.__post_init__","uri":"program://AgentLab/function/src.agentlab.agents.visualwebarena.agent.__post_init__#L61-L66","kind":"function","name":"__post_init__","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":61,"end_line":66,"context_start_line":41,"context_end_line":86,"code":" image = image.convert(\"RGB\")\n # convert image to desired output format\n with io.BytesIO() as image_buffer:\n image.save(image_buffer, format=output_format.upper())\n image_data = image_buffer.getvalue()\n # convert to base64 data/image URI\n image_b64 = base64.b64encode(image_data).decode(\"utf-8\")\n image_b64 = f\"data:image/{output_format};base64,\" + image_b64\n return image_b64\n\n\n@dataclass\nclass VisualWebArenaAgentArgs(AgentArgs):\n agent_name: str = \"VisualWebArenaAgent\"\n temperature: float = 0.1\n chat_model_args: BaseModelArgs = None\n action_set_args: HighLevelActionSetArgs = None\n observation_type: VisualWebArenaObservationType = \"axtree_som\"\n with_few_shot_examples: bool = True\n\n def __post_init__(self):\n self.agent_name = (\n 
f\"{self.agent_name}-{self.observation_type}-{self.chat_model_args.model_name}\".replace(\n \"/\", \"_\"\n )\n )\n\n def make_agent(self) -> Agent:\n return VisualWebArenaAgent(\n temperature=self.temperature,\n chat_model=self.chat_model_args.make_model(),\n action_set=self.action_set_args.make_action_set(),\n observation_type=self.observation_type,\n with_few_shot_examples=self.with_few_shot_examples,\n )\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n self.action_set_args = deepcopy(benchmark.high_level_action_set_args)\n\n def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.visualwebarena.agent.make_agent#L68-L75","kind":"function","name":"make_agent","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":68,"end_line":75,"context_start_line":48,"context_end_line":95,"code":" image_b64 = f\"data:image/{output_format};base64,\" + image_b64\n return image_b64\n\n\n@dataclass\nclass VisualWebArenaAgentArgs(AgentArgs):\n agent_name: str = \"VisualWebArenaAgent\"\n temperature: float = 0.1\n chat_model_args: BaseModelArgs = None\n action_set_args: HighLevelActionSetArgs = None\n observation_type: VisualWebArenaObservationType = \"axtree_som\"\n with_few_shot_examples: bool = True\n\n def __post_init__(self):\n self.agent_name = (\n f\"{self.agent_name}-{self.observation_type}-{self.chat_model_args.model_name}\".replace(\n \"/\", \"_\"\n )\n )\n\n def make_agent(self) -> Agent:\n return VisualWebArenaAgent(\n temperature=self.temperature,\n chat_model=self.chat_model_args.make_model(),\n action_set=self.action_set_args.make_action_set(),\n observation_type=self.observation_type,\n with_few_shot_examples=self.with_few_shot_examples,\n )\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n self.action_set_args = deepcopy(benchmark.high_level_action_set_args)\n\n def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\ndef parser(response: str) -> dict:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.set_benchmark","uri":"program://AgentLab/function/src.agentlab.agents.visualwebarena.agent.set_benchmark#L77-L78","kind":"function","name":"set_benchmark","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":77,"end_line":78,"context_start_line":57,"context_end_line":98,"code":" action_set_args: HighLevelActionSetArgs = None\n observation_type: VisualWebArenaObservationType = \"axtree_som\"\n with_few_shot_examples: bool = True\n\n def __post_init__(self):\n self.agent_name = (\n f\"{self.agent_name}-{self.observation_type}-{self.chat_model_args.model_name}\".replace(\n \"/\", \"_\"\n )\n )\n\n def make_agent(self) -> Agent:\n return VisualWebArenaAgent(\n temperature=self.temperature,\n 
chat_model=self.chat_model_args.make_model(),\n action_set=self.action_set_args.make_action_set(),\n observation_type=self.observation_type,\n with_few_shot_examples=self.with_few_shot_examples,\n )\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n self.action_set_args = deepcopy(benchmark.high_level_action_set_args)\n\n def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\ndef parser(response: str) -> dict:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.set_reproducibility_mode","uri":"program://AgentLab/function/src.agentlab.agents.visualwebarena.agent.set_reproducibility_mode#L80-L81","kind":"function","name":"set_reproducibility_mode","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":80,"end_line":81,"context_start_line":60,"context_end_line":101,"code":"\n def __post_init__(self):\n self.agent_name = (\n f\"{self.agent_name}-{self.observation_type}-{self.chat_model_args.model_name}\".replace(\n \"/\", \"_\"\n )\n )\n\n def make_agent(self) -> Agent:\n return VisualWebArenaAgent(\n temperature=self.temperature,\n chat_model=self.chat_model_args.make_model(),\n action_set=self.action_set_args.make_action_set(),\n observation_type=self.observation_type,\n with_few_shot_examples=self.with_few_shot_examples,\n )\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n self.action_set_args = deepcopy(benchmark.high_level_action_set_args)\n\n def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\ndef parser(response: str) -> dict:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n\nclass VisualWebArenaAgent(Agent):\n def __init__(\n self,","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.prepare","uri":"program://AgentLab/function/src.agentlab.agents.visualwebarena.agent.prepare#L83-L84","kind":"function","name":"prepare","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":83,"end_line":84,"context_start_line":63,"context_end_line":104,"code":" f\"{self.agent_name}-{self.observation_type}-{self.chat_model_args.model_name}\".replace(\n \"/\", \"_\"\n )\n )\n\n def make_agent(self) -> Agent:\n return VisualWebArenaAgent(\n temperature=self.temperature,\n chat_model=self.chat_model_args.make_model(),\n action_set=self.action_set_args.make_action_set(),\n observation_type=self.observation_type,\n with_few_shot_examples=self.with_few_shot_examples,\n )\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n self.action_set_args = deepcopy(benchmark.high_level_action_set_args)\n\n def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def 
prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\ndef parser(response: str) -> dict:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n\nclass VisualWebArenaAgent(Agent):\n def __init__(\n self,\n temperature: float,\n chat_model: AbstractChatModel,\n action_set: HighLevelActionSet,","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.close","uri":"program://AgentLab/function/src.agentlab.agents.visualwebarena.agent.close#L86-L87","kind":"function","name":"close","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":86,"end_line":87,"context_start_line":66,"context_end_line":107,"code":" )\n\n def make_agent(self) -> Agent:\n return VisualWebArenaAgent(\n temperature=self.temperature,\n chat_model=self.chat_model_args.make_model(),\n action_set=self.action_set_args.make_action_set(),\n observation_type=self.observation_type,\n with_few_shot_examples=self.with_few_shot_examples,\n )\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n self.action_set_args = deepcopy(benchmark.high_level_action_set_args)\n\n def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\ndef parser(response: str) -> dict:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n\nclass VisualWebArenaAgent(Agent):\n def __init__(\n self,\n temperature: float,\n chat_model: AbstractChatModel,\n action_set: HighLevelActionSet,\n observation_type: VisualWebArenaObservationType,\n with_few_shot_examples: bool,\n ):","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.visualwebarena.agent.__init__#L100-L183","kind":"function","name":"__init__","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":100,"end_line":183,"context_start_line":80,"context_end_line":203,"code":" def set_reproducibility_mode(self):\n self.temperature = 0.0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\ndef parser(response: str) -> dict:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n\nclass VisualWebArenaAgent(Agent):\n def __init__(\n self,\n temperature: float,\n chat_model: AbstractChatModel,\n action_set: HighLevelActionSet,\n observation_type: VisualWebArenaObservationType,\n with_few_shot_examples: bool,\n ):\n self.temperature = temperature\n self.chat_model = chat_model\n self.action_set = action_set\n self.observation_type = observation_type\n self.with_few_shot_examples = with_few_shot_examples\n\n self.action_history = 
[\"None\"]\n\n self.intro_messages: list[dict] = []\n\n # pre-build the prompt's intro message\n self.intro_messages.append(\n {\n \"type\": \"text\",\n \"text\": TEMPLATES[observation_type][\"intro\"].format(\n action_space_description=self.action_set.describe(\n with_long_description=True, with_examples=False\n )\n ),\n }\n )\n\n self.few_shot_messages: list[dict] = []\n\n # pre-build the prompt's few-shot example messages\n if with_few_shot_examples:\n examples = TEMPLATES[observation_type][\"examples\"]\n for i, example in enumerate(examples):\n if len(example) == 2:\n # text-only example\n observation, action = example\n self.few_shot_messages.append(\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nExample {i + 1}/{len(examples)}:\n\n{observation}\nACTION: {action}\n\"\"\",\n }\n )\n elif len(example) == 3:\n # example with screenshot\n observation, action, screenshot_filename = example\n screenshot_data = FEW_SHOT_FILES.joinpath(screenshot_filename).read_bytes()\n self.few_shot_messages.extend(\n [\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nExample {i + 1}/{len(examples)}:\n\n{observation}\n\"\"\",\n },\n {\n \"type\": \"text\",\n \"text\": \"\"\"\\\nSCREENSHOT:\n\"\"\",\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": image_data_to_uri(screenshot_data)},\n },\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nACTION: {action}\n\"\"\",\n },\n ]\n )\n else:\n raise ValueError(\"Unexpected format for few-shot example.\")\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n \"\"\"\n Replica of VisualWebArena agent\n https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/prompt_constructor.py#L211\n https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/prompt_constructor.py#L272\n\n Args:\n obs (Any): Observation from the environment\n\n Returns:\n tuple[str, dict]: Action and AgentInfo\n \"\"\"\n user_messages = []\n\n # 1. add few-shot examples (if any)\n user_messages.extend(self.few_shot_messages)\n\n # 2. add the current observation to the user prompt","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.visualwebarena.agent.get_action#L186-L279","kind":"function","name":"get_action","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":186,"end_line":279,"context_start_line":166,"context_end_line":299,"code":" \"text\": \"\"\"\\\nSCREENSHOT:\n\"\"\",\n },\n {\n \"type\": \"image_url\",\n \"image_url\": {\"url\": image_data_to_uri(screenshot_data)},\n },\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nACTION: {action}\n\"\"\",\n },\n ]\n )\n else:\n raise ValueError(\"Unexpected format for few-shot example.\")\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n \"\"\"\n Replica of VisualWebArena agent\n https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/prompt_constructor.py#L211\n https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/prompt_constructor.py#L272\n\n Args:\n obs (Any): Observation from the environment\n\n Returns:\n tuple[str, dict]: Action and AgentInfo\n \"\"\"\n user_messages = []\n\n # 1. 
add few-shot examples (if any)\n user_messages.extend(self.few_shot_messages)\n\n # 2. add the current observation to the user prompt\n active_tab = obs[\"active_page_index\"][0]\n open_tab_titles = obs[\"open_pages_titles\"]\n cur_tabs_txt = \" | \".join(\n f\"Tab {i}{' (current)' if i == active_tab else ''}: {title}\"\n for i, title in enumerate(open_tab_titles)\n )\n cur_axtree_txt = obs[\"axtree_txt\"]\n cur_url = obs[\"url\"]\n user_messages.append(\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nOBSERVATION:\n\n{cur_tabs_txt}\n\n{cur_axtree_txt}\n\nURL: {cur_url}\n\nPREVIOUS ACTION: {self.action_history[-1]}\n\"\"\",\n }\n )\n\n # if desired, add current page's screenshot\n if self.observation_type in (\"axtree_som\", \"axtree_screenshot\"):\n cur_screenshot = obs[\"screenshot\"]\n # if desired, overlay set-of-marks on the screenshot\n if self.observation_type == \"axtree_som\":\n cur_screenshot = overlay_som(cur_screenshot, obs[\"extra_element_properties\"])\n user_messages.extend(\n [\n {\n \"type\": \"text\",\n \"text\": \"\"\"\\\nSCREENSHOT:\n\"\"\",\n },\n {\"type\": \"image_url\", \"image_url\": {\"url\": image_data_to_uri(cur_screenshot)}},\n ]\n )\n\n # 3. add the objective (goal) to the user prompt\n user_messages.append(\n {\n \"type\": \"text\",\n \"text\": f\"\"\"\\\nOBJECTIVE:\n\"\"\",\n }\n )\n user_messages.extend(obs[\"goal_object\"])\n\n messages = [\n # intro prompt\n make_system_message(content=self.intro_messages),\n # few-shot examples + observation + goal\n make_user_message(content=user_messages),\n ]\n\n # finally, query the chat model\n answer: dict = retry(self.chat_model, messages, n_retry=3, parser=parser)\n\n action = answer.get(\"action\", None)\n thought = answer.get(\"think\", None)\n\n self.action_history.append(action)\n\n return (\n action,\n AgentInfo(\n think=thought,\n chat_messages=messages,\n ),\n )\n\n\n# A WebArena agent is a VisualWebArena agent with only axtree observation\nWebArenaAgent = partial(\n VisualWebArenaAgentArgs,\n agent_name=\"WebArenaAgent\",\n observation_type=\"axtree\",\n)\n\nWA_AGENT_4O_MINI = WebArenaAgent(\n temperature=0.1,\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"],\n)\n\nWA_AGENT_4O = WebArenaAgent(\n temperature=0.1,\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"],\n)\n\nWA_AGENT_SONNET = WebArenaAgent(","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visualwebarena.prompts","uri":"program://AgentLab/module/src.agentlab.agents.visualwebarena.prompts#L1-L262","kind":"module","name":"src.agentlab.agents.visualwebarena.prompts","path":"src/agentlab/agents/visualwebarena/prompts.py","language":"python","start_line":1,"end_line":262,"context_start_line":1,"context_end_line":262,"code":"# Best-attempt reproduction of original prompts the VisualWebArena agent\n\nTEMPLATES = {}\n\n# https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/raw/p_cot_id_actree_3s.py#L1\nTEMPLATES[\"axtree\"] = {\n \"intro\": \"\"\"\\\nYou are an autonomous intelligent agent tasked with navigating a web browser. You will be given web-based tasks. 
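`cur_tabs_txt` renders the open tabs as a single line with the active tab flagged. The same join/format logic extracted as a small function for illustration (`format_tabs` is a hypothetical name):

```python
def format_tabs(titles: list[str], active: int) -> str:
    # One entry per open tab, marking the active one, separated by " | ".
    return " | ".join(
        f"Tab {i}{' (current)' if i == active else ''}: {title}"
        for i, title in enumerate(titles)
    )


assert format_tabs(["Inbox", "Search"], active=0) == "Tab 0 (current): Inbox | Tab 1: Search"
```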
These tasks will be accomplished through the use of specific actions you can issue.\n\nHere's the information you'll have:\nThe user's objective: This is the task you're trying to complete.\nThe current web page's accessibility tree: This is a simplified representation of the webpage, providing key information.\nThe current web page's URL: This is the page you're currently navigating.\nThe open tabs: These are the tabs you have open.\nThe previous action: This is the action you just performed. It may be helpful to track your progress.\n\n{action_space_description}\n\nTo be successful, it is very important to follow the following rules:\n1. You should only issue an action that is valid given the current observation\n2. You should only issue one action at a time.\n3. You should follow the examples to reason step by step and then issue the next action.\n4. Generate the action in the correct format. Start with a \"In summary, the next action I will perform is\" phrase, followed by action inside ``````. For example, \"In summary, the next action I will perform is ```click(\"1234\")```\".\"\"\",\n \"examples\": [\n (\n \"\"\"\\\nOBSERVATION:\n[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'\n[1749] StaticText '$279.49'\n[1757] button 'Add to Cart'\n[1760] button 'Add to Wish List'\n[1761] button 'Add to Compare'\nURL: http://onestopmarket.com/office-products/office-electronics.html\nOBJECTIVE: What is the price of HP Inkjet Fax Machine?\nPREVIOUS ACTION: None\"\"\",\n \"\"\"\\\nLet's think step-by-step. This page list the information of HP Inkjet Fax Machine, which is the product identified in the objective. Its price is $279.49. I think I have achieved the objective. I will issue a user message with the answer. In summary, the next action I will perform is ```send_msg_to_user(\"$279.49\")```\"\"\",\n ),\n (\n \"\"\"\\\nOBSERVATION:\n[204] heading '/f/food'\n[593] heading '[homemade] Obligatory Halloween Pumpkin Loaf!'\n\t[942] link '[homemade] Obligatory Halloween Pumpkin Loaf!'\n[945] StaticText 'Submitted by '\n[30] link 'kneechalice' expanded: False\n[1484] StaticText 't3_yid9lu'\n[949] time 'October 31, 2022 at 10:10:03 AM EDT'\n\t[1488] StaticText '1 year ago'\n[1489] link '45 comments'\n[605] heading '[I ate] Maple Pecan Croissant'\n\t[963] link '[I ate] Maple Pecan Croissant'\n[966] StaticText 'Submitted by '\n[37] link 'AccordingtoJP' expanded: False\n[1494] StaticText 't3_y3hrpn'\n[970] time 'October 13, 2022 at 10:41:09 PM EDT'\n\t[1498] StaticText '1 year ago'\n[1499] link '204 comments'\nURL: http://reddit.com\nOBJECTIVE: Tell me what the top comment on the croissant post says.\nPREVIOUS ACTION: None\"\"\",\n \"\"\"\\\nLet's think step-by-step. This page has a post titled '[I ate] Maple Pecan Croissant', which is the post mentioned in the objective. In order to find the top comment, I will navigate into the comments section of the post. 
In summary, the next action I will perform is ```click(\"1499\")```\"\"\",\n ),\n (\n \"\"\"\\\nOBSERVATION:\n[42] link 'My account'\n[43] link 'Logout'\n[44] link 'Publish Ad'\n[25] heading 'What are you looking for today?'\n[143] StaticText 'Keyword'\n[81] textbox 'e.g., a blue used car' required: False\n[146] StaticText 'Category'\n[28] heading 'Latest Listings'\n[86] link 'Atlas Powered Audio System w/ Tripod'\n\t[176] img 'Atlas Powered Audio System w/ Tripod'\n[511] StaticText '150.00 $'\n[88] link 'Neptune Gaming Console'\n\t[178] img 'Neptune Gaming Console'\n[515] StaticText '350.00 $'\nURL: http://classifieds.com\nOBJECTIVE: Help me find the cheapest dark colored guitar.\nPREVIOUS ACTION: None\"\"\",\n \"\"\"\\\nLet's think step-by-step. The objective is to find the cheapest dark colored guitar on the site. The site has a search box whose ID is [81]. I can search for guitars by entering \"guitar\". I can submit this by pressing the Enter afterwards. In summary, the next action I will perform is ```fill(\"81\", \"guitar\")```\"\"\",\n ),\n ],\n}\n\n# https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/raw/p_multimodal_cot_id_actree_3s.py#L1\nTEMPLATES[\"axtree_screenshot\"] = {\n \"intro\": \"\"\"\\\nYou are an autonomous intelligent agent tasked with navigating a web browser. You will be given web-based tasks. These tasks will be accomplished through the use of specific actions you can issue.\n\nHere's the information you'll have:\nThe user's objective: This is the task you're trying to complete.\nThe current web page's accessibility tree: This is a simplified representation of the webpage, providing key information.\nThe current web page's URL: This is the page you're currently navigating.\nThe open tabs: These are the tabs you have open.\nThe previous action: This is the action you just performed. It may be helpful to track your progress.\n\n{action_space_description}\n\nTo be successful, it is very important to follow the following rules:\n1. You should only issue an action that is valid given the current observation\n2. You should only issue one action at a time.\n3. You should follow the examples to reason step by step and then issue the next action.\n4. Generate the action in the correct format. Start with a \"In summary, the next action I will perform is\" phrase, followed by action inside ``````. For example, \"In summary, the next action I will perform is ```click(\"1234\")```\".\"\"\",\n \"examples\": [\n (\n \"\"\"\\\nOBSERVATION:\n[1744] link 'HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)'\n[1749] StaticText '$279.49'\n[1757] button 'Add to Cart'\n[1760] button 'Add to Wish List'\n[1761] button 'Add to Compare'\nURL: http://onestopmarket.com/office-products/office-electronics.html\nOBJECTIVE: What is the price of HP Inkjet Fax Machine?\nPREVIOUS ACTION: None\"\"\",\n \"\"\"\\\nLet's think step-by-step. This page list the information of HP Inkjet Fax Machine, which is the product identified in the objective. Its price is $279.49. I think I have achieved the objective. I will issue a user message with the answer. 
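Each text-only few-shot example above is rendered into a single `Example i/n` content part. A sketch of that rendering, mirroring the 2-tuple branch of `__init__` (the function name is illustrative):

```python
def render_text_example(i: int, n: int, observation: str, action: str) -> dict:
    # Matches the text-only branch: header, observation, then the gold action.
    return {
        "type": "text",
        "text": f"Example {i + 1}/{n}:\n\n{observation}\nACTION: {action}\n",
    }


msg = render_text_example(0, 3, "OBSERVATION:\n[81] textbox ...", 'fill("81", "guitar")')
assert msg["text"].startswith("Example 1/3:")
```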
In summary, the next action I will perform is ```send_msg_to_user(\"$279.49\")```\"\"\",\n \"multimodal_example1.png\",\n ),\n (\n \"\"\"\\\nOBSERVATION:\n[204] heading '/f/food'\n[593] heading '[homemade] Obligatory Halloween Pumpkin Loaf!'\n\t[942] link '[homemade] Obligatory Halloween Pumpkin Loaf!'\n[945] StaticText 'Submitted by '\n[30] link 'kneechalice' expanded: False\n[1484] StaticText 't3_yid9lu'\n[949] time 'October 31, 2022 at 10:10:03 AM EDT'\n\t[1488] StaticText '1 year ago'\n[1489] link '45 comments'\n[605] heading '[I ate] Maple Pecan Croissant'\n\t[963] link '[I ate] Maple Pecan Croissant'\n[966] StaticText 'Submitted by '\n[37] link 'AccordingtoJP' expanded: False\n[1494] StaticText 't3_y3hrpn'\n[970] time 'October 13, 2022 at 10:41:09 PM EDT'\n\t[1498] StaticText '1 year ago'\n[1499] link '204 comments'\nURL: http://reddit.com\nOBJECTIVE: Tell me what the top comment on the croissant post says.\nPREVIOUS ACTION: None\"\"\",\n \"\"\"\\\nLet's think step-by-step. This page has a post titled '[I ate] Maple Pecan Croissant', which is the post mentioned in the objective. In order to find the top comment, I will navigate into the comments section of the post. In summary, the next action I will perform is ```click(\"1499\")```\"\"\",\n \"multimodal_example2.png\",\n ),\n (\n \"\"\"\\\nOBSERVATION:\n[42] link 'My account'\n[43] link 'Logout'\n[44] link 'Publish Ad'\n[25] heading 'What are you looking for today?'\n[143] StaticText 'Keyword'\n[81] textbox 'e.g., a blue used car' required: False\n[146] StaticText 'Category'\n[28] heading 'Latest Listings'\n[86] link 'Atlas Powered Audio System w/ Tripod'\n\t[176] img 'Atlas Powered Audio System w/ Tripod'\n[511] StaticText '150.00 $'\n[88] link 'Neptune Gaming Console'\n\t[178] img 'Neptune Gaming Console'\n[515] StaticText '350.00 $'\nURL: http://classifieds.com\nOBJECTIVE: Help me find the cheapest dark colored guitar.\nPREVIOUS ACTION: None\"\"\",\n \"\"\"\\\nLet's think step-by-step. The objective is to find the cheapest dark colored guitar on the site. The site has a search box whose ID is [81]. I can search for guitars by entering \"guitar\". I can submit this by pressing the Enter afterwards. In summary, the next action I will perform is ```fill(\"81\", \"guitar\")```\"\"\",\n \"multimodal_example3.png\",\n ),\n ],\n}\n\n# https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/raw/p_som_cot_id_actree_3s.py#L1\nTEMPLATES[\"axtree_som\"] = prompt = {\n \"intro\": \"\"\"\\\nYou are an autonomous intelligent agent tasked with navigating a web browser. You will be given web-based tasks. These tasks will be accomplished through the use of specific actions you can issue.\n\nHere's the information you'll have:\nThe user's objective: This is the task you're trying to complete.\nThe current web page screenshot: This is a screenshot of the webpage, with each interactable element assigned a unique numerical id. Each bounding box and its respective id shares the same color.\nThe observation, which lists the IDs of all interactable elements on the current web page with their text content if any, in the format [id] [tagType] [text content]. tagType is the type of the element, such as button, link, or textbox. text content is the text content of the element. For example, [1234] [button] ['Add to Cart'] means that there is a button with id 1234 and text content 'Add to Cart' on the current web page. 
[] [StaticText] [text] means that the element is of some text that is not interactable.\nThe current web page's URL: This is the page you're currently navigating.\nThe open tabs: These are the tabs you have open.\nThe previous action: This is the action you just performed. It may be helpful to track your progress.\n\n{action_space_description}\n\nTo be successful, it is very important to follow the following rules:\n1. You should only issue an action that is valid given the current observation\n2. You should only issue one action at a time.\n3. You should follow the examples to reason step by step and then issue the next action.\n4. Generate the action in the correct format. Start with a \"In summary, the next action I will perform is\" phrase, followed by action inside ``````. For example, \"In summary, the next action I will perform is ```click(\"1234\")```\".\"\"\",\n \"examples\": [\n (\n \"\"\"\\\nOBSERVATION:\n[31] [IMG] [Image, description: hp fx-7010dn fax machine, url: http://ec2-3-13-232-171.us-east-2.compute.amazonaws.com:7770/media/catalog/product/cache/89ff578b9cd87e0600daac45c9e1ea98/B/0/B08GKZ3ZKD.0.jpg]\n[32] [A] [HP CB782A#ABA 640 Inkjet Fax Machine (Renewed)]\n[] [StaticText] [$279.49]\n[33] [BUTTON] [Add to Cart]\n[34] [A] [Add to Wish List]\n[35] [A] [Add to Compare]\nURL: http://onestopmarket.com/office-products/office-electronics.html\nOBJECTIVE: What is the price of HP Inkjet Fax Machine?\nPREVIOUS ACTION: None\"\"\",\n \"\"\"\\\nLet's think step-by-step. This page list the information of HP Inkjet Fax Machine, which is the product identified in the objective. Its price is $279.49. I think I have achieved the objective. I will issue a user message with the answer. In summary, the next action I will perform is ```send_msg_to_user(\"$279.49\")```\"\"\",\n \"som_example1.png\",\n ),\n (\n \"\"\"\\\nOBSERVATION:\n[] [StaticText] [/f/food]\n[] [StaticText] [[homemade] Obligatory Halloween Pumpkin Loaf!\tSubmitted by\tkneechalice\tt3_yid9lu\t1 year ago]\n[9] [IMG] []\n[] [StaticText] [Submitted by\tkneechalice\tt3_yid9lu\t1 year ago]\n[10] [A] [kneechalice]\n[11] [A] [45 comments]\n[] [StaticText] [[I ate] Maple Pecan Croissant\tSubmitted by\tAccordingtoJP\tt3_y3hrpn\t1 year ago]\n[14] [IMG] []\n[] [StaticText] [Submitted by\tAccordingtoJP\tt3_y3hrpn\t1 year ago]\n[15] [A] [AccordingtoJP]\n[16] [A] [204 comments]\nURL: http://reddit.com\nOBJECTIVE: Tell me what the top comment on the croissant post says.\nPREVIOUS ACTION: None\"\"\",\n \"\"\"\\\nLet's think step-by-step. This page has a post titled '[I ate] Maple Pecan Croissant', which is the post mentioned in the objective. In order to find the top comment, I will navigate into the comments section of the post. 
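For the `axtree_som` observation type, `overlay_som` draws the numbered set-of-marks onto the screenshot before it is sent to the model. A rough Pillow sketch, assuming `extra_element_properties` maps each bid to a record containing a `"bbox": [x, y, w, h]` entry; that schema and the drawing logic are assumptions, not the real `overlay_som`:

```python
import numpy as np
from PIL import Image, ImageDraw


def overlay_som_sketch(screenshot: np.ndarray, element_props: dict) -> np.ndarray:
    # Assumed schema: element_props[bid] == {"bbox": [x, y, w, h]}.
    img = Image.fromarray(screenshot)
    draw = ImageDraw.Draw(img)
    for bid, props in element_props.items():
        x, y, w, h = props["bbox"]
        draw.rectangle([x, y, x + w, y + h], outline=(255, 0, 0), width=2)
        draw.text((x + 2, y + 2), str(bid), fill=(255, 0, 0))  # the mark's id
    return np.asarray(img)


blank = np.zeros((120, 200, 3), dtype=np.uint8)
marked = overlay_som_sketch(blank, {"11": {"bbox": [10, 10, 60, 24]}})
assert marked.shape == blank.shape
```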
In summary, the next action I will perform is ```click(\"11\")```\"\"\",\n \"som_example2.png\",\n ),\n (\n \"\"\"\\\nOBSERVATION:\n[] [StaticText] [What are you looking for today?]\n[5] [INPUT] []\n[6] [SELECT] [Select a category]\n[7] [BUTTON] [Search]\n[] [StaticText] [Latest Listings]\n[] [StaticText] [Atlas Powered Audio System w/ Tripod\t150.00 $\tMusic instruments\tBorough of Red Lion (Pennsylvania)\t2023/11/16]\n[8] [IMG] [Atlas Powered Audio System w/ Tripod]\n[9] [A] [Atlas Powered Audio System w/ Tripod]\n[] [StaticText] [150.00 $]\n[] [StaticText] [Neptune Gaming Console\t350.00 $\tVideo gaming\tPennwyn (Pennsylvania)\t2023/11/16]\n[10] [IMG] [Neptune Gaming Console]\n[11] [A] [Neptune Gaming Console]\n[] [StaticText] [350.00 $]\nURL: http://classifieds.com\nOBJECTIVE: Help me find the cheapest dark colored guitar.\nPREVIOUS ACTION: None\"\"\",\n \"\"\"\\\nLet's think step-by-step. The objective is to find the cheapest dark colored guitar on the site. The site has a search box whose ID is [5]. I can search for guitars by entering \"guitar\". I can submit this by pressing the Enter afterwards. In summary, the next action I will perform is ```fill(\"5\", \"guitar\")```\"\"\",\n \"som_example3.png\",\n ),\n ],\n}","source_hash":"fe1c62488bb08638debc1156c94e20ff19342180acfe4c18eb51b96b74f2bc62","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent","uri":"program://AgentLab/module/src.agentlab.agents.most_basic_agent.most_basic_agent#L1-L164","kind":"module","name":"src.agentlab.agents.most_basic_agent.most_basic_agent","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":1,"end_line":164,"context_start_line":1,"context_end_line":164,"code":"import logging\nfrom dataclasses import asdict, dataclass\nfrom typing import TYPE_CHECKING, Any\n\nimport bgym\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.experiments.loop import ExpArgs\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.llm_utils import (\n Discussion,\n HumanMessage,\n ParseError,\n SystemMessage,\n extract_code_blocks,\n retry,\n)\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nif TYPE_CHECKING:\n from agentlab.llm.chat_api import BaseModelArgs\n\n\n@dataclass\nclass MostBasicAgentArgs(AgentArgs):\n agent_name: str = \"BasicAgent\"\n temperature: float = 0.1\n use_chain_of_thought: bool = False\n chat_model_args: \"BaseModelArgs\" = None\n\n def make_agent(self) -> bgym.Agent:\n return MostBasicAgent(\n temperature=self.temperature,\n use_chain_of_thought=self.use_chain_of_thought,\n chat_model_args=self.chat_model_args,\n )\n\n def set_reproducibility_mode(self):\n self.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\nclass MostBasicAgent(bgym.Agent):\n def __init__(\n self, temperature: float, use_chain_of_thought: bool, chat_model_args: \"BaseModelArgs\"\n ):\n self.temperature = temperature\n self.use_chain_of_thought = use_chain_of_thought\n self.chat = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n\n self.action_set = bgym.HighLevelActionSet([\"bid\"], multiaction=False)\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n messages = Discussion(SystemMessage(\"You are a web assistant.\"))\n messages.append(\n HumanMessage(\n f\"\"\"\nYou are helping a user to accomplish the following goal on a 
website:\n\n{obs[\"goal\"]}\n\nTo do so, you can interact with the environment using the following actions:\n\n{self.action_set.describe(with_long_description=False)}\n\nThe inputs to those functions are the bids given in the html.\n\nHere is the current state of the website, in the form of an html:\n\n{obs[\"pruned_html\"]}\n\nThe action you provide must be in between triple ticks and leverage the 'bid=' information provided in the html.\nHere is an example of how to use the bid action:\n\n```\nclick('a314')\n```\n\nPlease provide a single action at a time and wait for the next observation. Provide only a single action per step. \nFocus on the bid that are given in the html, and use them to perform the actions.\n\"\"\"\n )\n )\n if self.use_chain_of_thought:\n messages.add_text(\n f\"\"\"\nProvide a chain of thoughts reasoning to decompose the task into smaller steps. And execute only the next step.\n\"\"\"\n )\n\n def parser(response: str) -> tuple[dict, bool, str]:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n ans_dict = retry(self.chat, messages, n_retry=3, parser=parser)\n\n action = ans_dict.get(\"action\", None)\n thought = ans_dict.get(\"think\", None)\n\n return (\n action,\n bgym.AgentInfo(\n think=thought,\n chat_messages=messages,\n # put any stats that you care about as long as it is a number or a dict of numbers\n stats={\"prompt_length\": len(messages), \"response_length\": len(thought)},\n markdown_page=\"Add any txt information here, including base 64 images, to display in xray\",\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n ),\n )\n\n\n# example for a single task\nenv_args = bgym.EnvArgs(\n task_name=\"miniwob.click-button\",\n task_seed=0,\n max_steps=10,\n headless=True,\n)\n\nchat_model_args = CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"]\n\n# example for 2 experiments testing chain of thoughts on a miniwob task\nexp_args = [\n ExpArgs(\n agent_args=MostBasicAgentArgs(\n temperature=0.1,\n use_chain_of_thought=True,\n chat_model_args=chat_model_args,\n ),\n env_args=env_args,\n logging_level=logging.INFO,\n ),\n ExpArgs(\n agent_args=MostBasicAgentArgs(\n temperature=0.1,\n use_chain_of_thought=False,\n chat_model_args=chat_model_args,\n ),\n env_args=env_args,\n logging_level=logging.INFO,\n ),\n]\n\nAGENT_4o_MINI = MostBasicAgentArgs(\n temperature=0.3,\n use_chain_of_thought=True,\n chat_model_args=chat_model_args,\n)\n\n\ndef experiment_config():\n return exp_args","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent.MostBasicAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.most_basic_agent.most_basic_agent.MostBasicAgentArgs#L25-L45","kind":"class","name":"MostBasicAgentArgs","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":25,"end_line":45,"context_start_line":5,"context_end_line":65,"code":"import bgym\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.experiments.loop import ExpArgs\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.llm_utils import (\n Discussion,\n HumanMessage,\n ParseError,\n SystemMessage,\n extract_code_blocks,\n retry,\n)\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nif 
TYPE_CHECKING:\n from agentlab.llm.chat_api import BaseModelArgs\n\n\n@dataclass\nclass MostBasicAgentArgs(AgentArgs):\n agent_name: str = \"BasicAgent\"\n temperature: float = 0.1\n use_chain_of_thought: bool = False\n chat_model_args: \"BaseModelArgs\" = None\n\n def make_agent(self) -> bgym.Agent:\n return MostBasicAgent(\n temperature=self.temperature,\n use_chain_of_thought=self.use_chain_of_thought,\n chat_model_args=self.chat_model_args,\n )\n\n def set_reproducibility_mode(self):\n self.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\nclass MostBasicAgent(bgym.Agent):\n def __init__(\n self, temperature: float, use_chain_of_thought: bool, chat_model_args: \"BaseModelArgs\"\n ):\n self.temperature = temperature\n self.use_chain_of_thought = use_chain_of_thought\n self.chat = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n\n self.action_set = bgym.HighLevelActionSet([\"bid\"], multiaction=False)\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n messages = Discussion(SystemMessage(\"You are a web assistant.\"))\n messages.append(\n HumanMessage(\n f\"\"\"\nYou are helping a user to accomplish the following goal on a website:","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent.MostBasicAgent","uri":"program://AgentLab/class/src.agentlab.agents.most_basic_agent.most_basic_agent.MostBasicAgent#L48-L121","kind":"class","name":"MostBasicAgent","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":48,"end_line":121,"context_start_line":28,"context_end_line":141,"code":" use_chain_of_thought: bool = False\n chat_model_args: \"BaseModelArgs\" = None\n\n def make_agent(self) -> bgym.Agent:\n return MostBasicAgent(\n temperature=self.temperature,\n use_chain_of_thought=self.use_chain_of_thought,\n chat_model_args=self.chat_model_args,\n )\n\n def set_reproducibility_mode(self):\n self.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\nclass MostBasicAgent(bgym.Agent):\n def __init__(\n self, temperature: float, use_chain_of_thought: bool, chat_model_args: \"BaseModelArgs\"\n ):\n self.temperature = temperature\n self.use_chain_of_thought = use_chain_of_thought\n self.chat = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n\n self.action_set = bgym.HighLevelActionSet([\"bid\"], multiaction=False)\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n messages = Discussion(SystemMessage(\"You are a web assistant.\"))\n messages.append(\n HumanMessage(\n f\"\"\"\nYou are helping a user to accomplish the following goal on a website:\n\n{obs[\"goal\"]}\n\nTo do so, you can interact with the environment using the following actions:\n\n{self.action_set.describe(with_long_description=False)}\n\nThe inputs to those functions are the bids given in the html.\n\nHere is the current state of the website, in the form of an html:\n\n{obs[\"pruned_html\"]}\n\nThe action you provide must be in between triple ticks and leverage the 'bid=' information provided in the html.\nHere is an example of how to use the bid action:\n\n```\nclick('a314')\n```\n\nPlease provide a single action at a time and wait for the next 
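`MostBasicAgentArgs` shows the minimal surface a custom agent must expose: a factory (`make_agent`) plus the reproducibility, prepare, and close hooks. The same pattern with stand-in base classes so the sketch runs without `bgym` installed (`EchoAgentArgs` and `EchoAgent` are hypothetical):

```python
from dataclasses import dataclass


class Agent:  # stand-in for bgym.Agent
    pass


@dataclass
class AgentArgs:  # stand-in for agentlab.agents.agent_args.AgentArgs
    agent_name: str = "Agent"


@dataclass
class EchoAgentArgs(AgentArgs):
    agent_name: str = "EchoAgent"
    temperature: float = 0.1

    def make_agent(self) -> Agent:
        return EchoAgent(self.temperature)

    def set_reproducibility_mode(self):
        self.temperature = 0  # deterministic decoding when the study asks for it


class EchoAgent(Agent):
    def __init__(self, temperature: float):
        self.temperature = temperature


args = EchoAgentArgs()
args.set_reproducibility_mode()
assert args.make_agent().temperature == 0
```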
observation. Provide only a single action per step. \nFocus on the bid that are given in the html, and use them to perform the actions.\n\"\"\"\n )\n )\n if self.use_chain_of_thought:\n messages.add_text(\n f\"\"\"\nProvide a chain of thoughts reasoning to decompose the task into smaller steps. And execute only the next step.\n\"\"\"\n )\n\n def parser(response: str) -> tuple[dict, bool, str]:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n ans_dict = retry(self.chat, messages, n_retry=3, parser=parser)\n\n action = ans_dict.get(\"action\", None)\n thought = ans_dict.get(\"think\", None)\n\n return (\n action,\n bgym.AgentInfo(\n think=thought,\n chat_messages=messages,\n # put any stats that you care about as long as it is a number or a dict of numbers\n stats={\"prompt_length\": len(messages), \"response_length\": len(thought)},\n markdown_page=\"Add any txt information here, including base 64 images, to display in xray\",\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n ),\n )\n\n\n# example for a single task\nenv_args = bgym.EnvArgs(\n task_name=\"miniwob.click-button\",\n task_seed=0,\n max_steps=10,\n headless=True,\n)\n\nchat_model_args = CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"]\n\n# example for 2 experiments testing chain of thoughts on a miniwob task\nexp_args = [\n ExpArgs(\n agent_args=MostBasicAgentArgs(\n temperature=0.1,\n use_chain_of_thought=True,\n chat_model_args=chat_model_args,\n ),","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent.experiment_config","uri":"program://AgentLab/function/src.agentlab.agents.most_basic_agent.most_basic_agent.experiment_config#L163-L164","kind":"function","name":"experiment_config","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":163,"end_line":164,"context_start_line":143,"context_end_line":164,"code":" logging_level=logging.INFO,\n ),\n ExpArgs(\n agent_args=MostBasicAgentArgs(\n temperature=0.1,\n use_chain_of_thought=False,\n chat_model_args=chat_model_args,\n ),\n env_args=env_args,\n logging_level=logging.INFO,\n ),\n]\n\nAGENT_4o_MINI = MostBasicAgentArgs(\n temperature=0.3,\n use_chain_of_thought=True,\n chat_model_args=chat_model_args,\n)\n\n\ndef experiment_config():\n return exp_args","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.most_basic_agent.most_basic_agent.make_agent#L31-L36","kind":"function","name":"make_agent","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":31,"end_line":36,"context_start_line":11,"context_end_line":56,"code":" Discussion,\n HumanMessage,\n ParseError,\n SystemMessage,\n extract_code_blocks,\n retry,\n)\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nif TYPE_CHECKING:\n from agentlab.llm.chat_api import BaseModelArgs\n\n\n@dataclass\nclass MostBasicAgentArgs(AgentArgs):\n agent_name: str = \"BasicAgent\"\n temperature: float = 0.1\n use_chain_of_thought: bool = False\n chat_model_args: \"BaseModelArgs\" = None\n\n def make_agent(self) -> 
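The `stats` dict returned in `AgentInfo` must hold numbers (or dicts of numbers) so they can be aggregated across steps. An illustrative aggregation; the episode data below is made up:

```python
from statistics import mean

# Two steps' worth of stats like those emitted by get_action above.
steps = [
    {"prompt_length": 12, "response_length": 340},
    {"prompt_length": 14, "response_length": 280},
]
summary = {key: mean(step[key] for step in steps) for key in steps[0]}
print(summary)  # per-episode averages, e.g. prompt_length == 13.0
```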
bgym.Agent:\n return MostBasicAgent(\n temperature=self.temperature,\n use_chain_of_thought=self.use_chain_of_thought,\n chat_model_args=self.chat_model_args,\n )\n\n def set_reproducibility_mode(self):\n self.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\nclass MostBasicAgent(bgym.Agent):\n def __init__(\n self, temperature: float, use_chain_of_thought: bool, chat_model_args: \"BaseModelArgs\"\n ):\n self.temperature = temperature\n self.use_chain_of_thought = use_chain_of_thought\n self.chat = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent.set_reproducibility_mode","uri":"program://AgentLab/function/src.agentlab.agents.most_basic_agent.most_basic_agent.set_reproducibility_mode#L38-L39","kind":"function","name":"set_reproducibility_mode","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":38,"end_line":39,"context_start_line":18,"context_end_line":59,"code":"from agentlab.llm.tracking import cost_tracker_decorator\n\nif TYPE_CHECKING:\n from agentlab.llm.chat_api import BaseModelArgs\n\n\n@dataclass\nclass MostBasicAgentArgs(AgentArgs):\n agent_name: str = \"BasicAgent\"\n temperature: float = 0.1\n use_chain_of_thought: bool = False\n chat_model_args: \"BaseModelArgs\" = None\n\n def make_agent(self) -> bgym.Agent:\n return MostBasicAgent(\n temperature=self.temperature,\n use_chain_of_thought=self.use_chain_of_thought,\n chat_model_args=self.chat_model_args,\n )\n\n def set_reproducibility_mode(self):\n self.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\nclass MostBasicAgent(bgym.Agent):\n def __init__(\n self, temperature: float, use_chain_of_thought: bool, chat_model_args: \"BaseModelArgs\"\n ):\n self.temperature = temperature\n self.use_chain_of_thought = use_chain_of_thought\n self.chat = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n\n self.action_set = bgym.HighLevelActionSet([\"bid\"], multiaction=False)\n\n @cost_tracker_decorator","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent.prepare","uri":"program://AgentLab/function/src.agentlab.agents.most_basic_agent.most_basic_agent.prepare#L41-L42","kind":"function","name":"prepare","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":41,"end_line":42,"context_start_line":21,"context_end_line":62,"code":" from agentlab.llm.chat_api import BaseModelArgs\n\n\n@dataclass\nclass MostBasicAgentArgs(AgentArgs):\n agent_name: str = \"BasicAgent\"\n temperature: float = 0.1\n use_chain_of_thought: bool = False\n chat_model_args: \"BaseModelArgs\" = None\n\n def make_agent(self) -> bgym.Agent:\n return MostBasicAgent(\n temperature=self.temperature,\n use_chain_of_thought=self.use_chain_of_thought,\n chat_model_args=self.chat_model_args,\n )\n\n def set_reproducibility_mode(self):\n self.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\nclass 
MostBasicAgent(bgym.Agent):\n def __init__(\n self, temperature: float, use_chain_of_thought: bool, chat_model_args: \"BaseModelArgs\"\n ):\n self.temperature = temperature\n self.use_chain_of_thought = use_chain_of_thought\n self.chat = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n\n self.action_set = bgym.HighLevelActionSet([\"bid\"], multiaction=False)\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n messages = Discussion(SystemMessage(\"You are a web assistant.\"))\n messages.append(","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent.close","uri":"program://AgentLab/function/src.agentlab.agents.most_basic_agent.most_basic_agent.close#L44-L45","kind":"function","name":"close","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":44,"end_line":45,"context_start_line":24,"context_end_line":65,"code":"@dataclass\nclass MostBasicAgentArgs(AgentArgs):\n agent_name: str = \"BasicAgent\"\n temperature: float = 0.1\n use_chain_of_thought: bool = False\n chat_model_args: \"BaseModelArgs\" = None\n\n def make_agent(self) -> bgym.Agent:\n return MostBasicAgent(\n temperature=self.temperature,\n use_chain_of_thought=self.use_chain_of_thought,\n chat_model_args=self.chat_model_args,\n )\n\n def set_reproducibility_mode(self):\n self.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\nclass MostBasicAgent(bgym.Agent):\n def __init__(\n self, temperature: float, use_chain_of_thought: bool, chat_model_args: \"BaseModelArgs\"\n ):\n self.temperature = temperature\n self.use_chain_of_thought = use_chain_of_thought\n self.chat = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n\n self.action_set = bgym.HighLevelActionSet([\"bid\"], multiaction=False)\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n messages = Discussion(SystemMessage(\"You are a web assistant.\"))\n messages.append(\n HumanMessage(\n f\"\"\"\nYou are helping a user to accomplish the following goal on a website:","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.most_basic_agent.most_basic_agent.__init__#L49-L57","kind":"function","name":"__init__","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":49,"end_line":57,"context_start_line":29,"context_end_line":77,"code":" chat_model_args: \"BaseModelArgs\" = None\n\n def make_agent(self) -> bgym.Agent:\n return MostBasicAgent(\n temperature=self.temperature,\n use_chain_of_thought=self.use_chain_of_thought,\n chat_model_args=self.chat_model_args,\n )\n\n def set_reproducibility_mode(self):\n self.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\nclass MostBasicAgent(bgym.Agent):\n def __init__(\n self, temperature: float, use_chain_of_thought: bool, chat_model_args: \"BaseModelArgs\"\n ):\n self.temperature = temperature\n self.use_chain_of_thought = use_chain_of_thought\n self.chat = chat_model_args.make_model()\n self.chat_model_args 
= chat_model_args\n\n self.action_set = bgym.HighLevelActionSet([\"bid\"], multiaction=False)\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n messages = Discussion(SystemMessage(\"You are a web assistant.\"))\n messages.append(\n HumanMessage(\n f\"\"\"\nYou are helping a user to accomplish the following goal on a website:\n\n{obs[\"goal\"]}\n\nTo do so, you can interact with the environment using the following actions:\n\n{self.action_set.describe(with_long_description=False)}\n\nThe inputs to those functions are the bids given in the html.\n\nHere is the current state of the website, in the form of an html:\n\n{obs[\"pruned_html\"]}","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.most_basic_agent.most_basic_agent.get_action#L60-L121","kind":"function","name":"get_action","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":60,"end_line":121,"context_start_line":40,"context_end_line":141,"code":"\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n\nclass MostBasicAgent(bgym.Agent):\n def __init__(\n self, temperature: float, use_chain_of_thought: bool, chat_model_args: \"BaseModelArgs\"\n ):\n self.temperature = temperature\n self.use_chain_of_thought = use_chain_of_thought\n self.chat = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n\n self.action_set = bgym.HighLevelActionSet([\"bid\"], multiaction=False)\n\n @cost_tracker_decorator\n def get_action(self, obs: Any) -> tuple[str, dict]:\n messages = Discussion(SystemMessage(\"You are a web assistant.\"))\n messages.append(\n HumanMessage(\n f\"\"\"\nYou are helping a user to accomplish the following goal on a website:\n\n{obs[\"goal\"]}\n\nTo do so, you can interact with the environment using the following actions:\n\n{self.action_set.describe(with_long_description=False)}\n\nThe inputs to those functions are the bids given in the html.\n\nHere is the current state of the website, in the form of an html:\n\n{obs[\"pruned_html\"]}\n\nThe action you provide must be in between triple ticks and leverage the 'bid=' information provided in the html.\nHere is an example of how to use the bid action:\n\n```\nclick('a314')\n```\n\nPlease provide a single action at a time and wait for the next observation. Provide only a single action per step. \nFocus on the bid that are given in the html, and use them to perform the actions.\n\"\"\"\n )\n )\n if self.use_chain_of_thought:\n messages.add_text(\n f\"\"\"\nProvide a chain of thoughts reasoning to decompose the task into smaller steps. 
And execute only the next step.\n\"\"\"\n )\n\n def parser(response: str) -> tuple[dict, bool, str]:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n ans_dict = retry(self.chat, messages, n_retry=3, parser=parser)\n\n action = ans_dict.get(\"action\", None)\n thought = ans_dict.get(\"think\", None)\n\n return (\n action,\n bgym.AgentInfo(\n think=thought,\n chat_messages=messages,\n # put any stats that you care about as long as it is a number or a dict of numbers\n stats={\"prompt_length\": len(messages), \"response_length\": len(thought)},\n markdown_page=\"Add any txt information here, including base 64 images, to display in xray\",\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n ),\n )\n\n\n# example for a single task\nenv_args = bgym.EnvArgs(\n task_name=\"miniwob.click-button\",\n task_seed=0,\n max_steps=10,\n headless=True,\n)\n\nchat_model_args = CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"]\n\n# example for 2 experiments testing chain of thoughts on a miniwob task\nexp_args = [\n ExpArgs(\n agent_args=MostBasicAgentArgs(\n temperature=0.1,\n use_chain_of_thought=True,\n chat_model_args=chat_model_args,\n ),","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.most_basic_agent.most_basic_agent.parser","uri":"program://AgentLab/function/src.agentlab.agents.most_basic_agent.most_basic_agent.parser#L98-L104","kind":"function","name":"parser","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":98,"end_line":104,"context_start_line":78,"context_end_line":124,"code":"\nThe action you provide must be in between triple ticks and leverage the 'bid=' information provided in the html.\nHere is an example of how to use the bid action:\n\n```\nclick('a314')\n```\n\nPlease provide a single action at a time and wait for the next observation. Provide only a single action per step. \nFocus on the bid that are given in the html, and use them to perform the actions.\n\"\"\"\n )\n )\n if self.use_chain_of_thought:\n messages.add_text(\n f\"\"\"\nProvide a chain of thoughts reasoning to decompose the task into smaller steps. 
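`retry` is what turns the parser's `ParseError` into another round-trip with the model. One plausible implementation of the contract implied by `retry(self.chat, messages, n_retry=3, parser=parser)`; the feedback-message format is an assumption, not AgentLab's actual `retry`:

```python
class ParseError(Exception):
    pass


def retry(chat, messages: list, n_retry: int, parser):
    # Query `chat` until `parser` accepts the answer, at most `n_retry` times,
    # feeding the parse error back to the model between attempts.
    for _ in range(n_retry):
        response = chat(messages)
        try:
            return parser(response)
        except ParseError as err:
            messages.append({"role": "user", "content": f"Parsing failed: {err}. Try again."})
    raise ParseError(f"no parsable answer after {n_retry} attempts")


# Toy model: fails once, then produces a proper code block.
answers = iter(["no block here", 'ok ```click("a314")```'])
chat = lambda _messages: next(answers)


def parser(response: str) -> dict:
    if "```" not in response:
        raise ParseError("No code block found in the response")
    return {"action": response.split("```")[1]}


assert retry(chat, [], n_retry=3, parser=parser)["action"] == 'click("a314")'
```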
And execute only the next step.\n\"\"\"\n )\n\n def parser(response: str) -> tuple[dict, bool, str]:\n blocks = extract_code_blocks(response)\n if len(blocks) == 0:\n raise ParseError(\"No code block found in the response\")\n action = blocks[0][1]\n thought = response\n return {\"action\": action, \"think\": thought}\n\n ans_dict = retry(self.chat, messages, n_retry=3, parser=parser)\n\n action = ans_dict.get(\"action\", None)\n thought = ans_dict.get(\"think\", None)\n\n return (\n action,\n bgym.AgentInfo(\n think=thought,\n chat_messages=messages,\n # put any stats that you care about as long as it is a number or a dict of numbers\n stats={\"prompt_length\": len(messages), \"response_length\": len(thought)},\n markdown_page=\"Add any txt information here, including base 64 images, to display in xray\",\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n ),\n )\n\n\n# example for a single task","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.agent_configs","uri":"program://AgentLab/module/src.agentlab.agents.visual_agent.agent_configs#L1-L46","kind":"module","name":"src.agentlab.agents.visual_agent.agent_configs","path":"src/agentlab/agents/visual_agent/agent_configs.py","language":"python","start_line":1,"end_line":46,"context_start_line":1,"context_end_line":46,"code":"import bgym\nfrom bgym import HighLevelActionSetArgs\n\nimport agentlab.agents.dynamic_prompting as dp\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\nfrom .visual_agent import VisualAgentArgs\nfrom .visual_agent_prompts import PromptFlags\n\n# the other flags are ignored for this agent.\nDEFAULT_OBS_FLAGS = dp.ObsFlags(\n use_tabs=True, # will be overridden by the benchmark when set_benchmark is called after initalizing the agent\n use_error_logs=True,\n use_past_error_logs=False,\n use_screenshot=True,\n use_som=False,\n openai_vision_detail=\"auto\",\n)\n\nDEFAULT_ACTION_FLAGS = dp.ActionFlags(\n action_set=HighLevelActionSetArgs(subsets=[\"coord\"]),\n long_description=True,\n individual_examples=False,\n)\n\n\nDEFAULT_PROMPT_FLAGS = PromptFlags(\n obs=DEFAULT_OBS_FLAGS,\n action=DEFAULT_ACTION_FLAGS,\n use_thinking=True,\n use_concrete_example=False,\n use_abstract_example=True,\n enable_chat=False,\n extra_instructions=None,\n)\n\nVISUAL_AGENT_4o = VisualAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-2024-05-13\"],\n flags=DEFAULT_PROMPT_FLAGS,\n)\n\n\nVISUAL_AGENT_CLAUDE_3_5 = VisualAgentArgs(\n chat_model_args=CHAT_MODEL_ARGS_DICT[\"openrouter/anthropic/claude-3.5-sonnet:beta\"],\n flags=DEFAULT_PROMPT_FLAGS,\n)","source_hash":"4a2d34d262ffc699c7888d2fa972367b853b989de126d2e6159546d6f266e113","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent","uri":"program://AgentLab/module/src.agentlab.agents.visual_agent.visual_agent#L1-L130","kind":"module","name":"src.agentlab.agents.visual_agent.visual_agent","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":1,"end_line":130,"context_start_line":1,"context_end_line":130,"code":"\"\"\"\nGenericAgent implementation for AgentLab\n\nThis module defines a `GenericAgent` class and its associated arguments for use in the AgentLab framework. \\\nThe `GenericAgent` class is designed to interact with a chat-based model to determine actions based on \\\nobservations. 
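The `agent_configs` module builds one shared `DEFAULT_PROMPT_FLAGS` and reuses it across agents, so variants should deep-copy it before mutating. The pattern with stand-in flag classes; with agentlab installed you would `deepcopy` the real `DEFAULT_PROMPT_FLAGS` instead:

```python
from copy import deepcopy
from dataclasses import dataclass, field


@dataclass
class ObsFlags:  # stand-in for dp.ObsFlags
    use_screenshot: bool = True
    use_som: bool = False


@dataclass
class PromptFlags:  # stand-in for the real PromptFlags
    obs: ObsFlags = field(default_factory=ObsFlags)


default_flags = PromptFlags()
som_flags = deepcopy(default_flags)  # never mutate the shared default in place
som_flags.obs.use_som = True

assert default_flags.obs.use_som is False and som_flags.obs.use_som is True
```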
It includes methods for preprocessing observations, generating actions, and managing internal \\\nstate such as plans, memories, and thoughts. The `GenericAgentArgs` class provides configuration options for \\\nthe agent, including model arguments and flags for various behaviors.\n\"\"\"\n\nfrom dataclasses import asdict, dataclass\n\nimport bgym\nfrom bgym import Benchmark\nfrom browsergym.experiments.agent import Agent, AgentInfo\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import Discussion, ParseError, SystemMessage, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nfrom .visual_agent_prompts import MainPrompt, PromptFlags\n\n\n@dataclass\nclass VisualAgentArgs(AgentArgs):\n chat_model_args: BaseModelArgs = None\n flags: PromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be missing temporarily due to args.CrossProd for hyperparameter generation\n self.agent_name = f\"VisualAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return VisualAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass VisualAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: PromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self.reset(seed=None)\n\n def obs_preprocessor(self, obs: dict) -> dict:\n return self._obs_preprocessor(obs)\n\n @cost_tracker_decorator\n def get_action(self, obs):\n\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs=obs,\n actions=self.actions,\n thoughts=self.thoughts,\n flags=self.flags,\n )\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n try:\n # TODO, we would need to further shrink the prompt if the retry\n # cause it to be too long\n\n chat_messages = Discussion([system_prompt, main_prompt.prompt])\n ans_dict = retry(\n self.chat_llm,\n chat_messages,\n n_retry=self.max_retry,\n parser=main_prompt._parse_answer,\n )\n ans_dict[\"busted_retry\"] = 0\n # inferring the number of retries, TODO: make this less hacky\n ans_dict[\"n_retry\"] = (len(chat_messages) - 3) / 2\n except ParseError:\n ans_dict = dict(\n action=None,\n n_retry=self.max_retry + 1,\n busted_retry=1,\n )\n\n stats = self.chat_llm.get_stats()\n stats[\"n_retry\"] = ans_dict[\"n_retry\"]\n stats[\"busted_retry\"] = ans_dict[\"busted_retry\"]\n\n self.actions.append(ans_dict[\"action\"])\n self.thoughts.append(ans_dict.get(\"think\", None))\n\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n )\n return ans_dict[\"action\"], agent_info\n\n def reset(self, seed=None):\n self.seed 
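The hacky `ans_dict["n_retry"] = (len(chat_messages) - 3) / 2` presumably works because a clean exchange holds exactly three messages (system, user, answer) and each failed attempt appends two more (error feedback plus a new answer). That accounting as arithmetic:

```python
def infer_n_retry(n_messages: int) -> float:
    # 3 messages on a clean first try; every retry adds feedback + answer (2).
    return (n_messages - 3) / 2


assert infer_n_retry(3) == 0  # parsed on the first attempt
assert infer_n_retry(5) == 1  # one failed attempt before success
```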
= seed\n self.thoughts = []\n self.actions = []","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.VisualAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.visual_agent.visual_agent.VisualAgentArgs#L27-L54","kind":"class","name":"VisualAgentArgs","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":27,"end_line":54,"context_start_line":7,"context_end_line":74,"code":"state such as plans, memories, and thoughts. The `GenericAgentArgs` class provides configuration options for \\\nthe agent, including model arguments and flags for various behaviors.\n\"\"\"\n\nfrom dataclasses import asdict, dataclass\n\nimport bgym\nfrom bgym import Benchmark\nfrom browsergym.experiments.agent import Agent, AgentInfo\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import Discussion, ParseError, SystemMessage, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nfrom .visual_agent_prompts import MainPrompt, PromptFlags\n\n\n@dataclass\nclass VisualAgentArgs(AgentArgs):\n chat_model_args: BaseModelArgs = None\n flags: PromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be missing temporarily due to args.CrossProd for hyperparameter generation\n self.agent_name = f\"VisualAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return VisualAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass VisualAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: PromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self.reset(seed=None)","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.VisualAgent","uri":"program://AgentLab/class/src.agentlab.agents.visual_agent.visual_agent.VisualAgent#L57-L130","kind":"class","name":"VisualAgent","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":57,"end_line":130,"context_start_line":37,"context_end_line":130,"code":"\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return VisualAgent(\n 
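`__post_init__` swallowing `AttributeError` looks odd but is deliberate: during hyperparameter generation with `args.CrossProd`, `chat_model_args` may not be populated yet, as the inline comment notes. A toy reproduction of that situation (`ArgsSketch` is hypothetical):

```python
from dataclasses import dataclass


@dataclass
class ArgsSketch:
    chat_model_args: object = None  # filled in later, e.g. by a sweep

    def __post_init__(self):
        try:
            self.agent_name = f"VisualAgent-{self.chat_model_args.model_name}"
        except AttributeError:
            pass  # model args missing for now; the name is derived later


partial = ArgsSketch()  # no crash even though model_name is unavailable
assert not hasattr(partial, "agent_name")
```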
chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass VisualAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: PromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self.reset(seed=None)\n\n def obs_preprocessor(self, obs: dict) -> dict:\n return self._obs_preprocessor(obs)\n\n @cost_tracker_decorator\n def get_action(self, obs):\n\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs=obs,\n actions=self.actions,\n thoughts=self.thoughts,\n flags=self.flags,\n )\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n try:\n # TODO, we would need to further shrink the prompt if the retry\n # cause it to be too long\n\n chat_messages = Discussion([system_prompt, main_prompt.prompt])\n ans_dict = retry(\n self.chat_llm,\n chat_messages,\n n_retry=self.max_retry,\n parser=main_prompt._parse_answer,\n )\n ans_dict[\"busted_retry\"] = 0\n # inferring the number of retries, TODO: make this less hacky\n ans_dict[\"n_retry\"] = (len(chat_messages) - 3) / 2\n except ParseError:\n ans_dict = dict(\n action=None,\n n_retry=self.max_retry + 1,\n busted_retry=1,\n )\n\n stats = self.chat_llm.get_stats()\n stats[\"n_retry\"] = ans_dict[\"n_retry\"]\n stats[\"busted_retry\"] = ans_dict[\"busted_retry\"]\n\n self.actions.append(ans_dict[\"action\"])\n self.thoughts.append(ans_dict.get(\"think\", None))\n\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n )\n return ans_dict[\"action\"], agent_info\n\n def reset(self, seed=None):\n self.seed = seed\n self.thoughts = []\n self.actions = []","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.__post_init__","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent.__post_init__#L32-L36","kind":"function","name":"__post_init__","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":32,"end_line":36,"context_start_line":12,"context_end_line":56,"code":"\nimport bgym\nfrom bgym import Benchmark\nfrom browsergym.experiments.agent import Agent, AgentInfo\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import Discussion, ParseError, SystemMessage, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nfrom .visual_agent_prompts import MainPrompt, PromptFlags\n\n\n@dataclass\nclass VisualAgentArgs(AgentArgs):\n chat_model_args: BaseModelArgs = None\n flags: PromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be missing temporarily due to args.CrossProd for hyperparameter generation\n self.agent_name = f\"VisualAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n\n def set_reproducibility_mode(self):\n 
self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return VisualAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.set_benchmark","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent.set_benchmark#L38-L40","kind":"function","name":"set_benchmark","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":38,"end_line":40,"context_start_line":18,"context_end_line":60,"code":"from agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import Discussion, ParseError, SystemMessage, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nfrom .visual_agent_prompts import MainPrompt, PromptFlags\n\n\n@dataclass\nclass VisualAgentArgs(AgentArgs):\n chat_model_args: BaseModelArgs = None\n flags: PromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be missing temporarily due to args.CrossProd for hyperparameter generation\n self.agent_name = f\"VisualAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return VisualAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass VisualAgent(Agent):\n\n def __init__(\n self,","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.set_reproducibility_mode","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent.set_reproducibility_mode#L42-L43","kind":"function","name":"set_reproducibility_mode","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":42,"end_line":43,"context_start_line":22,"context_end_line":63,"code":"\nfrom .visual_agent_prompts import MainPrompt, PromptFlags\n\n\n@dataclass\nclass VisualAgentArgs(AgentArgs):\n chat_model_args: BaseModelArgs = None\n flags: PromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be missing temporarily due to args.CrossProd for hyperparameter generation\n self.agent_name = f\"VisualAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return VisualAgent(\n 
chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass VisualAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: PromptFlags,\n max_retry: int = 4,","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.prepare","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent.prepare#L45-L46","kind":"function","name":"prepare","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":45,"end_line":46,"context_start_line":25,"context_end_line":66,"code":"\n@dataclass\nclass VisualAgentArgs(AgentArgs):\n chat_model_args: BaseModelArgs = None\n flags: PromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be missing temporarily due to args.CrossProd for hyperparameter generation\n self.agent_name = f\"VisualAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return VisualAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass VisualAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: PromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.close","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent.close#L48-L49","kind":"function","name":"close","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":48,"end_line":49,"context_start_line":28,"context_end_line":69,"code":" chat_model_args: BaseModelArgs = None\n flags: PromptFlags = None\n max_retry: int = 4\n\n def __post_init__(self):\n try: # some attributes might be missing temporarily due to args.CrossProd for hyperparameter generation\n self.agent_name = f\"VisualAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return VisualAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass VisualAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: PromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} 
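The `VisualAgentArgs` records above define a small lifecycle contract: `set_reproducibility_mode` (force temperature 0), then `prepare` (start the model server), `make_agent`, and finally `close`. Below is a minimal, self-contained sketch of a driver exercising that contract; the stub classes and the loop are illustrative placeholders, not AgentLab's actual Study/experiment runner.

```python
# Illustrative sketch of the AgentArgs lifecycle implied by the
# VisualAgentArgs hooks above. StubArgs/StubAgent and run_episode are
# placeholders, not AgentLab's real runner.
from dataclasses import dataclass


class StubAgent:
    def get_action(self, obs):
        # A real agent builds a prompt from `obs` and queries an LLM.
        return "noop()", {"think": None}


@dataclass
class StubArgs:
    temperature: float = 0.5

    def set_reproducibility_mode(self):
        self.temperature = 0  # mirrors VisualAgentArgs: deterministic sampling

    def prepare(self):
        print("model server ready")  # mirrors chat_model_args.prepare_server()

    def close(self):
        print("model server closed")  # mirrors chat_model_args.close_server()

    def make_agent(self) -> StubAgent:
        return StubAgent()


def run_episode(args: StubArgs, n_steps: int = 3):
    args.set_reproducibility_mode()  # optional, as in reproducibility mode
    args.prepare()
    try:
        agent = args.make_agent()
        obs = {}  # a real loop would reset the environment here
        for _ in range(n_steps):
            action, info = agent.get_action(obs)
            obs = {}  # a real loop would step the environment with `action`
    finally:
        args.close()  # always release the model server


if __name__ == "__main__":
    run_episode(StubArgs())
```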
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent.make_agent#L51-L54","kind":"function","name":"make_agent","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":51,"end_line":54,"context_start_line":31,"context_end_line":74,"code":"\n def __post_init__(self):\n try: # some attributes might be missing temporarily due to args.CrossProd for hyperparameter generation\n self.agent_name = f\"VisualAgent-{self.chat_model_args.model_name}\".replace(\"/\", \"_\")\n except AttributeError:\n pass\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode):\n \"\"\"Override Some flags based on the benchmark.\"\"\"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return VisualAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass VisualAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: PromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self.reset(seed=None)","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent.__init__#L59-L74","kind":"function","name":"__init__","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":59,"end_line":74,"context_start_line":39,"context_end_line":94,"code":" \"\"\"Override Some flags based on the benchmark.\"\"\"\n self.flags.obs.use_tabs = benchmark.is_multi_tab\n\n def set_reproducibility_mode(self):\n self.chat_model_args.temperature = 0\n\n def prepare(self):\n return self.chat_model_args.prepare_server()\n\n def close(self):\n return self.chat_model_args.close_server()\n\n def make_agent(self):\n return VisualAgent(\n chat_model_args=self.chat_model_args, flags=self.flags, max_retry=self.max_retry\n )\n\n\nclass VisualAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: PromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self.reset(seed=None)\n\n def obs_preprocessor(self, obs: dict) -> dict:\n return self._obs_preprocessor(obs)\n\n @cost_tracker_decorator\n def get_action(self, obs):\n\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs=obs,\n actions=self.actions,\n thoughts=self.thoughts,\n flags=self.flags,\n )\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n try:\n # TODO, we would need to further shrink the prompt if the retry\n # cause it to be too 
long\n","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.obs_preprocessor","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent.obs_preprocessor#L76-L77","kind":"function","name":"obs_preprocessor","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":76,"end_line":77,"context_start_line":56,"context_end_line":97,"code":"\nclass VisualAgent(Agent):\n\n def __init__(\n self,\n chat_model_args: BaseModelArgs,\n flags: PromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self.reset(seed=None)\n\n def obs_preprocessor(self, obs: dict) -> dict:\n return self._obs_preprocessor(obs)\n\n @cost_tracker_decorator\n def get_action(self, obs):\n\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs=obs,\n actions=self.actions,\n thoughts=self.thoughts,\n flags=self.flags,\n )\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n try:\n # TODO, we would need to further shrink the prompt if the retry\n # cause it to be too long\n\n chat_messages = Discussion([system_prompt, main_prompt.prompt])\n ans_dict = retry(\n self.chat_llm,","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent.get_action#L80-L125","kind":"function","name":"get_action","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":80,"end_line":125,"context_start_line":60,"context_end_line":130,"code":" self,\n chat_model_args: BaseModelArgs,\n flags: PromptFlags,\n max_retry: int = 4,\n ):\n\n self.chat_llm = chat_model_args.make_model()\n self.chat_model_args = chat_model_args\n self.max_retry = max_retry\n\n self.flags = flags\n self.action_set = self.flags.action.action_set.make_action_set()\n self._obs_preprocessor = dp.make_obs_preprocessor(flags.obs)\n\n self.reset(seed=None)\n\n def obs_preprocessor(self, obs: dict) -> dict:\n return self._obs_preprocessor(obs)\n\n @cost_tracker_decorator\n def get_action(self, obs):\n\n main_prompt = MainPrompt(\n action_set=self.action_set,\n obs=obs,\n actions=self.actions,\n thoughts=self.thoughts,\n flags=self.flags,\n )\n\n system_prompt = SystemMessage(dp.SystemPrompt().prompt)\n try:\n # TODO, we would need to further shrink the prompt if the retry\n # cause it to be too long\n\n chat_messages = Discussion([system_prompt, main_prompt.prompt])\n ans_dict = retry(\n self.chat_llm,\n chat_messages,\n n_retry=self.max_retry,\n parser=main_prompt._parse_answer,\n )\n ans_dict[\"busted_retry\"] = 0\n # inferring the number of retries, TODO: make this less hacky\n ans_dict[\"n_retry\"] = (len(chat_messages) - 3) / 2\n except ParseError:\n ans_dict = dict(\n action=None,\n n_retry=self.max_retry + 1,\n busted_retry=1,\n )\n\n stats = self.chat_llm.get_stats()\n stats[\"n_retry\"] = ans_dict[\"n_retry\"]\n stats[\"busted_retry\"] = ans_dict[\"busted_retry\"]\n\n self.actions.append(ans_dict[\"action\"])\n self.thoughts.append(ans_dict.get(\"think\", None))\n\n agent_info 
= AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n )\n return ans_dict[\"action\"], agent_info\n\n def reset(self, seed=None):\n self.seed = seed\n self.thoughts = []\n self.actions = []","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent.reset","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent.reset#L127-L130","kind":"function","name":"reset","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":127,"end_line":130,"context_start_line":107,"context_end_line":130,"code":" action=None,\n n_retry=self.max_retry + 1,\n busted_retry=1,\n )\n\n stats = self.chat_llm.get_stats()\n stats[\"n_retry\"] = ans_dict[\"n_retry\"]\n stats[\"busted_retry\"] = ans_dict[\"busted_retry\"]\n\n self.actions.append(ans_dict[\"action\"])\n self.thoughts.append(ans_dict.get(\"think\", None))\n\n agent_info = AgentInfo(\n think=ans_dict.get(\"think\", None),\n chat_messages=chat_messages,\n stats=stats,\n extra_info={\"chat_model_args\": asdict(self.chat_model_args)},\n )\n return ans_dict[\"action\"], agent_info\n\n def reset(self, seed=None):\n self.seed = seed\n self.thoughts = []\n self.actions = []","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts","uri":"program://AgentLab/module/src.agentlab.agents.visual_agent.visual_agent_prompts#L1-L185","kind":"module","name":"src.agentlab.agents.visual_agent.visual_agent_prompts","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":1,"end_line":185,"context_start_line":1,"context_end_line":185,"code":"\"\"\"\nPrompt builder for GenericAgent\n\nIt is based on the dynamic_prompting module from the agentlab package.\n\"\"\"\n\nimport logging\nfrom dataclasses import dataclass\nimport bgym\n\nfrom browsergym.core.action.base import AbstractActionSet\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.llm.llm_utils import BaseMessage, HumanMessage, image_to_jpg_base64_url\n\n\n@dataclass\nclass PromptFlags(dp.Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n \"\"\"\n\n obs: dp.ObsFlags = None\n action: dp.ActionFlags = None\n use_thinking: bool = True\n use_concrete_example: bool = False\n use_abstract_example: bool = True\n enable_chat: bool = False\n extra_instructions: str | None = None\n\n\nclass SystemPrompt(dp.PromptElement):\n _prompt = \"\"\"\\\nYou are an agent trying to solve a web task based on the content of the page and\nuser instructions. You can interact with the page and explore, and send messages to the user. Each time you\nsubmit an action it will be sent to the browser and you will receive a new page.\"\"\"\n\n\ndef make_instructions(obs: dict, from_chat: bool, extra_instructions: str | None):\n \"\"\"Convenient wrapper to extract instructions from either goal or chat\"\"\"\n if from_chat:\n instructions = dp.ChatInstructions(\n obs[\"chat_messages\"], extra_instructions=extra_instructions\n )\n else:\n if sum([msg[\"role\"] == \"user\" for msg in obs.get(\"chat_messages\", [])]) > 1:\n logging.warning(\n \"Agent is in goal mode, but multiple user messages are present in the chat. 
Consider switching to `enable_chat=True`.\"\n )\n instructions = dp.GoalInstructions(\n obs[\"goal_object\"], extra_instructions=extra_instructions\n )\n return instructions\n\n\nclass History(dp.PromptElement):\n \"\"\"\n Format the actions and thoughts of previous steps.\"\"\"\n\n def __init__(self, actions, thoughts) -> None:\n super().__init__()\n prompt_elements = []\n for i, (action, thought) in enumerate(zip(actions, thoughts)):\n prompt_elements.append(\n f\"\"\"\n## Step {i}\n### Thoughts:\n{thought}\n### Action:\n{action}\n\"\"\"\n )\n self._prompt = \"\\n\".join(prompt_elements) + \"\\n\"\n\n\nclass Observation(dp.PromptElement):\n \"\"\"Observation of the current step.\n\n Contains the html, the accessibility tree and the error logs.\n \"\"\"\n\n def __init__(self, obs, flags: dp.ObsFlags) -> None:\n super().__init__()\n self.flags = flags\n self.obs = obs\n\n # for a multi-tab browser, we need to show the current tab\n self.tabs = dp.Tabs(\n obs,\n visible=lambda: flags.use_tabs,\n prefix=\"## \",\n )\n\n # if an error is present, we need to show it\n self.error = dp.Error(\n obs[\"last_action_error\"],\n visible=lambda: flags.use_error_logs and obs[\"last_action_error\"],\n prefix=\"## \",\n )\n\n @property\n def _prompt(self) -> str:\n return f\"\"\"\n# Observation of current step:\n{self.tabs.prompt}{self.error.prompt}\n\n\"\"\"\n\n def add_screenshot(self, prompt: BaseMessage) -> BaseMessage:\n if self.flags.use_screenshot:\n if self.flags.use_som:\n screenshot = self.obs[\"screenshot_som\"]\n prompt.add_text(\n \"\\n## Screenshot:\\nHere is a screenshot of the page, it is annotated with bounding boxes and corresponding bids:\"\n )\n else:\n screenshot = self.obs[\"screenshot\"]\n prompt.add_text(\"\\n## Screenshot:\\nHere is a screenshot of the page:\")\n img_url = image_to_jpg_base64_url(screenshot)\n prompt.add_image(img_url, detail=self.flags.openai_vision_detail)\n return prompt\n\n\nclass MainPrompt(dp.PromptElement):\n\n def __init__(\n self,\n action_set: AbstractActionSet,\n obs: dict,\n actions: list[str],\n thoughts: list[str],\n flags: PromptFlags,\n ) -> None:\n super().__init__()\n self.flags = flags\n self.history = History(actions, thoughts)\n self.instructions = make_instructions(obs, flags.enable_chat, flags.extra_instructions)\n self.obs = Observation(obs, self.flags.obs)\n\n self.action_prompt = dp.ActionPrompt(action_set, action_flags=flags.action)\n self.think = dp.Think(visible=lambda: flags.use_thinking)\n\n @property\n def _prompt(self) -> HumanMessage:\n prompt = HumanMessage(self.instructions.prompt)\n prompt.add_text(\n f\"\"\"\\\n{self.obs.prompt}\\\n{self.history.prompt}\\\n{self.action_prompt.prompt}\\\n{self.think.prompt}\\\n\"\"\"\n )\n\n if self.flags.use_abstract_example:\n prompt.add_text(\n f\"\"\"\n# Abstract Example\n\nHere is an abstract version of the answer with description of the content of\neach tag. 
Make sure you follow this structure, but replace the content with your\nanswer:\n{self.think.abstract_ex}\\\n{self.action_prompt.abstract_ex}\\\n\"\"\"\n )\n\n if self.flags.use_concrete_example:\n prompt.add_text(\n f\"\"\"\n# Concrete Example\n\nHere is a concrete example of how to format your answer.\nMake sure to follow the template with proper tags:\n{self.think.concrete_ex}\\\n{self.action_prompt.concrete_ex}\\\n\"\"\"\n )\n return self.obs.add_screenshot(prompt)\n\n def _parse_answer(self, text_answer):\n ans_dict = {}\n ans_dict.update(self.think.parse_answer(text_answer))\n ans_dict.update(self.action_prompt.parse_answer(text_answer))\n return ans_dict","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts.PromptFlags","uri":"program://AgentLab/class/src.agentlab.agents.visual_agent.visual_agent_prompts.PromptFlags#L18-L29","kind":"class","name":"PromptFlags","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":18,"end_line":29,"context_start_line":1,"context_end_line":49,"code":"\"\"\"\nPrompt builder for GenericAgent\n\nIt is based on the dynamic_prompting module from the agentlab package.\n\"\"\"\n\nimport logging\nfrom dataclasses import dataclass\nimport bgym\n\nfrom browsergym.core.action.base import AbstractActionSet\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.llm.llm_utils import BaseMessage, HumanMessage, image_to_jpg_base64_url\n\n\n@dataclass\nclass PromptFlags(dp.Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n \"\"\"\n\n obs: dp.ObsFlags = None\n action: dp.ActionFlags = None\n use_thinking: bool = True\n use_concrete_example: bool = False\n use_abstract_example: bool = True\n enable_chat: bool = False\n extra_instructions: str | None = None\n\n\nclass SystemPrompt(dp.PromptElement):\n _prompt = \"\"\"\\\nYou are an agent trying to solve a web task based on the content of the page and\nuser instructions. You can interact with the page and explore, and send messages to the user. Each time you\nsubmit an action it will be sent to the browser and you will receive a new page.\"\"\"\n\n\ndef make_instructions(obs: dict, from_chat: bool, extra_instructions: str | None):\n \"\"\"Convenient wrapper to extract instructions from either goal or chat\"\"\"\n if from_chat:\n instructions = dp.ChatInstructions(\n obs[\"chat_messages\"], extra_instructions=extra_instructions\n )\n else:\n if sum([msg[\"role\"] == \"user\" for msg in obs.get(\"chat_messages\", [])]) > 1:\n logging.warning(\n \"Agent is in goal mode, but multiple user messages are present in the chat. 
Consider switching to `enable_chat=True`.\"\n )","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts.SystemPrompt","uri":"program://AgentLab/class/src.agentlab.agents.visual_agent.visual_agent_prompts.SystemPrompt#L32-L36","kind":"class","name":"SystemPrompt","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":32,"end_line":36,"context_start_line":12,"context_end_line":56,"code":"\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.llm.llm_utils import BaseMessage, HumanMessage, image_to_jpg_base64_url\n\n\n@dataclass\nclass PromptFlags(dp.Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n \"\"\"\n\n obs: dp.ObsFlags = None\n action: dp.ActionFlags = None\n use_thinking: bool = True\n use_concrete_example: bool = False\n use_abstract_example: bool = True\n enable_chat: bool = False\n extra_instructions: str | None = None\n\n\nclass SystemPrompt(dp.PromptElement):\n _prompt = \"\"\"\\\nYou are an agent trying to solve a web task based on the content of the page and\nuser instructions. You can interact with the page and explore, and send messages to the user. Each time you\nsubmit an action it will be sent to the browser and you will receive a new page.\"\"\"\n\n\ndef make_instructions(obs: dict, from_chat: bool, extra_instructions: str | None):\n \"\"\"Convenient wrapper to extract instructions from either goal or chat\"\"\"\n if from_chat:\n instructions = dp.ChatInstructions(\n obs[\"chat_messages\"], extra_instructions=extra_instructions\n )\n else:\n if sum([msg[\"role\"] == \"user\" for msg in obs.get(\"chat_messages\", [])]) > 1:\n logging.warning(\n \"Agent is in goal mode, but multiple user messages are present in the chat. Consider switching to `enable_chat=True`.\"\n )\n instructions = dp.GoalInstructions(\n obs[\"goal_object\"], extra_instructions=extra_instructions\n )\n return instructions\n\n\nclass History(dp.PromptElement):","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts.make_instructions","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent_prompts.make_instructions#L39-L53","kind":"function","name":"make_instructions","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":39,"end_line":53,"context_start_line":19,"context_end_line":73,"code":" \"\"\"\n A class to represent various flags used to control features in an application.\n \"\"\"\n\n obs: dp.ObsFlags = None\n action: dp.ActionFlags = None\n use_thinking: bool = True\n use_concrete_example: bool = False\n use_abstract_example: bool = True\n enable_chat: bool = False\n extra_instructions: str | None = None\n\n\nclass SystemPrompt(dp.PromptElement):\n _prompt = \"\"\"\\\nYou are an agent trying to solve a web task based on the content of the page and\nuser instructions. You can interact with the page and explore, and send messages to the user. 
Each time you\nsubmit an action it will be sent to the browser and you will receive a new page.\"\"\"\n\n\ndef make_instructions(obs: dict, from_chat: bool, extra_instructions: str | None):\n \"\"\"Convenient wrapper to extract instructions from either goal or chat\"\"\"\n if from_chat:\n instructions = dp.ChatInstructions(\n obs[\"chat_messages\"], extra_instructions=extra_instructions\n )\n else:\n if sum([msg[\"role\"] == \"user\" for msg in obs.get(\"chat_messages\", [])]) > 1:\n logging.warning(\n \"Agent is in goal mode, but multiple user messages are present in the chat. Consider switching to `enable_chat=True`.\"\n )\n instructions = dp.GoalInstructions(\n obs[\"goal_object\"], extra_instructions=extra_instructions\n )\n return instructions\n\n\nclass History(dp.PromptElement):\n \"\"\"\n Format the actions and thoughts of previous steps.\"\"\"\n\n def __init__(self, actions, thoughts) -> None:\n super().__init__()\n prompt_elements = []\n for i, (action, thought) in enumerate(zip(actions, thoughts)):\n prompt_elements.append(\n f\"\"\"\n## Step {i}\n### Thoughts:\n{thought}\n### Action:\n{action}\n\"\"\"\n )\n self._prompt = \"\\n\".join(prompt_elements) + \"\\n\"","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts.History","uri":"program://AgentLab/class/src.agentlab.agents.visual_agent.visual_agent_prompts.History#L56-L73","kind":"class","name":"History","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":56,"end_line":73,"context_start_line":36,"context_end_line":93,"code":"submit an action it will be sent to the browser and you will receive a new page.\"\"\"\n\n\ndef make_instructions(obs: dict, from_chat: bool, extra_instructions: str | None):\n \"\"\"Convenient wrapper to extract instructions from either goal or chat\"\"\"\n if from_chat:\n instructions = dp.ChatInstructions(\n obs[\"chat_messages\"], extra_instructions=extra_instructions\n )\n else:\n if sum([msg[\"role\"] == \"user\" for msg in obs.get(\"chat_messages\", [])]) > 1:\n logging.warning(\n \"Agent is in goal mode, but multiple user messages are present in the chat. 
Consider switching to `enable_chat=True`.\"\n )\n instructions = dp.GoalInstructions(\n obs[\"goal_object\"], extra_instructions=extra_instructions\n )\n return instructions\n\n\nclass History(dp.PromptElement):\n \"\"\"\n Format the actions and thoughts of previous steps.\"\"\"\n\n def __init__(self, actions, thoughts) -> None:\n super().__init__()\n prompt_elements = []\n for i, (action, thought) in enumerate(zip(actions, thoughts)):\n prompt_elements.append(\n f\"\"\"\n## Step {i}\n### Thoughts:\n{thought}\n### Action:\n{action}\n\"\"\"\n )\n self._prompt = \"\\n\".join(prompt_elements) + \"\\n\"\n\n\nclass Observation(dp.PromptElement):\n \"\"\"Observation of the current step.\n\n Contains the html, the accessibility tree and the error logs.\n \"\"\"\n\n def __init__(self, obs, flags: dp.ObsFlags) -> None:\n super().__init__()\n self.flags = flags\n self.obs = obs\n\n # for a multi-tab browser, we need to show the current tab\n self.tabs = dp.Tabs(\n obs,\n visible=lambda: flags.use_tabs,\n prefix=\"## \",\n )\n","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts.Observation","uri":"program://AgentLab/class/src.agentlab.agents.visual_agent.visual_agent_prompts.Observation#L76-L121","kind":"class","name":"Observation","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":76,"end_line":121,"context_start_line":56,"context_end_line":141,"code":"class History(dp.PromptElement):\n \"\"\"\n Format the actions and thoughts of previous steps.\"\"\"\n\n def __init__(self, actions, thoughts) -> None:\n super().__init__()\n prompt_elements = []\n for i, (action, thought) in enumerate(zip(actions, thoughts)):\n prompt_elements.append(\n f\"\"\"\n## Step {i}\n### Thoughts:\n{thought}\n### Action:\n{action}\n\"\"\"\n )\n self._prompt = \"\\n\".join(prompt_elements) + \"\\n\"\n\n\nclass Observation(dp.PromptElement):\n \"\"\"Observation of the current step.\n\n Contains the html, the accessibility tree and the error logs.\n \"\"\"\n\n def __init__(self, obs, flags: dp.ObsFlags) -> None:\n super().__init__()\n self.flags = flags\n self.obs = obs\n\n # for a multi-tab browser, we need to show the current tab\n self.tabs = dp.Tabs(\n obs,\n visible=lambda: flags.use_tabs,\n prefix=\"## \",\n )\n\n # if an error is present, we need to show it\n self.error = dp.Error(\n obs[\"last_action_error\"],\n visible=lambda: flags.use_error_logs and obs[\"last_action_error\"],\n prefix=\"## \",\n )\n\n @property\n def _prompt(self) -> str:\n return f\"\"\"\n# Observation of current step:\n{self.tabs.prompt}{self.error.prompt}\n\n\"\"\"\n\n def add_screenshot(self, prompt: BaseMessage) -> BaseMessage:\n if self.flags.use_screenshot:\n if self.flags.use_som:\n screenshot = self.obs[\"screenshot_som\"]\n prompt.add_text(\n \"\\n## Screenshot:\\nHere is a screenshot of the page, it is annotated with bounding boxes and corresponding bids:\"\n )\n else:\n screenshot = self.obs[\"screenshot\"]\n prompt.add_text(\"\\n## Screenshot:\\nHere is a screenshot of the page:\")\n img_url = image_to_jpg_base64_url(screenshot)\n prompt.add_image(img_url, detail=self.flags.openai_vision_detail)\n return prompt\n\n\nclass MainPrompt(dp.PromptElement):\n\n def __init__(\n self,\n action_set: AbstractActionSet,\n obs: dict,\n actions: list[str],\n thoughts: list[str],\n flags: PromptFlags,\n ) -> None:\n super().__init__()\n self.flags = flags\n 
self.history = History(actions, thoughts)\n self.instructions = make_instructions(obs, flags.enable_chat, flags.extra_instructions)\n self.obs = Observation(obs, self.flags.obs)\n\n self.action_prompt = dp.ActionPrompt(action_set, action_flags=flags.action)\n self.think = dp.Think(visible=lambda: flags.use_thinking)","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts.MainPrompt","uri":"program://AgentLab/class/src.agentlab.agents.visual_agent.visual_agent_prompts.MainPrompt#L124-L185","kind":"class","name":"MainPrompt","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":124,"end_line":185,"context_start_line":104,"context_end_line":185,"code":"# Observation of current step:\n{self.tabs.prompt}{self.error.prompt}\n\n\"\"\"\n\n def add_screenshot(self, prompt: BaseMessage) -> BaseMessage:\n if self.flags.use_screenshot:\n if self.flags.use_som:\n screenshot = self.obs[\"screenshot_som\"]\n prompt.add_text(\n \"\\n## Screenshot:\\nHere is a screenshot of the page, it is annotated with bounding boxes and corresponding bids:\"\n )\n else:\n screenshot = self.obs[\"screenshot\"]\n prompt.add_text(\"\\n## Screenshot:\\nHere is a screenshot of the page:\")\n img_url = image_to_jpg_base64_url(screenshot)\n prompt.add_image(img_url, detail=self.flags.openai_vision_detail)\n return prompt\n\n\nclass MainPrompt(dp.PromptElement):\n\n def __init__(\n self,\n action_set: AbstractActionSet,\n obs: dict,\n actions: list[str],\n thoughts: list[str],\n flags: PromptFlags,\n ) -> None:\n super().__init__()\n self.flags = flags\n self.history = History(actions, thoughts)\n self.instructions = make_instructions(obs, flags.enable_chat, flags.extra_instructions)\n self.obs = Observation(obs, self.flags.obs)\n\n self.action_prompt = dp.ActionPrompt(action_set, action_flags=flags.action)\n self.think = dp.Think(visible=lambda: flags.use_thinking)\n\n @property\n def _prompt(self) -> HumanMessage:\n prompt = HumanMessage(self.instructions.prompt)\n prompt.add_text(\n f\"\"\"\\\n{self.obs.prompt}\\\n{self.history.prompt}\\\n{self.action_prompt.prompt}\\\n{self.think.prompt}\\\n\"\"\"\n )\n\n if self.flags.use_abstract_example:\n prompt.add_text(\n f\"\"\"\n# Abstract Example\n\nHere is an abstract version of the answer with description of the content of\neach tag. 
Make sure you follow this structure, but replace the content with your\nanswer:\n{self.think.abstract_ex}\\\n{self.action_prompt.abstract_ex}\\\n\"\"\"\n )\n\n if self.flags.use_concrete_example:\n prompt.add_text(\n f\"\"\"\n# Concrete Example\n\nHere is a concrete example of how to format your answer.\nMake sure to follow the template with proper tags:\n{self.think.concrete_ex}\\\n{self.action_prompt.concrete_ex}\\\n\"\"\"\n )\n return self.obs.add_screenshot(prompt)\n\n def _parse_answer(self, text_answer):\n ans_dict = {}\n ans_dict.update(self.think.parse_answer(text_answer))\n ans_dict.update(self.action_prompt.parse_answer(text_answer))\n return ans_dict","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts.__init__","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent_prompts.__init__#L126-L141","kind":"function","name":"__init__","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":126,"end_line":141,"context_start_line":106,"context_end_line":161,"code":"\n\"\"\"\n\n def add_screenshot(self, prompt: BaseMessage) -> BaseMessage:\n if self.flags.use_screenshot:\n if self.flags.use_som:\n screenshot = self.obs[\"screenshot_som\"]\n prompt.add_text(\n \"\\n## Screenshot:\\nHere is a screenshot of the page, it is annotated with bounding boxes and corresponding bids:\"\n )\n else:\n screenshot = self.obs[\"screenshot\"]\n prompt.add_text(\"\\n## Screenshot:\\nHere is a screenshot of the page:\")\n img_url = image_to_jpg_base64_url(screenshot)\n prompt.add_image(img_url, detail=self.flags.openai_vision_detail)\n return prompt\n\n\nclass MainPrompt(dp.PromptElement):\n\n def __init__(\n self,\n action_set: AbstractActionSet,\n obs: dict,\n actions: list[str],\n thoughts: list[str],\n flags: PromptFlags,\n ) -> None:\n super().__init__()\n self.flags = flags\n self.history = History(actions, thoughts)\n self.instructions = make_instructions(obs, flags.enable_chat, flags.extra_instructions)\n self.obs = Observation(obs, self.flags.obs)\n\n self.action_prompt = dp.ActionPrompt(action_set, action_flags=flags.action)\n self.think = dp.Think(visible=lambda: flags.use_thinking)\n\n @property\n def _prompt(self) -> HumanMessage:\n prompt = HumanMessage(self.instructions.prompt)\n prompt.add_text(\n f\"\"\"\\\n{self.obs.prompt}\\\n{self.history.prompt}\\\n{self.action_prompt.prompt}\\\n{self.think.prompt}\\\n\"\"\"\n )\n\n if self.flags.use_abstract_example:\n prompt.add_text(\n f\"\"\"\n# Abstract Example\n\nHere is an abstract version of the answer with description of the content of\neach tag. 
Make sure you follow this structure, but replace the content with your","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts._prompt","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent_prompts._prompt#L144-L179","kind":"function","name":"_prompt","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":144,"end_line":179,"context_start_line":124,"context_end_line":185,"code":"class MainPrompt(dp.PromptElement):\n\n def __init__(\n self,\n action_set: AbstractActionSet,\n obs: dict,\n actions: list[str],\n thoughts: list[str],\n flags: PromptFlags,\n ) -> None:\n super().__init__()\n self.flags = flags\n self.history = History(actions, thoughts)\n self.instructions = make_instructions(obs, flags.enable_chat, flags.extra_instructions)\n self.obs = Observation(obs, self.flags.obs)\n\n self.action_prompt = dp.ActionPrompt(action_set, action_flags=flags.action)\n self.think = dp.Think(visible=lambda: flags.use_thinking)\n\n @property\n def _prompt(self) -> HumanMessage:\n prompt = HumanMessage(self.instructions.prompt)\n prompt.add_text(\n f\"\"\"\\\n{self.obs.prompt}\\\n{self.history.prompt}\\\n{self.action_prompt.prompt}\\\n{self.think.prompt}\\\n\"\"\"\n )\n\n if self.flags.use_abstract_example:\n prompt.add_text(\n f\"\"\"\n# Abstract Example\n\nHere is an abstract version of the answer with description of the content of\neach tag. Make sure you follow this structure, but replace the content with your\nanswer:\n{self.think.abstract_ex}\\\n{self.action_prompt.abstract_ex}\\\n\"\"\"\n )\n\n if self.flags.use_concrete_example:\n prompt.add_text(\n f\"\"\"\n# Concrete Example\n\nHere is a concrete example of how to format your answer.\nMake sure to follow the template with proper tags:\n{self.think.concrete_ex}\\\n{self.action_prompt.concrete_ex}\\\n\"\"\"\n )\n return self.obs.add_screenshot(prompt)\n\n def _parse_answer(self, text_answer):\n ans_dict = {}\n ans_dict.update(self.think.parse_answer(text_answer))\n ans_dict.update(self.action_prompt.parse_answer(text_answer))\n return ans_dict","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts.add_screenshot","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent_prompts.add_screenshot#L109-L121","kind":"function","name":"add_screenshot","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":109,"end_line":121,"context_start_line":89,"context_end_line":141,"code":" obs,\n visible=lambda: flags.use_tabs,\n prefix=\"## \",\n )\n\n # if an error is present, we need to show it\n self.error = dp.Error(\n obs[\"last_action_error\"],\n visible=lambda: flags.use_error_logs and obs[\"last_action_error\"],\n prefix=\"## \",\n )\n\n @property\n def _prompt(self) -> str:\n return f\"\"\"\n# Observation of current step:\n{self.tabs.prompt}{self.error.prompt}\n\n\"\"\"\n\n def add_screenshot(self, prompt: BaseMessage) -> BaseMessage:\n if self.flags.use_screenshot:\n if self.flags.use_som:\n screenshot = self.obs[\"screenshot_som\"]\n prompt.add_text(\n \"\\n## Screenshot:\\nHere is a screenshot of the page, it is annotated with bounding boxes and corresponding bids:\"\n )\n else:\n screenshot = self.obs[\"screenshot\"]\n 
prompt.add_text(\"\\n## Screenshot:\\nHere is a screenshot of the page:\")\n img_url = image_to_jpg_base64_url(screenshot)\n prompt.add_image(img_url, detail=self.flags.openai_vision_detail)\n return prompt\n\n\nclass MainPrompt(dp.PromptElement):\n\n def __init__(\n self,\n action_set: AbstractActionSet,\n obs: dict,\n actions: list[str],\n thoughts: list[str],\n flags: PromptFlags,\n ) -> None:\n super().__init__()\n self.flags = flags\n self.history = History(actions, thoughts)\n self.instructions = make_instructions(obs, flags.enable_chat, flags.extra_instructions)\n self.obs = Observation(obs, self.flags.obs)\n\n self.action_prompt = dp.ActionPrompt(action_set, action_flags=flags.action)\n self.think = dp.Think(visible=lambda: flags.use_thinking)","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.visual_agent.visual_agent_prompts._parse_answer","uri":"program://AgentLab/function/src.agentlab.agents.visual_agent.visual_agent_prompts._parse_answer#L181-L185","kind":"function","name":"_parse_answer","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":181,"end_line":185,"context_start_line":161,"context_end_line":185,"code":"each tag. Make sure you follow this structure, but replace the content with your\nanswer:\n{self.think.abstract_ex}\\\n{self.action_prompt.abstract_ex}\\\n\"\"\"\n )\n\n if self.flags.use_concrete_example:\n prompt.add_text(\n f\"\"\"\n# Concrete Example\n\nHere is a concrete example of how to format your answer.\nMake sure to follow the template with proper tags:\n{self.think.concrete_ex}\\\n{self.action_prompt.concrete_ex}\\\n\"\"\"\n )\n return self.obs.add_screenshot(prompt)\n\n def _parse_answer(self, text_answer):\n ans_dict = {}\n ans_dict.update(self.think.parse_answer(text_answer))\n ans_dict.update(self.action_prompt.parse_answer(text_answer))\n return ans_dict","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent","uri":"program://AgentLab/module/src.agentlab.agents.tapeagent.agent#L1-L103","kind":"module","name":"src.agentlab.agents.tapeagent.agent","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":1,"end_line":103,"context_start_line":1,"context_end_line":103,"code":"import logging\nfrom dataclasses import dataclass\nfrom typing import Literal\n\nimport bgym\nimport hydra\nfrom omegaconf import DictConfig\nfrom pydantic import Field\nfrom tapeagents.agent import Agent\nfrom tapeagents.core import Action, Observation, StopStep, TapeMetadata, Thought\nfrom tapeagents.core import Tape as BaseTape\n\nfrom agentlab.agents.agent_args import AgentArgs\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass ExtendedMetadata(TapeMetadata):\n name: str = \"\"\n task: dict = {}\n terminated: bool = False\n truncated: bool = False\n reward: float = 0.0\n attempt_number: int = 0\n other: dict = {}\n\n\nclass Tape(BaseTape):\n metadata: ExtendedMetadata = Field(default_factory=ExtendedMetadata) # type: ignore\n\n\ndef load_config(config_name: str) -> DictConfig:\n with hydra.initialize(config_path=\"conf\", version_base=\"1.1\"):\n config = hydra.compose(config_name=config_name)\n return config\n\n\n@dataclass\nclass TapeAgentArgs(AgentArgs):\n config: DictConfig = None # type: ignore\n\n def make_agent(self) -> bgym.Agent:\n agent: Agent = 
hydra.utils.instantiate(self.config.agent)\n return TapeAgent(agent=agent)\n\n\n@dataclass\nclass TapeAgentInfo(bgym.AgentInfo):\n thoughts: list[Thought] = None # type: ignore\n\n\nclass DictObservation(Observation):\n \"\"\"\n Container for wrapping old dict observation into new Observation class.\n \"\"\"\n\n kind: Literal[\"dict_observation\"] = \"dict_observation\" # type: ignore\n content: str\n\n\nclass TapeAgent(bgym.Agent):\n agent: Agent\n tape: Tape\n\n def __init__(self, agent: Agent):\n super().__init__()\n self.agent = agent\n self.tape = Tape(steps=[])\n\n def obs_preprocessor(self, obs: Observation | list[Observation]) -> list[Observation]:\n if isinstance(obs, Observation):\n obs = [obs]\n assert isinstance(obs, list), f\"Expected list of Observations, got {type(obs)}\"\n logger.info(f\"Observations: {[type(o).__name__ for o in obs]}\")\n return obs\n\n def get_action(self, obs: Observation | list[Observation]) -> tuple[Action, TapeAgentInfo]:\n self.tape += obs # type: ignore\n thoughts: list[Thought] = []\n action = None\n while not action:\n for event in self.agent.run(self.tape):\n if not event.step:\n continue\n self.tape = self.tape.append(event.step)\n if isinstance(event.step, Thought):\n thoughts.append(event.step)\n logger.info(f\"Thought: {event.step.llm_view()}\")\n elif isinstance(event.step, Action) and not action: # we use first action only\n action = event.step\n logger.info(f\"Action: {action.llm_view()}\")\n else:\n # there could be control flow steps for switching nodes and if clauses\n logger.info(f\"Other step: {type(event.step)}\")\n logger.info(f\"Tape after run: ({len(self.tape)}) {[type(s).__name__ for s in self.tape]}\")\n return (action, TapeAgentInfo(thoughts=thoughts))\n\n @property\n def final_tape(self) -> Tape:\n truncated = not any([isinstance(s, StopStep) for s in self.tape.steps])\n self.tape.metadata = ExtendedMetadata(author=self.agent.name, truncated=truncated)\n return self.tape","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.ExtendedMetadata","uri":"program://AgentLab/class/src.agentlab.agents.tapeagent.agent.ExtendedMetadata#L19-L26","kind":"class","name":"ExtendedMetadata","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":19,"end_line":26,"context_start_line":1,"context_end_line":46,"code":"import logging\nfrom dataclasses import dataclass\nfrom typing import Literal\n\nimport bgym\nimport hydra\nfrom omegaconf import DictConfig\nfrom pydantic import Field\nfrom tapeagents.agent import Agent\nfrom tapeagents.core import Action, Observation, StopStep, TapeMetadata, Thought\nfrom tapeagents.core import Tape as BaseTape\n\nfrom agentlab.agents.agent_args import AgentArgs\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass ExtendedMetadata(TapeMetadata):\n name: str = \"\"\n task: dict = {}\n terminated: bool = False\n truncated: bool = False\n reward: float = 0.0\n attempt_number: int = 0\n other: dict = {}\n\n\nclass Tape(BaseTape):\n metadata: ExtendedMetadata = Field(default_factory=ExtendedMetadata) # type: ignore\n\n\ndef load_config(config_name: str) -> DictConfig:\n with hydra.initialize(config_path=\"conf\", version_base=\"1.1\"):\n config = hydra.compose(config_name=config_name)\n return config\n\n\n@dataclass\nclass TapeAgentArgs(AgentArgs):\n config: DictConfig = None # type: ignore\n\n def make_agent(self) -> bgym.Agent:\n agent: Agent = 
hydra.utils.instantiate(self.config.agent)\n return TapeAgent(agent=agent)\n","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.Tape","uri":"program://AgentLab/class/src.agentlab.agents.tapeagent.agent.Tape#L29-L30","kind":"class","name":"Tape","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":29,"end_line":30,"context_start_line":9,"context_end_line":50,"code":"from tapeagents.agent import Agent\nfrom tapeagents.core import Action, Observation, StopStep, TapeMetadata, Thought\nfrom tapeagents.core import Tape as BaseTape\n\nfrom agentlab.agents.agent_args import AgentArgs\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass ExtendedMetadata(TapeMetadata):\n name: str = \"\"\n task: dict = {}\n terminated: bool = False\n truncated: bool = False\n reward: float = 0.0\n attempt_number: int = 0\n other: dict = {}\n\n\nclass Tape(BaseTape):\n metadata: ExtendedMetadata = Field(default_factory=ExtendedMetadata) # type: ignore\n\n\ndef load_config(config_name: str) -> DictConfig:\n with hydra.initialize(config_path=\"conf\", version_base=\"1.1\"):\n config = hydra.compose(config_name=config_name)\n return config\n\n\n@dataclass\nclass TapeAgentArgs(AgentArgs):\n config: DictConfig = None # type: ignore\n\n def make_agent(self) -> bgym.Agent:\n agent: Agent = hydra.utils.instantiate(self.config.agent)\n return TapeAgent(agent=agent)\n\n\n@dataclass\nclass TapeAgentInfo(bgym.AgentInfo):\n thoughts: list[Thought] = None # type: ignore","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.load_config","uri":"program://AgentLab/function/src.agentlab.agents.tapeagent.agent.load_config#L33-L36","kind":"function","name":"load_config","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":33,"end_line":36,"context_start_line":13,"context_end_line":56,"code":"from agentlab.agents.agent_args import AgentArgs\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass ExtendedMetadata(TapeMetadata):\n name: str = \"\"\n task: dict = {}\n terminated: bool = False\n truncated: bool = False\n reward: float = 0.0\n attempt_number: int = 0\n other: dict = {}\n\n\nclass Tape(BaseTape):\n metadata: ExtendedMetadata = Field(default_factory=ExtendedMetadata) # type: ignore\n\n\ndef load_config(config_name: str) -> DictConfig:\n with hydra.initialize(config_path=\"conf\", version_base=\"1.1\"):\n config = hydra.compose(config_name=config_name)\n return config\n\n\n@dataclass\nclass TapeAgentArgs(AgentArgs):\n config: DictConfig = None # type: ignore\n\n def make_agent(self) -> bgym.Agent:\n agent: Agent = hydra.utils.instantiate(self.config.agent)\n return TapeAgent(agent=agent)\n\n\n@dataclass\nclass TapeAgentInfo(bgym.AgentInfo):\n thoughts: list[Thought] = None # type: ignore\n\n\nclass DictObservation(Observation):\n \"\"\"\n Container for wrapping old dict observation into new Observation class.\n \"\"\"","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} 
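The `load_config` helper above composes a Hydra config from the package's `conf/` directory (`hydra.initialize` plus `hydra.compose`, both standard Hydra 1.1 APIs as shown in the source). A hypothetical way to wire it into `TapeAgentArgs` follows; the config name `my_agent` is a placeholder and assumes a matching YAML with an `agent` node exists.

```python
# Hypothetical usage of load_config with TapeAgentArgs. "my_agent" is a
# placeholder config name; this assumes conf/my_agent.yaml defines an
# `agent` node that hydra.utils.instantiate can build into a tapeagents Agent.
from agentlab.agents.tapeagent.agent import TapeAgentArgs, load_config

cfg = load_config("my_agent")          # hydra.initialize + hydra.compose
agent_args = TapeAgentArgs(config=cfg)
tape_agent = agent_args.make_agent()   # instantiates cfg.agent, wraps it in TapeAgent
```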
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.TapeAgentArgs","uri":"program://AgentLab/class/src.agentlab.agents.tapeagent.agent.TapeAgentArgs#L40-L45","kind":"class","name":"TapeAgentArgs","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":40,"end_line":45,"context_start_line":20,"context_end_line":65,"code":" name: str = \"\"\n task: dict = {}\n terminated: bool = False\n truncated: bool = False\n reward: float = 0.0\n attempt_number: int = 0\n other: dict = {}\n\n\nclass Tape(BaseTape):\n metadata: ExtendedMetadata = Field(default_factory=ExtendedMetadata) # type: ignore\n\n\ndef load_config(config_name: str) -> DictConfig:\n with hydra.initialize(config_path=\"conf\", version_base=\"1.1\"):\n config = hydra.compose(config_name=config_name)\n return config\n\n\n@dataclass\nclass TapeAgentArgs(AgentArgs):\n config: DictConfig = None # type: ignore\n\n def make_agent(self) -> bgym.Agent:\n agent: Agent = hydra.utils.instantiate(self.config.agent)\n return TapeAgent(agent=agent)\n\n\n@dataclass\nclass TapeAgentInfo(bgym.AgentInfo):\n thoughts: list[Thought] = None # type: ignore\n\n\nclass DictObservation(Observation):\n \"\"\"\n Container for wrapping old dict observation into new Observation class.\n \"\"\"\n\n kind: Literal[\"dict_observation\"] = \"dict_observation\" # type: ignore\n content: str\n\n\nclass TapeAgent(bgym.Agent):\n agent: Agent\n tape: Tape\n","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.TapeAgentInfo","uri":"program://AgentLab/class/src.agentlab.agents.tapeagent.agent.TapeAgentInfo#L49-L50","kind":"class","name":"TapeAgentInfo","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":49,"end_line":50,"context_start_line":29,"context_end_line":70,"code":"class Tape(BaseTape):\n metadata: ExtendedMetadata = Field(default_factory=ExtendedMetadata) # type: ignore\n\n\ndef load_config(config_name: str) -> DictConfig:\n with hydra.initialize(config_path=\"conf\", version_base=\"1.1\"):\n config = hydra.compose(config_name=config_name)\n return config\n\n\n@dataclass\nclass TapeAgentArgs(AgentArgs):\n config: DictConfig = None # type: ignore\n\n def make_agent(self) -> bgym.Agent:\n agent: Agent = hydra.utils.instantiate(self.config.agent)\n return TapeAgent(agent=agent)\n\n\n@dataclass\nclass TapeAgentInfo(bgym.AgentInfo):\n thoughts: list[Thought] = None # type: ignore\n\n\nclass DictObservation(Observation):\n \"\"\"\n Container for wrapping old dict observation into new Observation class.\n \"\"\"\n\n kind: Literal[\"dict_observation\"] = \"dict_observation\" # type: ignore\n content: str\n\n\nclass TapeAgent(bgym.Agent):\n agent: Agent\n tape: Tape\n\n def __init__(self, agent: Agent):\n super().__init__()\n self.agent = agent\n self.tape = Tape(steps=[])\n","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.DictObservation","uri":"program://AgentLab/class/src.agentlab.agents.tapeagent.agent.DictObservation#L53-L59","kind":"class","name":"DictObservation","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":53,"end_line":59,"context_start_line":33,"context_end_line":79,"code":"def load_config(config_name: str) -> DictConfig:\n with hydra.initialize(config_path=\"conf\", version_base=\"1.1\"):\n config = 
hydra.compose(config_name=config_name)\n return config\n\n\n@dataclass\nclass TapeAgentArgs(AgentArgs):\n config: DictConfig = None # type: ignore\n\n def make_agent(self) -> bgym.Agent:\n agent: Agent = hydra.utils.instantiate(self.config.agent)\n return TapeAgent(agent=agent)\n\n\n@dataclass\nclass TapeAgentInfo(bgym.AgentInfo):\n thoughts: list[Thought] = None # type: ignore\n\n\nclass DictObservation(Observation):\n \"\"\"\n Container for wrapping old dict observation into new Observation class.\n \"\"\"\n\n kind: Literal[\"dict_observation\"] = \"dict_observation\" # type: ignore\n content: str\n\n\nclass TapeAgent(bgym.Agent):\n agent: Agent\n tape: Tape\n\n def __init__(self, agent: Agent):\n super().__init__()\n self.agent = agent\n self.tape = Tape(steps=[])\n\n def obs_preprocessor(self, obs: Observation | list[Observation]) -> list[Observation]:\n if isinstance(obs, Observation):\n obs = [obs]\n assert isinstance(obs, list), f\"Expected list of Observations, got {type(obs)}\"\n logger.info(f\"Observations: {[type(o).__name__ for o in obs]}\")\n return obs\n\n def get_action(self, obs: Observation | list[Observation]) -> tuple[Action, TapeAgentInfo]:\n self.tape += obs # type: ignore","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.TapeAgent","uri":"program://AgentLab/class/src.agentlab.agents.tapeagent.agent.TapeAgent#L62-L103","kind":"class","name":"TapeAgent","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":62,"end_line":103,"context_start_line":42,"context_end_line":103,"code":"\n def make_agent(self) -> bgym.Agent:\n agent: Agent = hydra.utils.instantiate(self.config.agent)\n return TapeAgent(agent=agent)\n\n\n@dataclass\nclass TapeAgentInfo(bgym.AgentInfo):\n thoughts: list[Thought] = None # type: ignore\n\n\nclass DictObservation(Observation):\n \"\"\"\n Container for wrapping old dict observation into new Observation class.\n \"\"\"\n\n kind: Literal[\"dict_observation\"] = \"dict_observation\" # type: ignore\n content: str\n\n\nclass TapeAgent(bgym.Agent):\n agent: Agent\n tape: Tape\n\n def __init__(self, agent: Agent):\n super().__init__()\n self.agent = agent\n self.tape = Tape(steps=[])\n\n def obs_preprocessor(self, obs: Observation | list[Observation]) -> list[Observation]:\n if isinstance(obs, Observation):\n obs = [obs]\n assert isinstance(obs, list), f\"Expected list of Observations, got {type(obs)}\"\n logger.info(f\"Observations: {[type(o).__name__ for o in obs]}\")\n return obs\n\n def get_action(self, obs: Observation | list[Observation]) -> tuple[Action, TapeAgentInfo]:\n self.tape += obs # type: ignore\n thoughts: list[Thought] = []\n action = None\n while not action:\n for event in self.agent.run(self.tape):\n if not event.step:\n continue\n self.tape = self.tape.append(event.step)\n if isinstance(event.step, Thought):\n thoughts.append(event.step)\n logger.info(f\"Thought: {event.step.llm_view()}\")\n elif isinstance(event.step, Action) and not action: # we use first action only\n action = event.step\n logger.info(f\"Action: {action.llm_view()}\")\n else:\n # there could be control flow steps for switching nodes and if clauses\n logger.info(f\"Other step: {type(event.step)}\")\n logger.info(f\"Tape after run: ({len(self.tape)}) {[type(s).__name__ for s in self.tape]}\")\n return (action, TapeAgentInfo(thoughts=thoughts))\n\n @property\n def final_tape(self) -> Tape:\n truncated = not 
any([isinstance(s, StopStep) for s in self.tape.steps])\n self.tape.metadata = ExtendedMetadata(author=self.agent.name, truncated=truncated)\n return self.tape","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.make_agent","uri":"program://AgentLab/function/src.agentlab.agents.tapeagent.agent.make_agent#L43-L45","kind":"function","name":"make_agent","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":43,"end_line":45,"context_start_line":23,"context_end_line":65,"code":" truncated: bool = False\n reward: float = 0.0\n attempt_number: int = 0\n other: dict = {}\n\n\nclass Tape(BaseTape):\n metadata: ExtendedMetadata = Field(default_factory=ExtendedMetadata) # type: ignore\n\n\ndef load_config(config_name: str) -> DictConfig:\n with hydra.initialize(config_path=\"conf\", version_base=\"1.1\"):\n config = hydra.compose(config_name=config_name)\n return config\n\n\n@dataclass\nclass TapeAgentArgs(AgentArgs):\n config: DictConfig = None # type: ignore\n\n def make_agent(self) -> bgym.Agent:\n agent: Agent = hydra.utils.instantiate(self.config.agent)\n return TapeAgent(agent=agent)\n\n\n@dataclass\nclass TapeAgentInfo(bgym.AgentInfo):\n thoughts: list[Thought] = None # type: ignore\n\n\nclass DictObservation(Observation):\n \"\"\"\n Container for wrapping old dict observation into new Observation class.\n \"\"\"\n\n kind: Literal[\"dict_observation\"] = \"dict_observation\" # type: ignore\n content: str\n\n\nclass TapeAgent(bgym.Agent):\n agent: Agent\n tape: Tape\n","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.__init__","uri":"program://AgentLab/function/src.agentlab.agents.tapeagent.agent.__init__#L66-L69","kind":"function","name":"__init__","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":66,"end_line":69,"context_start_line":46,"context_end_line":89,"code":"\n\n@dataclass\nclass TapeAgentInfo(bgym.AgentInfo):\n thoughts: list[Thought] = None # type: ignore\n\n\nclass DictObservation(Observation):\n \"\"\"\n Container for wrapping old dict observation into new Observation class.\n \"\"\"\n\n kind: Literal[\"dict_observation\"] = \"dict_observation\" # type: ignore\n content: str\n\n\nclass TapeAgent(bgym.Agent):\n agent: Agent\n tape: Tape\n\n def __init__(self, agent: Agent):\n super().__init__()\n self.agent = agent\n self.tape = Tape(steps=[])\n\n def obs_preprocessor(self, obs: Observation | list[Observation]) -> list[Observation]:\n if isinstance(obs, Observation):\n obs = [obs]\n assert isinstance(obs, list), f\"Expected list of Observations, got {type(obs)}\"\n logger.info(f\"Observations: {[type(o).__name__ for o in obs]}\")\n return obs\n\n def get_action(self, obs: Observation | list[Observation]) -> tuple[Action, TapeAgentInfo]:\n self.tape += obs # type: ignore\n thoughts: list[Thought] = []\n action = None\n while not action:\n for event in self.agent.run(self.tape):\n if not event.step:\n continue\n self.tape = self.tape.append(event.step)\n if isinstance(event.step, Thought):\n thoughts.append(event.step)\n logger.info(f\"Thought: {event.step.llm_view()}\")","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} 
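The records above define the construction path for tape agents: TapeAgentArgs.make_agent() instantiates whatever agent class the Hydra config names, then wraps it in a TapeAgent that starts from an empty Tape. A minimal sketch of that path, assuming a "gaia_l1" YAML exists under the package's conf/ directory (the config name is borrowed from the run_gaia experiment and is otherwise illustrative):

from agentlab.agents.tapeagent.agent import TapeAgentArgs, load_config

# load_config() composes conf/<name>.yaml with Hydra (version_base "1.1").
config = load_config("gaia_l1")  # illustrative config name

# make_agent() calls hydra.utils.instantiate(config.agent) and wraps the
# result in a TapeAgent whose tape begins with no steps.
args = TapeAgentArgs(agent_name=config.name, config=config)
agent = args.make_agent()
assert len(agent.tape) == 0  # fresh episode tape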
{"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.obs_preprocessor","uri":"program://AgentLab/function/src.agentlab.agents.tapeagent.agent.obs_preprocessor#L71-L76","kind":"function","name":"obs_preprocessor","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":71,"end_line":76,"context_start_line":51,"context_end_line":96,"code":"\n\nclass DictObservation(Observation):\n \"\"\"\n Container for wrapping old dict observation into new Observation class.\n \"\"\"\n\n kind: Literal[\"dict_observation\"] = \"dict_observation\" # type: ignore\n content: str\n\n\nclass TapeAgent(bgym.Agent):\n agent: Agent\n tape: Tape\n\n def __init__(self, agent: Agent):\n super().__init__()\n self.agent = agent\n self.tape = Tape(steps=[])\n\n def obs_preprocessor(self, obs: Observation | list[Observation]) -> list[Observation]:\n if isinstance(obs, Observation):\n obs = [obs]\n assert isinstance(obs, list), f\"Expected list of Observations, got {type(obs)}\"\n logger.info(f\"Observations: {[type(o).__name__ for o in obs]}\")\n return obs\n\n def get_action(self, obs: Observation | list[Observation]) -> tuple[Action, TapeAgentInfo]:\n self.tape += obs # type: ignore\n thoughts: list[Thought] = []\n action = None\n while not action:\n for event in self.agent.run(self.tape):\n if not event.step:\n continue\n self.tape = self.tape.append(event.step)\n if isinstance(event.step, Thought):\n thoughts.append(event.step)\n logger.info(f\"Thought: {event.step.llm_view()}\")\n elif isinstance(event.step, Action) and not action: # we use first action only\n action = event.step\n logger.info(f\"Action: {action.llm_view()}\")\n else:\n # there could be control flow steps for switching nodes and if clauses\n logger.info(f\"Other step: {type(event.step)}\")\n logger.info(f\"Tape after run: ({len(self.tape)}) {[type(s).__name__ for s in self.tape]}\")","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.get_action","uri":"program://AgentLab/function/src.agentlab.agents.tapeagent.agent.get_action#L78-L97","kind":"function","name":"get_action","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":78,"end_line":97,"context_start_line":58,"context_end_line":103,"code":" kind: Literal[\"dict_observation\"] = \"dict_observation\" # type: ignore\n content: str\n\n\nclass TapeAgent(bgym.Agent):\n agent: Agent\n tape: Tape\n\n def __init__(self, agent: Agent):\n super().__init__()\n self.agent = agent\n self.tape = Tape(steps=[])\n\n def obs_preprocessor(self, obs: Observation | list[Observation]) -> list[Observation]:\n if isinstance(obs, Observation):\n obs = [obs]\n assert isinstance(obs, list), f\"Expected list of Observations, got {type(obs)}\"\n logger.info(f\"Observations: {[type(o).__name__ for o in obs]}\")\n return obs\n\n def get_action(self, obs: Observation | list[Observation]) -> tuple[Action, TapeAgentInfo]:\n self.tape += obs # type: ignore\n thoughts: list[Thought] = []\n action = None\n while not action:\n for event in self.agent.run(self.tape):\n if not event.step:\n continue\n self.tape = self.tape.append(event.step)\n if isinstance(event.step, Thought):\n thoughts.append(event.step)\n logger.info(f\"Thought: {event.step.llm_view()}\")\n elif isinstance(event.step, Action) and not action: # we use first action only\n action = event.step\n logger.info(f\"Action: {action.llm_view()}\")\n else:\n # there could be control 
flow steps for switching nodes and if clauses\n logger.info(f\"Other step: {type(event.step)}\")\n logger.info(f\"Tape after run: ({len(self.tape)}) {[type(s).__name__ for s in self.tape]}\")\n return (action, TapeAgentInfo(thoughts=thoughts))\n\n @property\n def final_tape(self) -> Tape:\n truncated = not any([isinstance(s, StopStep) for s in self.tape.steps])\n self.tape.metadata = ExtendedMetadata(author=self.agent.name, truncated=truncated)\n return self.tape","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.agent.final_tape","uri":"program://AgentLab/function/src.agentlab.agents.tapeagent.agent.final_tape#L100-L103","kind":"function","name":"final_tape","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":100,"end_line":103,"context_start_line":80,"context_end_line":103,"code":" thoughts: list[Thought] = []\n action = None\n while not action:\n for event in self.agent.run(self.tape):\n if not event.step:\n continue\n self.tape = self.tape.append(event.step)\n if isinstance(event.step, Thought):\n thoughts.append(event.step)\n logger.info(f\"Thought: {event.step.llm_view()}\")\n elif isinstance(event.step, Action) and not action: # we use first action only\n action = event.step\n logger.info(f\"Action: {action.llm_view()}\")\n else:\n # there could be control flow steps for switching nodes and if clauses\n logger.info(f\"Other step: {type(event.step)}\")\n logger.info(f\"Tape after run: ({len(self.tape)}) {[type(s).__name__ for s in self.tape]}\")\n return (action, TapeAgentInfo(thoughts=thoughts))\n\n @property\n def final_tape(self) -> Tape:\n truncated = not any([isinstance(s, StopStep) for s in self.tape.steps])\n self.tape.metadata = ExtendedMetadata(author=self.agent.name, truncated=truncated)\n return self.tape","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"py:src.agentlab.agents.tapeagent.experiments.run_gaia","uri":"program://AgentLab/module/src.agentlab.agents.tapeagent.experiments.run_gaia#L1-L25","kind":"module","name":"src.agentlab.agents.tapeagent.experiments.run_gaia","path":"src/agentlab/agents/tapeagent/experiments/run_gaia.py","language":"python","start_line":1,"end_line":25,"context_start_line":1,"context_end_line":25,"code":"import logging\nimport os\n\nfrom agentlab.agents.tapeagent.agent import TapeAgentArgs, load_config\nfrom agentlab.benchmarks.gaia import GaiaBenchmark, stop_old_sandbox\nfrom agentlab.experiments.study import make_study\n\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\nif __name__ == \"__main__\":\n config = load_config(\"gaia_l1\")\n study = make_study(\n benchmark=GaiaBenchmark.from_config(config), # type: ignore\n agent_args=TapeAgentArgs(agent_name=config.name, config=config),\n comment=config.comment,\n logging_level=logging.INFO,\n logging_level_stdout=logging.INFO,\n )\n stop_old_sandbox()\n if os.environ.get(\"AGENTLAB_DEBUG\"):\n study.exp_args_list = study.exp_args_list[:3]\n study.run(n_jobs=1, n_relaunch=1, parallel_backend=\"sequential\")\n else:\n study.run(n_jobs=config.n_jobs, n_relaunch=1, parallel_backend=config.parallel_backend)","source_hash":"6c5bf10626ab7864ca131189e4922c4c8edb78f40023f34855b45955da463ab0","truncated":false} 
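get_action() appends the incoming observations to the tape, then drives agent.run() until the first Action event: Thought steps are collected into TapeAgentInfo, control-flow steps are logged and skipped, and final_tape later marks the episode as truncated when no StopStep was produced. The episode loop this contract implies looks roughly like the sketch below; `env` with its reset() and step() methods is a hypothetical stand-in, not an AgentLab API:

# Hypothetical driver loop around a TapeAgent instance.
obs = env.reset()
done = False
while not done:
    obs_list = agent.obs_preprocessor(obs)     # normalize to list[Observation]
    action, info = agent.get_action(obs_list)  # only the first Action emitted is used
    for thought in info.thoughts:              # intermediate reasoning steps
        print(thought.llm_view())
    obs, done = env.step(action)
final = agent.final_tape  # metadata.truncated is True if no StopStep was reached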
{"repo_id":"AgentLab","entity_id":"file:main.py","uri":"program://AgentLab/file/main.py","kind":"file","name":"main.py","path":"main.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nNote: This script is a convenience script to launch experiments instead of using\nthe command line.\n\nCopy this script and modify at will, but don't push your changes to the\nrepository.\n\"\"\"\n\nimport logging\n\nfrom agentlab.agents.generic_agent import (\n AGENT_LLAMA3_70B,\n AGENT_LLAMA31_70B,\n RANDOM_SEARCH_AGENT,\n AGENT_4o,\n AGENT_4o_MINI,\n AGENT_o3_MINI,\n AGENT_37_SONNET,\n AGENT_CLAUDE_SONNET_35,\n AGENT_GPT5_MINI,\n)","source_hash":"cf52282bd08c6ac5ba93fcfc1ccacc25b04c07ed59f1f8a8ad630906d8499366","truncated":false} {"repo_id":"AgentLab","entity_id":"file:main_workarena_debug.py","uri":"program://AgentLab/file/main_workarena_debug.py","kind":"file","name":"main_workarena_debug.py","path":"main_workarena_debug.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nNote: This script is a convenience script to launch experiments instead of using\nthe command line.\n\nCopy this script and modify at will, but don't push your changes to the\nrepository.\n\"\"\"\n\nimport logging\nfrom copy import deepcopy\n\nimport bgym\n\nfrom agentlab.agents.tool_use_agent.tool_use_agent import (\n DEFAULT_PROMPT_CONFIG,\n GPT_4_1,\n ToolUseAgentArgs,\n)\nfrom agentlab.experiments.study import Study\n\nlogging.getLogger().setLevel(logging.INFO)","source_hash":"92a99e0d2b6b2fb8c18fb1060abf2c17ba8d9a100132f242959c7fb8ac18a4d8","truncated":false} {"repo_id":"AgentLab","entity_id":"file:add_study_to_repro_journal.py","uri":"program://AgentLab/file/add_study_to_repro_journal.py","kind":"file","name":"add_study_to_repro_journal.py","path":"add_study_to_repro_journal.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":18,"code":"import os\nfrom pathlib import Path\nfrom agentlab.experiments.study import Study\n\n\nbase_dir = \"/home/toolkit/ui_copilot_results\"\n\nexp_paths = [\n \"2025-01-31_22-08-34_genericagent-o3-mini-2025-01-31-on-workarena-l1\",\n # '2025-02-02_01-53-45_genericagent-openai-o1-mini-2024-09-12-on-workarena-l1',\n \"2025-02-02_01-55-04_genericagent-openai-o1-mini-2024-09-12-on-workarena-l1\",\n]\nfull_paths = [os.path.join(base_dir, exp_path) for exp_path in exp_paths]\n\nfor full_path in full_paths:\n study = Study.load(Path(full_path))\n\n study.append_to_journal(strict_reproducibility=False)","source_hash":"dc9b4b94f8f744a3656b875dd287be370af8551604785b276bd041bd6ba5b408","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/test_main.py","uri":"program://AgentLab/file/tests/test_main.py","kind":"file","name":"tests/test_main.py","path":"tests/test_main.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import subprocess\nimport sys\nfrom pathlib import Path\n\nimport pytest\n\n\n@pytest.mark.pricy\ndef test_main_script_execution():\n # this should trigger agent_4o_mini on miniwob_tiny_test unless this was\n # reconfigured differently.\n path = Path(__file__).parent.parent / \"main.py\"\n\n sys.path.insert(0, str(path.parent))\n\n # just make sure it's in the right state\n main = __import__(path.stem)\n assert main.benchmark == \"miniwob_tiny_test\"\n assert main.reproducibility_mode == False\n assert main.relaunch == False\n assert main.n_jobs <= 
10","source_hash":"1c339f46889f59d74a302b20ab4529bc84b49bcb05ef73f1d278f62ac5758348","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/test_ui_assistant.py","uri":"program://AgentLab/file/tests/test_ui_assistant.py","kind":"file","name":"tests/test_ui_assistant.py","path":"tests/test_ui_assistant.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":9,"code":"from agentlab.ui_assistant import make_exp_args\nfrom agentlab.agents.generic_agent import AGENT_4o\n\n\ndef test_make_exp_args():\n \"\"\"Basic unit test to detect refactoring errors.\"\"\"\n exp_args = make_exp_args(AGENT_4o, \"https://www.google.com\")\n\n assert exp_args.agent_args.flags.action.demo_mode == \"default\"","source_hash":"3a084bf5c64c372102deec1fae59a53a3c7f99ae7b2bb77735960cb40568a15a","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/verify_rate_limit_anthropic.py","uri":"program://AgentLab/file/tests/verify_rate_limit_anthropic.py","kind":"file","name":"tests/verify_rate_limit_anthropic.py","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nimport time\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nimport anthropic\n\nclient = anthropic.Anthropic(api_key=os.environ[\"ANTHROPIC_API_KEY\"])\n\n\ndef make_request(messages):\n response = client.messages.create(\n model=\"claude-3-5-sonnet-20241022\", max_tokens=10, messages=messages\n )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/analyze/test_overlay_utils.py","uri":"program://AgentLab/file/tests/analyze/test_overlay_utils.py","kind":"file","name":"tests/analyze/test_overlay_utils.py","path":"tests/analyze/test_overlay_utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from PIL import Image\n\nfrom agentlab.analyze import overlay_utils\n\n\ndef test_parse_function_calls():\n\n test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nclick('b123', button=\"right\", modifiers=[\"Shift\", \"Control\"])\nselect_option(\"c456\", [\"option1\", \"option2\"])\n\"\"\"\n\n result = overlay_utils.parse_function_calls(test_code)\n\n assert result[1].function_name == \"mouse_click\"\n assert result[1].name == \"y\"\n assert test_code[result[1].start_index : result[1].stop_index] == \"59\"\n\n assert result[8].function_name == \"select_option\"","source_hash":"9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/analyze/test_inspect_results.py","uri":"program://AgentLab/file/tests/analyze/test_inspect_results.py","kind":"file","name":"tests/analyze/test_inspect_results.py","path":"tests/analyze/test_inspect_results.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from pathlib import Path\nimport shutil\nimport tempfile\n\nimport pandas as pd\nfrom agentlab.analyze.inspect_results import get_study_summary\n\n\ndef test_get_study_summary():\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n study_dir = Path(tmp_dir) / \"test_study\"\n\n study_dir_original = Path(__file__).parent.parent / \"data\" / \"test_study\"\n\n # recursively copy the study to the temp dir using shutil\n 
shutil.copytree(study_dir_original, study_dir)\n\n        sentinel = {}\n\n        summary = get_study_summary(study_dir, sentinel=sentinel)","source_hash":"1b1e25989eb1e81a99bbac77634a76f275aa278a331ac3e97b1ddea85e224bd9","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/llm/test_huggingface_utils.py","uri":"program://AgentLab/file/tests/llm/test_huggingface_utils.py","kind":"file","name":"tests/llm/test_huggingface_utils.py","path":"tests/llm/test_huggingface_utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import pytest\n\nfrom agentlab.llm.chat_api import HuggingFaceURLChatModel, make_system_message, make_user_message\nfrom agentlab.llm.llm_utils import download_and_save_model\nfrom agentlab.llm.prompt_templates import STARCHAT_PROMPT_TEMPLATE\n\n# TODO(optimass): figure out a good model for all tests\n\n\n@pytest.mark.skip(reason=\"Requires a local model checkpoint\")\ndef test_CustomLLMChatbot_locally():\n    # model_path = \"google/flan-t5-base\" # remote model on HuggingFace Hub\n    model_path = \"/mnt/ui_copilot/data_rw/models/starcoderbase-1b-ft\" # local model in shared volume\n\n    chatbot = HuggingFaceURLChatModel(model_path=model_path, temperature=1e-3)\n\n    messages = [\n        make_system_message(\"Please tell me back the following word: \"),\n        make_user_message(\"bird\"),\n    ]\n","source_hash":"edea9db32c39d22cd6c155ff848d563256bff43e05e0fc6f3282e583888fe9d3","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/llm/test_llm_configs.py","uri":"program://AgentLab/file/tests/llm/test_llm_configs.py","kind":"file","name":"tests/llm/test_llm_configs.py","path":"tests/llm/test_llm_configs.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":8,"code":"from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.chat_api import BaseModelArgs\n\n\ndef test_llm_configs():\n\n    for _, args in CHAT_MODEL_ARGS_DICT.items():\n        assert isinstance(args, BaseModelArgs)","source_hash":"c905bf93d4a57f7fd18cc7000a71dbf31077cd1603c5b7f5641bd6000502b880","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/llm/test_tracking.py","uri":"program://AgentLab/file/tests/llm/test_tracking.py","kind":"file","name":"tests/llm/test_tracking.py","path":"tests/llm/test_tracking.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nimport time\nfrom functools import partial\n\nimport pytest\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.chat_api import (\n    AzureChatModel,\n    OpenAIChatModel,\n    OpenRouterChatModel,\n    make_system_message,\n    make_user_message,\n)\n\n\ndef test_get_action_decorator():\n    action, agent_info = tracking.cost_tracker_decorator(lambda x, y: call_llm())(None, None)\n    assert action == \"action\"\n    assert agent_info[\"stats\"] == {\n        \"input_tokens\": 1,","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/llm/test_litellm_api.py","uri":"program://AgentLab/file/tests/llm/test_litellm_api.py","kind":"file","name":"tests/llm/test_litellm_api.py","path":"tests/llm/test_litellm_api.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nfrom functools import partial\n\nimport pytest\nfrom agentlab.llm.litellm_api import LiteLLMModelArgs\nfrom agentlab.llm.response_api import APIPayload, LLMOutput\n\nchat_api_tools = [\n    {\n        \"type\": 
\"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/llm/test_response_api.py","uri":"program://AgentLab/file/tests/llm/test_response_api.py","kind":"file","name":"tests/llm/test_response_api.py","path":"tests/llm/test_response_api.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nfrom typing import Any, Dict, List, Optional\nfrom unittest.mock import MagicMock, patch\n\nimport anthropic\nimport openai\nimport pytest\n\nfrom agentlab.llm import tracking\nfrom agentlab.llm.response_api import (\n AnthropicAPIMessageBuilder,\n APIPayload,\n ClaudeResponseModelArgs,\n LLMOutput,\n OpenAIChatCompletionAPIMessageBuilder,\n OpenAIChatModelArgs,\n OpenAIResponseAPIMessageBuilder,\n OpenAIResponseModelArgs,\n)\n\n","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/llm/test_chat_api.py","uri":"program://AgentLab/file/tests/llm/test_chat_api.py","kind":"file","name":"tests/llm/test_chat_api.py","path":"tests/llm/test_chat_api.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\n\nimport pytest\n\nfrom agentlab.llm.chat_api import (\n AnthropicModelArgs,\n AzureModelArgs,\n OpenAIModelArgs,\n make_system_message,\n make_user_message,\n)\n\n# TODO(optimass): figure out a good model for all tests\n\n\nif \"AGENTLAB_LOCAL_TEST\" in os.environ:\n skip_tests = os.environ[\"AGENTLAB_LOCAL_TEST\"] != \"1\"\nelse:\n skip_tests = False\n\n","source_hash":"1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/llm/test_llm_utils.py","uri":"program://AgentLab/file/tests/llm/test_llm_utils.py","kind":"file","name":"tests/llm/test_llm_utils.py","path":"tests/llm/test_llm_utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import warnings\nfrom typing import Literal\nfrom unittest.mock import Mock\n\nimport httpx\nimport pytest\nfrom openai import RateLimitError\n\nfrom agentlab.llm import llm_utils\nfrom agentlab.llm.chat_api import make_system_message\n\nyaml_str = \"\"\"Analysis:\nThis is the analysis\n\nSummary: This is the summary\n\nConfidence Score: 7\n\"\"\"\n\n\ndef test_yaml_parser():","source_hash":"da51ff0c4decad4aa674ede0c63e9a1ea65685e1b1f3b04ef81628923c1af447","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/experiments/test_launch_exp.py","uri":"program://AgentLab/file/tests/experiments/test_launch_exp.py","kind":"file","name":"tests/experiments/test_launch_exp.py","path":"tests/experiments/test_launch_exp.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import math\nimport tempfile\nfrom pathlib import Path\n\nimport pytest\n\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_3_5, AGENT_4o_MINI\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments.launch_exp 
import (\n find_incomplete,\n non_dummy_count,\n run_experiments,\n)\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.experiments.study import Study\nfrom agentlab.llm.chat_api import CheatMiniWoBLLMArgs\n\n\ndef test_relaunch_study():\n study_dir = Path(__file__).parent.parent / \"data\" / \"test_study\"","source_hash":"2928ffc5b8e30dcb8f46368d60abb04f9c1527c9ea2dbf790053a8f1308d9285","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/experiments/test_multi_server.py","uri":"program://AgentLab/file/tests/experiments/test_multi_server.py","kind":"file","name":"tests/experiments/test_multi_server.py","path":"tests/experiments/test_multi_server.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from agentlab.experiments.multi_server import WebArenaInstanceVars\nfrom browsergym.webarena.instance import WebArenaInstance\n\n\ndef test_webarena_multiserver():\n\n instance_1 = WebArenaInstanceVars(\n base_url=\"http://webarena1.eastus.cloudapp.azure.com\",\n shopping=\"8082/\",\n shopping_admin=\"8083/admin\",\n reddit=\"8080\",\n gitlab=\"9001\",\n wikipedia=\"8081/wikipedia_en_all_maxi_2022-05/A/User:The_other_Kiwix_guy/Landing\",\n map=\"443\",\n homepage=\"80\",\n full_reset=\"7565\",\n module_name=\"webarena\",\n prefix=\"WA_\",\n )\n\n instance_1.init()","source_hash":"a2e6320bd23ca7f9804927c7f751ae02a9d2ebc372b66404a1cc1c46ccf4d86f","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/experiments/test_ray.py","uri":"program://AgentLab/file/tests/experiments/test_ray.py","kind":"file","name":"tests/experiments/test_ray.py","path":"tests/experiments/test_ray.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import bgym\nimport pytest\nimport ray\nfrom flaky import flaky\n\nfrom agentlab.experiments.exp_utils import MockedExpArgs, add_dependencies\nfrom agentlab.experiments.graph_execution_ray import execute_task_graph\n\nTASK_TIME = 3\n\n\n@flaky(max_runs=3, min_passes=1)\ndef test_execute_task_graph():\n # Define a list of ExpArgs with dependencies\n exp_args_list = [\n MockedExpArgs(exp_id=\"task1\", depends_on=[]),\n MockedExpArgs(exp_id=\"task2\", depends_on=[\"task1\"]),\n MockedExpArgs(exp_id=\"task3\", depends_on=[\"task1\"]),\n MockedExpArgs(exp_id=\"task4\", depends_on=[\"task2\", \"task3\"]),\n ]\n","source_hash":"cf7a13d302b1156c30cdf415e6286413e1c5d07dc70e8f5cb8b04cc3ec89cae1","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/experiments/test_args.py","uri":"program://AgentLab/file/tests/experiments/test_args.py","kind":"file","name":"tests/experiments/test_args.py","path":"tests/experiments/test_args.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from ast import mod\nfrom dataclasses import dataclass\nfrom agentlab.experiments.args import (\n expand_cross_product,\n CrossProd,\n Choice,\n make_progression_study,\n sample_args,\n make_ablation_study,\n)\n\n\n@dataclass\nclass LLMArgsTest:\n model_name: str = \"model1\"\n temperature: float = 0.1\n\n\n@dataclass\nclass ExpArgsTest:\n llm_args: LLMArgsTest","source_hash":"bf29ba898a0df2cd4add1b820df73fd73661deaecdc8c76fc7ddd4a153b6be81","truncated":false} 
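tests/experiments/test_args.py above exercises the grid-search helpers in agentlab.experiments.args. The sketch below illustrates the assumed contract of expand_cross_product: every CrossProd field is expanded into the cartesian product of its elements, yielding one concrete dataclass per combination. The demo dataclass is hypothetical:

from dataclasses import dataclass

from agentlab.experiments.args import CrossProd, expand_cross_product


@dataclass
class LLMArgsDemo:  # hypothetical stand-in for a model-args dataclass
    model_name: str = "model1"
    temperature: float = 0.1


grid = expand_cross_product(
    LLMArgsDemo(
        model_name=CrossProd(["model1", "model2"]),
        temperature=CrossProd([0.1, 1.0]),
    )
)
assert len(grid) == 4  # 2 model names x 2 temperatures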
{"repo_id":"AgentLab","entity_id":"file:tests/experiments/test_reproducibility_util.py","uri":"program://AgentLab/file/tests/experiments/test_reproducibility_util.py","kind":"file","name":"tests/experiments/test_reproducibility_util.py","path":"tests/experiments/test_reproducibility_util.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import json\nimport tempfile\nimport time\nfrom pathlib import Path\n\nimport bgym\nimport pytest\nfrom bgym import DEFAULT_BENCHMARKS\n\nfrom agentlab.agents.generic_agent import AGENT_4o_MINI\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments import reproducibility_util\n\n\n@pytest.mark.parametrize(\n \"benchmark_name\",\n [\"miniwob\", \"workarena_l1\", \"webarena\", \"visualwebarena\"],\n)\ndef test_get_reproducibility_info(benchmark_name):\n\n benchmark = DEFAULT_BENCHMARKS[benchmark_name]()","source_hash":"dc2db573a132c2279ef58e00f126007efe53c15b26bbc1b860be283c3a20a80d","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/experiments/test_study.py","uri":"program://AgentLab/file/tests/experiments/test_study.py","kind":"file","name":"tests/experiments/test_study.py","path":"tests/experiments/test_study.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import pytest\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_4o\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.llm.chat_api import CheatMiniWoBLLMArgs\nfrom agentlab.experiments.study import ParallelStudies, make_study, Study\nfrom agentlab.experiments.multi_server import WebArenaInstanceVars\nimport logging\n\n\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef _make_agent_args_list():\n # CheatMiniWoB agents won't succeed on WebArena, this is just for testing parallelization\n agent_args_list = []\n for i in range(2):\n agent_args = GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs(),\n flags=FLAGS_GPT_4o,\n )\n","source_hash":"4b57b012597beb12a266466d11fa45ce8ed5c221029ccf7650893a5f54c0a840","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/experiments/test_exp_configs.py","uri":"program://AgentLab/file/tests/experiments/test_exp_configs.py","kind":"file","name":"tests/experiments/test_exp_configs.py","path":"tests/experiments/test_exp_configs.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":1,"code":"from agentlab.experiments import study","source_hash":"aed6b5dda43aa03be5153eec7c5a1a91392be82bfdf13500d2cf4ca03cc8c824","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/benchmarks/test_osworld.py","uri":"program://AgentLab/file/tests/benchmarks/test_osworld.py","kind":"file","name":"tests/benchmarks/test_osworld.py","path":"tests/benchmarks/test_osworld.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import importlib.util\nimport tempfile\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nimport pytest\n\nspec = importlib.util.find_spec(\"desktop_env\")\nif spec is None:\n DESKTOP_ENV_AVAILABLE = False\n OSWorldActionSet = None\n OsworldEnvArgs = None\n OsworldGym = None\nelse:\n # If desktop_env is available, import the necessary classes\n from agentlab.benchmarks.osworld import (\n OSWorldActionSet,\n OsworldEnvArgs,\n OsworldGym,\n )\n","source_hash":"7c6d97bf660ba9a6641061024160423300df9e423f2bf383fda7c4269974f15d","truncated":false} 
{"repo_id":"AgentLab","entity_id":"file:tests/agents/test_agent.py","uri":"program://AgentLab/file/tests/agents/test_agent.py","kind":"file","name":"tests/agents/test_agent.py","path":"tests/agents/test_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import re\nimport tempfile\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom openai import OpenAIError\n\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_3_5\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments import launch_exp\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.llm.chat_api import BaseModelArgs, CheatMiniWoBLLMArgs\nfrom agentlab.llm.llm_utils import Discussion\n\n\ndef test_generic_agent():\n exp_args = ExpArgs(\n agent_args=GenericAgentArgs(\n chat_model_args=CheatMiniWoBLLMArgs(),\n flags=FLAGS_GPT_3_5,","source_hash":"8b280feaabcb410d9982a02da3621c0520a0bfff2cc8fca85fc4bdbde9241cab","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/agents/test_visualwebarena_agent.py","uri":"program://AgentLab/file/tests/agents/test_visualwebarena_agent.py","kind":"file","name":"tests/agents/test_visualwebarena_agent.py","path":"tests/agents/test_visualwebarena_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nimport tempfile\n\nimport pytest\n\nfrom agentlab.agents.visualwebarena.agent import VisualWebArenaAgentArgs\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\n\n@pytest.mark.pricy\ndef test_agent():\n with tempfile.TemporaryDirectory() as exp_dir:\n env_args = EnvArgs(\n task_name=\"miniwob.click-button\",\n task_seed=0,\n max_steps=10,\n headless=True,\n )\n\n chat_model_args = CHAT_MODEL_ARGS_DICT[\"openai/gpt-4o-mini-2024-07-18\"]","source_hash":"286649828d67dc957a6c23717b83ced89895c03c76a57e47edefc1fb9419650f","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/agents/test_gaia_agent.py","uri":"program://AgentLab/file/tests/agents/test_gaia_agent.py","kind":"file","name":"tests/agents/test_gaia_agent.py","path":"tests/agents/test_gaia_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nimport uuid\nfrom pathlib import Path\n\ntry:\n from tapeagents.steps import ImageObservation\n\n from agentlab.agents.tapeagent.agent import TapeAgent, TapeAgentArgs, load_config\n from agentlab.benchmarks.gaia import GaiaBenchmark, GaiaQuestion\nexcept ModuleNotFoundError:\n import pytest\n\n pytest.skip(\"Skipping test due to missing dependencies\", allow_module_level=True)\n\n\ndef mock_dataset() -> dict:\n \"\"\"Mock dataset for testing purposes.\"\"\"\n data = [{\"task_id\": str(uuid.uuid4()), \"file_name\": \"\", \"file_path\": \"\"} for i in range(165)]\n data[5] = {\n \"task_id\": \"32102e3e-d12a-4209-9163-7b3a104efe5d\",\n \"Question\": \"\"\"The attached spreadsheet shows the inventory for a movie and video game rental store in Seattle, Washington. What is the title of the oldest Blu-Ray recorded in this spreadsheet? 
Return it as appearing in the spreadsheet.\"\"\",","source_hash":"3973ab8712b8441a0bdc67ee6032767bee5bfc2c2d5fc2645ffda3029149e000","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tests/agents/test_generic_prompt.py","uri":"program://AgentLab/file/tests/agents/test_generic_prompt.py","kind":"file","name":"tests/agents/test_generic_prompt.py","path":"tests/agents/test_generic_prompt.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from copy import deepcopy\n\nimport bgym\nimport pytest\nfrom bgym import HighLevelActionSet, HighLevelActionSetArgs\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.generic_agent.agent_configs import FLAGS_GPT_3_5\nfrom agentlab.agents.generic_agent.generic_agent_prompt import GenericPromptFlags, MainPrompt\nfrom agentlab.llm.llm_utils import count_tokens\n\nhtml_template = \"\"\"\n\n\n
Hello World.\nStep {}.\nsome extra text to make the html longer\n","source_hash":"70ca9f3685fe3a46c52a6b90f3d153e9e86317d2c424342a1b20fc46f1a94df8","truncated":false} {"repo_id":"AgentLab","entity_id":"file:docs/source/conf.py","uri":"program://AgentLab/file/docs/source/conf.py","kind":"file","name":"docs/source/conf.py","path":"docs/source/conf.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Configuration file for the Sphinx documentation builder.\n#\n# For the full list of built-in configuration values, see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Project information -----------------------------------------------------\n# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information\n\nimport os\nimport subprocess\n\n\n# Automatically retrieve the project version from Git\ndef get_version():\n    try:\n        return subprocess.check_output([\"git\", \"describe\", \"--tags\"], encoding=\"utf-8\").strip()\n    except Exception:\n        return \"0.0.0\"\n\n\nproject = \"AgentLab\"","source_hash":"7f722dfbb158949f79af66975bcbd75d4d7bd4a3423059fe20b75e6747ba7454","truncated":false} {"repo_id":"AgentLab","entity_id":"file:tutorials/2_eval_on_miniwob/experiment.py","uri":"program://AgentLab/file/tutorials/2_eval_on_miniwob/experiment.py","kind":"file","name":"tutorials/2_eval_on_miniwob/experiment.py","path":"tutorials/2_eval_on_miniwob/experiment.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from pathlib import Path\n\nfrom bgym import DEFAULT_BENCHMARKS\nfrom dotenv import load_dotenv\n\nfrom agentlab.agents.generic_agent.tmlr_config import (\n    BASE_FLAGS,\n    CHAT_MODEL_ARGS_DICT,\n    GenericAgentArgs,\n)\nfrom agentlab.benchmarks.setup_benchmark import ensure_benchmark\nfrom agentlab.experiments.study import Study\n\n# This ensures the MiniWob assets are downloaded and sets MINIWOB_URL in the .env of the project dir.\nproject_dir = Path(__file__).parents[2]\nensure_benchmark(\"miniwob\", project_root=project_dir)\nload_dotenv(project_dir.joinpath(\".env\"), override=False) # load .env variables\n\n\nagent_config = GenericAgentArgs(\n    chat_model_args=CHAT_MODEL_ARGS_DICT[","source_hash":"2b1633b04e44be0153236f5ecfe637bac3342fdf685db42ae3b87f19f70b8cd3","truncated":false} {"repo_id":"AgentLab","entity_id":"file:experiments/run_osworld.py","uri":"program://AgentLab/file/experiments/run_osworld.py","kind":"file","name":"experiments/run_osworld.py","path":"experiments/run_osworld.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import json\nimport logging\nimport os\n\nfrom agentlab.agents.tool_use_agent.tool_use_agent import OSWORLD_CLAUDE\nfrom agentlab.benchmarks.osworld import OsworldBenchmark\nfrom agentlab.experiments.study import Study, make_study\n\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n\ndef get_most_recent_incomplete_study() -> Study:\n    \"\"\"\n    Relaunch an existing study; this will continue incomplete experiments and relaunch errored experiments.\n    \"\"\"\n    study = Study.load_most_recent()\n    study.find_incomplete(include_errors=True)\n    return study\n\n","source_hash":"0fc43cb4e5da8b657895d33c00a64d162b11f1c6d537b3ef7dda5d8816b0d11e","truncated":false}
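experiments/run_osworld.py above shows the resume idiom shared by these launch scripts: reload the most recent study directory, flag unfinished and errored experiments, and rerun only those. Condensed sketch; the job count and backend are illustrative:

from agentlab.experiments.study import Study

study = Study.load_most_recent()            # newest study under the results dir
study.find_incomplete(include_errors=True)  # keep only unfinished or errored runs
study.run(n_jobs=4, parallel_backend="ray", n_relaunch=3)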
{"repo_id":"AgentLab","entity_id":"file:src/agentlab/ui_assistant.py","uri":"program://AgentLab/file/src/agentlab/ui_assistant.py","kind":"file","name":"src/agentlab/ui_assistant.py","path":"src/agentlab/ui_assistant.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import argparse\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgentArgs\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.launch_exp import import_object\nfrom agentlab.experiments.loop import EnvArgs, ExpArgs\n\n\ndef make_exp_args(agent_args: AgentArgs, start_url: str) -> ExpArgs:\n try:\n agent_args.flags.action.demo_mode = \"default\"\n except AttributeError:\n pass\n\n if isinstance(agent_args, GenericAgentArgs):\n agent_args.flags.enable_chat = True\n\n exp_args = ExpArgs(\n agent_args=agent_args,\n env_args=EnvArgs(","source_hash":"d297795cc2edb4903388ef4913c82c39a17de6bc6f1eeb89cd5c8fa93e10c07b","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/__init__.py","uri":"program://AgentLab/file/src/agentlab/__init__.py","kind":"file","name":"src/agentlab/__init__.py","path":"src/agentlab/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":1,"code":"__version__ = \"v0.4.0\"","source_hash":"90bc4a6cb1ea13dcd149177d069ac2c38721aa93449db0b9fb13ffcc10cd7ea8","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/analyze/archive_studies.py","uri":"program://AgentLab/file/src/agentlab/analyze/archive_studies.py","kind":"file","name":"src/agentlab/analyze/archive_studies.py","path":"src/agentlab/analyze/archive_studies.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import os\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom agentlab.analyze import inspect_results\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.study import Study\n\n\n@dataclass\nclass StudyInfo:\n study_dir: Path\n study: Study\n summary_df: pd.DataFrame\n should_delete: bool = False\n reason: str = \"\"\n\n","source_hash":"6cea6f7c8acdd87b74c18ca1ab84052938be3024762c472ead51035c163aa15a","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/analyze/overlay_utils.py","uri":"program://AgentLab/file/src/agentlab/analyze/overlay_utils.py","kind":"file","name":"src/agentlab/analyze/overlay_utils.py","path":"src/agentlab/analyze/overlay_utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import ast\nimport inspect\nimport math\nfrom dataclasses import dataclass\nfrom typing import Any, Union\n\nimport matplotlib.pyplot as plt\nimport PIL\nfrom browsergym.core.action.highlevel import ACTION_SUBSETS\nfrom PIL import Image, ImageDraw\n\nBGYM_FUNCTION_MAP = {}\nfor subset in (\"bid\", \"coord\"):\n for func in ACTION_SUBSETS[subset]:\n if func not in BGYM_FUNCTION_MAP:\n BGYM_FUNCTION_MAP[func.__name__] = func\n\n\n@dataclass\nclass ArgInfo:\n function_name: str","source_hash":"f23004ae2cfc4f02728461d5ba27cf3f81962e76a322170cc44292b655b5401e","truncated":false} 
{"repo_id":"AgentLab","entity_id":"file:src/agentlab/analyze/episode_to_html.py","uri":"program://AgentLab/file/src/agentlab/analyze/episode_to_html.py","kind":"file","name":"src/agentlab/analyze/episode_to_html.py","path":"src/agentlab/analyze/episode_to_html.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import base64\nfrom io import BytesIO\nfrom pathlib import Path\n\nfrom agentlab.experiments.loop import ExpResult\nfrom agentlab.experiments.study import get_most_recent_study\nfrom agentlab.llm.llm_utils import BaseMessage as AgentLabBaseMessage\n\n\ndef exp_result_to_html(\n exp_result: ExpResult,\n steps_open: bool = True,\n som_open: bool = False,\n axtree_open: bool = False,\n html_open: bool = False,\n prompt_open: bool = False,\n embed_images: bool = True,\n) -> str:\n \"\"\"\n Convert an ExpResult to HTML with collapsible sections.\n","source_hash":"1fae4026df4795d85b781ead9e29f29bc0ee9d8eaa04518e66f9b2c0e7f98d4a","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/analyze/inspect_results.ipynb","uri":"program://AgentLab/file/src/agentlab/analyze/inspect_results.ipynb","kind":"file","name":"src/agentlab/analyze/inspect_results.ipynb","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import fnmatch\nimport json\nimport random\nimport re\nimport traceback\nimport warnings\nfrom collections import defaultdict\nfrom logging import warn\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import display\nfrom tqdm import tqdm\n\nfrom agentlab.experiments.loop import ExpResult, get_exp_result, yield_all_exp_results\n\n# TODO find a more portable way to code set_task_category_as_index at least\n# handle dynamic imports. We don't want to always import workarena\n# from browsergym.workarena import TASK_CATEGORY_MAP\n","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/analyze/inspect_results.py","uri":"program://AgentLab/file/src/agentlab/analyze/inspect_results.py","kind":"file","name":"src/agentlab/analyze/inspect_results.py","path":"src/agentlab/analyze/inspect_results.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import fnmatch\nimport json\nimport random\nimport re\nimport traceback\nimport warnings\nfrom collections import defaultdict\nfrom logging import warn\nfrom pathlib import Path\n\nimport numpy as np\nimport pandas as pd\nfrom IPython.display import display\nfrom tqdm import tqdm\n\nfrom agentlab.experiments.loop import ExpResult, get_exp_result, yield_all_exp_results\n\n# TODO find a more portable way to code set_task_category_as_index at least\n# handle dynamic imports. 
We don't want to always import workarena\n# from browsergym.workarena import TASK_CATEGORY_MAP\n","source_hash":"c31beb2b52085accfac773eec58e2a6594f99745c745696eb6facd3611d3ff02","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/analyze/tapes.py","uri":"program://AgentLab/file/src/agentlab/analyze/tapes.py","kind":"file","name":"src/agentlab/analyze/tapes.py","path":"src/agentlab/analyze/tapes.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import json\nimport logging\nimport sys\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport numpy as np\nimport yaml\nfrom tapeagents.core import Step, StepMetadata\nfrom tapeagents.observe import retrieve_all_llm_calls\nfrom tapeagents.renderers.camera_ready_renderer import CameraReadyRenderer\nfrom tapeagents.tape_browser import TapeBrowser\n\nfrom agentlab.agents.tapeagent.agent import ExtendedMetadata, Tape\nfrom agentlab.benchmarks.gaia import step_error\n\nlogger = logging.getLogger(__name__)\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\n","source_hash":"0886d56828de6e814d2184744b93e93ae8e0f032fec4aa65abdf3c0ec0539b56","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/analyze/agent_xray.py","uri":"program://AgentLab/file/src/agentlab/analyze/agent_xray.py","kind":"file","name":"src/agentlab/analyze/agent_xray.py","path":"src/agentlab/analyze/agent_xray.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import base64\nimport html\nimport os\nimport traceback\nfrom copy import deepcopy\nfrom io import BytesIO\nfrom logging import warning\nfrom pathlib import Path\n\nimport gradio as gr\nimport matplotlib.patches as patches\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom attr import dataclass\nfrom browsergym.experiments.loop import StepInfo as BGymStepInfo\nfrom langchain.schema import BaseMessage, HumanMessage\nfrom openai import OpenAI\nfrom openai.types.responses import ResponseFunctionToolCall\nfrom PIL import Image\n","source_hash":"2b6f548e28bf20888c6489ec1bc1a9e6289f0a1a4b74ea469634ef0ad26feff5","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/llm/huggingface_utils.py","uri":"program://AgentLab/file/src/agentlab/llm/huggingface_utils.py","kind":"file","name":"src/agentlab/llm/huggingface_utils.py","path":"src/agentlab/llm/huggingface_utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nimport os\nimport time\nfrom functools import partial\nfrom typing import Any, List, Optional, Union\n\nfrom pydantic import Field\n\nfrom agentlab.llm.base_api import AbstractChatModel\nfrom agentlab.llm.llm_utils import AIMessage, Discussion\nfrom agentlab.llm.prompt_templates import PromptTemplate, get_prompt_template\n\n\nclass HFBaseChatModel(AbstractChatModel):\n \"\"\"\n Custom LLM Chatbot that can interface with HuggingFace models with support for multiple samples.\n\n This class allows for the creation of a custom chatbot using models hosted\n on HuggingFace Hub or a local checkpoint. 
It provides flexibility in defining\n the temperature for response sampling and the maximum number of new tokens\n in the response.","source_hash":"9b2a8e6feb3b940567fd65f65fda874c9e0450ae9bf7c2721c0059438927ab0d","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/llm/response_api.py","uri":"program://AgentLab/file/src/agentlab/llm/response_api.py","kind":"file","name":"src/agentlab/llm/response_api.py","path":"src/agentlab/llm/response_api.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import json\nimport logging\nimport os\nfrom abc import ABC, abstractmethod\nfrom dataclasses import dataclass, field\nfrom typing import Any, Dict, List, Literal, Optional, Union\n\nimport openai\nfrom anthropic import Anthropic\nfrom anthropic.types import Completion\nfrom anthropic.types import Message as AnthrophicMessage\nfrom openai import OpenAI\n\nfrom agentlab.llm.llm_utils import image_to_png_base64_url\n\nfrom .base_api import BaseModelArgs\nfrom .llm_utils import (\n call_anthropic_api_with_retries,\n call_openai_api_with_retries,\n)\nfrom .tracking import TrackAPIPricingMixin","source_hash":"92bf756ba9b2e35bf371fd014c37e10d930ef566fe6d8e052eea8fc444eccad8","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/llm/base_api.py","uri":"program://AgentLab/file/src/agentlab/llm/base_api.py","kind":"file","name":"src/agentlab/llm/base_api.py","path":"src/agentlab/llm/base_api.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from abc import ABC, abstractmethod\nfrom dataclasses import dataclass\n\n\nclass AbstractChatModel(ABC):\n @abstractmethod\n def __call__(self, messages: list[dict]) -> dict:\n pass\n\n def get_stats(self):\n return {}\n\n\n@dataclass\nclass BaseModelArgs(ABC):\n \"\"\"Base class for all model arguments.\"\"\"\n\n model_name: str\n max_total_tokens: int = None\n max_input_tokens: int = None\n max_new_tokens: int = None","source_hash":"27abf96f0b6dc633c41b67db2274629ce0b8cdff07b92d72e060fb5d4847efae","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/llm/litellm_api.py","uri":"program://AgentLab/file/src/agentlab/llm/litellm_api.py","kind":"file","name":"src/agentlab/llm/litellm_api.py","path":"src/agentlab/llm/litellm_api.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import json\nimport logging\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Dict, List, Optional, Type\n\nimport litellm\nfrom litellm import completion\nfrom openai.types.chat import ChatCompletion as OpenAIChatCompletion\n\nfrom agentlab.llm.base_api import BaseModelArgs\nfrom agentlab.llm.response_api import (\n AgentlabAction,\n APIPayload,\n BaseModelWithPricing,\n LLMOutput,\n Message,\n MessageBuilder,\n OpenAIChatCompletionAPIMessageBuilder,\n ToolCall,\n ToolCalls,","source_hash":"f3f6175f1c2738428c62617db32724b4e1beb4e49a2e57a47037f8c91fdeddbc","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/llm/chat_api.py","uri":"program://AgentLab/file/src/agentlab/llm/chat_api.py","kind":"file","name":"src/agentlab/llm/chat_api.py","path":"src/agentlab/llm/chat_api.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nimport os\nimport re\nimport time\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Optional\n\nimport 
anthropic\nimport openai\nfrom openai import NOT_GIVEN, OpenAI\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.base_api import AbstractChatModel, BaseModelArgs\nfrom agentlab.llm.llm_utils import AIMessage, Discussion\n\n\ndef make_system_message(content: str) -> dict:\n return dict(role=\"system\", content=content)\n\n","source_hash":"f1cd0d3d0b86825e76bb18c7afbf8934d4974a38e67e41b452c636f0d6c1c887","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/llm/llm_configs.py","uri":"program://AgentLab/file/src/agentlab/llm/llm_configs.py","kind":"file","name":"src/agentlab/llm/llm_configs.py","path":"src/agentlab/llm/llm_configs.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from openai import NOT_GIVEN\n\nfrom agentlab.llm.chat_api import (\n AnthropicModelArgs,\n AzureModelArgs,\n OpenAIModelArgs,\n OpenRouterModelArgs,\n SelfHostedModelArgs,\n)\n\ndefault_oss_llms_args = {\n \"n_retry_server\": 4,\n \"temperature\": 0.01,\n}\n\nCLOSED_SOURCE_APIS = [\n \"openai\",\n \"reka\",\n \"test\",\n]\n","source_hash":"31b18587dcd9b3994a48302d8d832f7691cca607352131d2b5405f03f19606d8","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/llm/llm_utils.py","uri":"program://AgentLab/file/src/agentlab/llm/llm_utils.py","kind":"file","name":"src/agentlab/llm/llm_utils.py","path":"src/agentlab/llm/llm_utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import base64\nimport collections\nimport importlib\nimport io\nimport json\nimport logging\nimport os\nimport re\nimport time\nfrom copy import deepcopy\nfrom functools import cache\nfrom typing import TYPE_CHECKING, Any, Union\nfrom warnings import warn\n\nimport anthropic\nimport numpy as np\nimport openai\nimport tiktoken\nimport yaml\nfrom PIL import Image\n","source_hash":"c1e24a6f26b52277a012fcf031064732bd24955844738a647dd60caed2a715d1","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/llm/prompt_templates.py","uri":"program://AgentLab/file/src/agentlab/llm/prompt_templates.py","kind":"file","name":"src/agentlab/llm/prompt_templates.py","path":"src/agentlab/llm/prompt_templates.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from dataclasses import dataclass\nfrom typing import List\n\n\"\"\"\nTo use this class, you should have the ``openai`` python package installed, and the\nenvironment variable ``OPENAI_API_KEY`` set with your API key.\n\"\"\"\n\n\n@dataclass\nclass PromptTemplate:\n \"\"\"\n Base class for prompt templates.\n\n Defines a standard interface for prompt templates, ensuring that they contain\n the required fields for the CustomLLMChatbot.\n \"\"\"\n\n system: str\n human: str\n ai: str","source_hash":"3da81094279e3091ad880202e48336f663aeab6597eebec6c6fdc3a22aa3ac67","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/llm/tracking.py","uri":"program://AgentLab/file/src/agentlab/llm/tracking.py","kind":"file","name":"src/agentlab/llm/tracking.py","path":"src/agentlab/llm/tracking.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import importlib\nimport logging\nimport os\nimport re\nimport threading\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass, field\nfrom functools import cache, partial\nfrom typing import Optional\n\nimport 
requests\n\nlangchain_community = importlib.util.find_spec(\"langchain_community\")\nif langchain_community is not None:\n from langchain_community.callbacks import bedrock_anthropic_callback, openai_info\nelse:\n bedrock_anthropic_callback = None\n openai_info = None\nfrom litellm import completion_cost, get_model_info\n","source_hash":"2bdfa7569198f98ab9d4fb35ee9455450b6585e301295e394b34fb7b0490974f","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/launch_exp.py","uri":"program://AgentLab/file/src/agentlab/experiments/launch_exp.py","kind":"file","name":"src/agentlab/experiments/launch_exp.py","path":"src/agentlab/experiments/launch_exp.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nfrom importlib import import_module\nfrom pathlib import Path\n\nimport bgym\n\nfrom agentlab.experiments.exp_utils import run_exp\nfrom agentlab.experiments.loop import ExpArgs, yield_all_exp_results\n\n\ndef run_experiments(\n n_jobs,\n exp_args_list: list[ExpArgs],\n study_dir,\n parallel_backend=\"ray\",\n avg_step_timeout=60,\n):\n \"\"\"Run a list of ExpArgs in parallel.\n\n To ensure optimal parallelism, make sure ExpArgs.depend_on is set correctly\n and the backend is set to dask.","source_hash":"265b9682466bfb34018c9a6edc0a40a3a9b377a6671201c019e26e803bd99ec9","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/get_ray_url.py","uri":"program://AgentLab/file/src/agentlab/experiments/get_ray_url.py","kind":"file","name":"src/agentlab/experiments/get_ray_url.py","path":"src/agentlab/experiments/get_ray_url.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":10,"code":"\"\"\"Temporary script to get the ray dashboard url for the current experiment.\n\nTODO figure out a more convenient way.\n\"\"\"\n\nimport ray\n\ncontext = ray.init(address=\"auto\", ignore_reinit_error=True)\n\nprint(context)","source_hash":"a8590a8c7f293d042ec1f1ad204ab03a2cacfd2906b526360b7d65998c76e28b","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/graph_execution_ray.py","uri":"program://AgentLab/file/src/agentlab/experiments/graph_execution_ray.py","kind":"file","name":"src/agentlab/experiments/graph_execution_ray.py","path":"src/agentlab/experiments/graph_execution_ray.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nimport time\n\nimport bgym\nimport ray\nfrom ray.util import state\n\nfrom agentlab.experiments.exp_utils import _episode_timeout, run_exp\n\nlogger = logging.getLogger(__name__)\n\nrun_exp = ray.remote(run_exp)\n\n\ndef execute_task_graph(exp_args_list: list[bgym.ExpArgs], avg_step_timeout=60):\n \"\"\"Execute a task graph in parallel while respecting dependencies using Ray.\"\"\"\n\n exp_args_map = {exp_args.exp_id: exp_args for exp_args in exp_args_list}\n task_map = {}\n\n def get_task(exp_arg: bgym.ExpArgs):","source_hash":"caf5d357ffbbc51aa5d57b3b7a8b622760391be851d97fb7694bebb7e071ac2e","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/study.py","uri":"program://AgentLab/file/src/agentlab/experiments/study.py","kind":"file","name":"src/agentlab/experiments/study.py","path":"src/agentlab/experiments/study.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import gzip\nimport logging\nimport os\nimport pickle\nimport random\nimport 
uuid\nfrom abc import ABC, abstractmethod\nfrom concurrent.futures import ProcessPoolExecutor\nfrom dataclasses import asdict, dataclass\nfrom datetime import datetime\nfrom multiprocessing import Manager, Pool, Queue\nfrom pathlib import Path\n\nimport bgym\nfrom bgym import DEFAULT_BENCHMARKS, Benchmark\nfrom slugify import slugify\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.analyze import inspect_results\nfrom agentlab.benchmarks.abstract_env import AbstractEnvArgs\nfrom agentlab.experiments import reproducibility_util as repro","source_hash":"9fc48e904c84951afeeaae7a4f365b40a665dc6a6bd851d8761c9b89e727b920","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/view_dep_graph.py","uri":"program://AgentLab/file/src/agentlab/experiments/view_dep_graph.py","kind":"file","name":"src/agentlab/experiments/view_dep_graph.py","path":"src/agentlab/experiments/view_dep_graph.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"Dirty script to visualize the dependency graph of a benchmark, e.g. webarena, visualwebarena,\netc. You may have to adjust it to make it work for you.\"\"\"\n\nimport math\n\nimport bgym\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nfrom bgym import DEFAULT_BENCHMARKS\n\n\ndef clean_dict(dependency_dict: dict[str, list[str]]) -> dict[str, list[str]]:\n new_dep = {}\n for key, deps in dependency_dict.items():\n new_key = key.split(\".\")[-1]\n\n new_dep[new_key] = [dep.split(\".\")[-1] for dep in deps]\n return new_dep\n\n","source_hash":"38fc7f4b3b3bd85837a1ff7a48042eb13eadea3099d3552a13f4ef4ed8b9d95e","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/reproducibility_util.py","uri":"program://AgentLab/file/src/agentlab/experiments/reproducibility_util.py","kind":"file","name":"src/agentlab/experiments/reproducibility_util.py","path":"src/agentlab/experiments/reproducibility_util.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import csv\nimport logging\nimport os\nimport platform\nfrom datetime import datetime\nfrom importlib import metadata\nfrom pathlib import Path\n\nimport bgym\nimport pandas as pd\nfrom bgym import Benchmark\nfrom git import InvalidGitRepositoryError, Repo\nfrom git.config import GitConfigParser\n\nimport agentlab\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\n\n\ndef _get_repo(module):\n return Repo(Path(module.__file__).resolve().parent, search_parent_directories=True)\n","source_hash":"54254f1615c59eebcc3ecb9a908298bd8feee1e144e94c521941f58e97a914cd","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/loop.py","uri":"program://AgentLab/file/src/agentlab/experiments/loop.py","kind":"file","name":"src/agentlab/experiments/loop.py","path":"src/agentlab/experiments/loop.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import gzip\nimport importlib.metadata\nimport json\nimport logging\nimport os\nimport pickle\nimport re\nimport sys\nimport time\nimport traceback\nimport uuid\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom dataclasses import asdict, dataclass, field, is_dataclass\nfrom datetime import datetime\nfrom pathlib import Path\nfrom typing import Optional\n\nimport gymnasium as gym\nimport numpy as np\nfrom browsergym.core.chat import 
Chat","source_hash":"df7878b63efa81189cdb7b214765466d94d2c2f84a41a1d160718bc07673d2d0","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/args.py","uri":"program://AgentLab/file/src/agentlab/experiments/args.py","kind":"file","name":"src/agentlab/experiments/args.py","path":"src/agentlab/experiments/args.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import copy\nfrom abc import ABC\nfrom dataclasses import fields, is_dataclass\nfrom itertools import product\nfrom typing import Any\n\nimport numpy as np\n\n\nclass CrossProd:\n \"\"\"Use to specify that this will be part of a cross product\"\"\"\n\n def __init__(self, elements):\n self.elements = elements\n\n\nclass Distribution(ABC):\n \"\"\"Generic Class to identify that this is a distribution\"\"\"\n\n def sample(self):\n pass","source_hash":"30ff2cebecae01c0a3a071dad3427ae92d108ec344da45dbd2c9209d4ba14fde","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/list_openai_models.py","uri":"program://AgentLab/file/src/agentlab/experiments/list_openai_models.py","kind":"file","name":"src/agentlab/experiments/list_openai_models.py","path":"src/agentlab/experiments/list_openai_models.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":17,"code":"import pandas as pd\nfrom openai import OpenAI\n\nif __name__ == \"__main__\":\n models = OpenAI().models.list()\n df = pd.DataFrame([dict(model) for model in models.data])\n\n # Filter GPT models or o1 models\n # df = df[df[\"id\"].str.contains(\"gpt\") | df[\"id\"].str.contains(\"o1\")]\n\n # Convert Unix timestamps to dates (YYYY-MM-DD) and remove time\n df[\"created\"] = pd.to_datetime(df[\"created\"], unit=\"s\").dt.date\n df.sort_values(by=\"created\", inplace=True)\n # Print all entries\n\n # print all entries\n print(df.to_string(index=False))","source_hash":"9f935337b3d793cfb6f6d54b6bbd75b69d2fa8ad5a86fbd6892679459d8766c4","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/exp_utils.py","uri":"program://AgentLab/file/src/agentlab/experiments/exp_utils.py","kind":"file","name":"src/agentlab/experiments/exp_utils.py","path":"src/agentlab/experiments/exp_utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nimport os\nimport signal\nimport sys\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom time import sleep, time\n\nfrom tqdm import tqdm\n\nfrom agentlab.experiments.loop import ExpArgs, yield_all_exp_results\n\nlogger = logging.getLogger(__name__) # Get logger based on module name\n\n\n# TODO move this to a more appropriate place\nRESULTS_DIR = os.environ.get(\"AGENTLAB_EXP_ROOT\", None)\nif RESULTS_DIR is None:\n RESULTS_DIR = os.environ.get(\"UI_COPILOT_RESULTS_DIR\", None)\nif RESULTS_DIR is None:\n logging.info(\"$AGENTLAB_EXP_ROOT is not defined, Using $HOME/agentlab_results.\")","source_hash":"17d1b5506ed9382907144659933fe9f98306d66360952b7b038b4328e0ccd15c","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/multi_server.py","uri":"program://AgentLab/file/src/agentlab/experiments/multi_server.py","kind":"file","name":"src/agentlab/experiments/multi_server.py","path":"src/agentlab/experiments/multi_server.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from copy import deepcopy\nfrom dataclasses import 
dataclass\nimport os\nimport sys\nfrom browsergym.webarena.instance import WebArenaInstance\n\n\nclass BaseServer:\n \"\"\"Base class for server instances.\n\n Behaves like an identity function for running in parallel on servers that don't need multiple\n instances.\n \"\"\"\n\n def init(self):\n pass\n\n\n@dataclass\nclass WebArenaInstanceVars(BaseServer):\n base_url: str","source_hash":"b2e9ed6a57c9dce3c95b9b51a0341cbd691b6d291027868cfd623679c881f805","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/experiments/reproduce_study.py","uri":"program://AgentLab/file/src/agentlab/experiments/reproduce_study.py","kind":"file","name":"src/agentlab/experiments/reproduce_study.py","path":"src/agentlab/experiments/reproduce_study.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":20,"code":"\"\"\"\nThis script will leverage an old study to reproduce it on the same tasks and\nsame seeds. Instead of calling the LLM it will reuse the responses from the old\nllm. Load the study in agent-xray and look at the Agent Info HTML to compare\nthe diff in HTML format.\n\"\"\"\n\nfrom agentlab.agents.generic_agent.reproducibility_agent import reproduce_study\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\n\n\nif __name__ == \"__main__\":\n\n # replace by your study name\n old_study = \"2024-06-03_12-28-51_final_run_miniwob_llama3-70b\"\n\n study = reproduce_study(RESULTS_DIR / old_study)\n n_jobs = 1\n\n study.run(n_jobs=n_jobs, parallel_backend=\"joblib\", strict_reproducibility=False)","source_hash":"fb54634e33ff821da2fe641b1294a4be24293939635948dd5a8835047acbf312","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/benchmarks/osworld_axtree_preprocessing.py","uri":"program://AgentLab/file/src/agentlab/benchmarks/osworld_axtree_preprocessing.py","kind":"file","name":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","path":"src/agentlab/benchmarks/osworld_axtree_preprocessing.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import io\nimport xml.etree.ElementTree as ET\nfrom typing import Tuple, List\n\nfrom PIL import Image, ImageDraw, ImageFont\n\n\ndef find_leaf_nodes(xlm_file_str):\n if not xlm_file_str:\n return []\n\n root = ET.fromstring(xlm_file_str)\n\n # Recursive function to traverse the XML tree and collect leaf nodes\n def collect_leaf_nodes(node, leaf_nodes):\n # If the node has no children, it is a leaf node, add it to the list\n if not list(node):\n leaf_nodes.append(node)\n # If the node has children, recurse on each child\n for child in node:\n collect_leaf_nodes(child, leaf_nodes)","source_hash":"4cb35b293c6393e48a284c855e83a477a2f320e71125d5b15b7f35c5784a5813","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/benchmarks/multitool_gym.py","uri":"program://AgentLab/file/src/agentlab/benchmarks/multitool_gym.py","kind":"file","name":"src/agentlab/benchmarks/multitool_gym.py","path":"src/agentlab/benchmarks/multitool_gym.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nimport time\n\nfrom tapeagents.core import Action, Observation, StopStep\nfrom tapeagents.environment import ToolCollectionEnvironment\nfrom tapeagents.tools.base import StatefulTool, Tool\n\nfrom agentlab.benchmarks.abstract_env import AbstractEnv\n\nlogger = logging.getLogger(__name__)\n\n\nclass MultiToolGym(AbstractEnv):\n def __init__(self, tools: list[Tool | StatefulTool], 
max_turns: int = 50):\n self._env = ToolCollectionEnvironment(tools)\n self._actions = self._env.actions()\n self.max_turns = max_turns\n self._turns = 0\n\n def reset(self):\n self._env.reset()","source_hash":"0121f2a4c9e33e166df62de14a979af68c34c4c370c7a854d9045b9cccf7b907","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/benchmarks/osworld.py","uri":"program://AgentLab/file/src/agentlab/benchmarks/osworld.py","kind":"file","name":"src/agentlab/benchmarks/osworld.py","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import ast\nimport importlib.util\nimport json\nimport logging\nimport os\nimport time\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import Any, Literal\n\nimport numpy as np\nfrom bgym import AbstractActionSet\nfrom dataclasses_json import DataClassJsonMixin\nfrom PIL import Image\n\nfrom agentlab.benchmarks.abstract_env import (\n AbstractBenchmark,\n AbstractEnv,\n AbstractEnvArgs,","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/benchmarks/setup_benchmark.py","uri":"program://AgentLab/file/src/agentlab/benchmarks/setup_benchmark.py","kind":"file","name":"src/agentlab/benchmarks/setup_benchmark.py","path":"src/agentlab/benchmarks/setup_benchmark.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"Tiny benchmark setup helpers.\n\nCurrently supports MiniWob++: clones the repo at a pinned commit and writes\nMINIWOB_URL to .env. Designed to be minimal and easy to maintain.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport logging\nimport os\nimport pathlib\nfrom typing import Optional\n\nlogger = logging.getLogger(__name__)\n\n\ndef _ensure_repo(repo_url: str, clone_dir: pathlib.Path, commit: Optional[str] = None) -> None:\n \"\"\"Clone repo if missing and optionally checkout a commit (minimal, shell-only).\n\n Args:\n repo_url: URL of the git repository to clone.","source_hash":"a2597599a39af02750d5124d62209074fb748a14c1b64b8eb71b075b1221541c","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/benchmarks/gaia.py","uri":"program://AgentLab/file/src/agentlab/benchmarks/gaia.py","kind":"file","name":"src/agentlab/benchmarks/gaia.py","path":"src/agentlab/benchmarks/gaia.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nimport os\nimport re\nimport shutil\nimport string\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Any, Literal, Self\n\nimport datasets\nimport hydra\nimport podman\nimport tapeagents.config\nfrom omegaconf import DictConfig\nfrom pdf2image import convert_from_path\nfrom pydantic import ConfigDict, Field\nfrom tapeagents.core import Action, Observation, StopStep, Thought\nfrom tapeagents.environment import ContainerExecutor, StatefulTool, Tool\nfrom tapeagents.steps import ImageObservation\nfrom tapeagents.tools.simple_browser import SimpleTextBrowser\n","source_hash":"db73d9f3e673af2bc589b90e998691b15ddaa5957e8fbc0b3d1177e6224f8ec4","truncated":false} 
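[Editor's note] The setup_benchmark.py record above documents _ensure_repo, a minimal clone-and-pin helper. A small usage sketch follows. The MiniWob++ repository URL and the local clone directory are hypothetical placeholders chosen for illustration; only the _ensure_repo signature (repo_url, clone_dir, optional commit) comes from the record itself.

import pathlib

from agentlab.benchmarks.setup_benchmark import _ensure_repo

# Hypothetical inputs: the repo URL and clone location are placeholders,
# not values taken from the record above.
repo_url = "https://github.com/Farama-Foundation/miniwob-plusplus.git"
clone_dir = pathlib.Path.home() / "agentlab_benchmarks" / "miniwob-plusplus"

# Clones the repo if missing and, when a commit is given, checks it out
# (per the helper's docstring in the record above).
_ensure_repo(repo_url, clone_dir, commit=None)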
{"repo_id":"AgentLab","entity_id":"file:src/agentlab/benchmarks/abstract_env.py","uri":"program://AgentLab/file/src/agentlab/benchmarks/abstract_env.py","kind":"file","name":"src/agentlab/benchmarks/abstract_env.py","path":"src/agentlab/benchmarks/abstract_env.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import time\nfrom abc import ABC, abstractmethod\nfrom functools import wraps\n\nimport gymnasium as gym\nfrom dataclasses_json import DataClassJsonMixin\nfrom pydantic import BaseModel\n\n\nclass AbstractEnvArgs(DataClassJsonMixin):\n @abstractmethod\n def make_env(self, action_mapping, exp_dir, exp_task_kwargs) -> \"AbstractEnv\":\n \"\"\"Create an instance of the environment with the arguments stored in this object.\n\n Args:\n action_mapping (dict[str,str]): mapping from the agent's action space to the environment's action space\n see AbstractActionSet.to_python_code from BrowserGym for an example\n exp_dir (str): directory where the experiment is stored\n exp_task_kwargs (dict[str,Any]): additional arguments for the environment\n\n Returns:","source_hash":"4054d73c1c64b770c3f4a905170391f03fbefb17e92af7bf39464d4cf98e29c2","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/benchmarks/osworld.md","uri":"program://AgentLab/file/src/agentlab/benchmarks/osworld.md","kind":"file","name":"src/agentlab/benchmarks/osworld.md","path":"src/agentlab/benchmarks/osworld.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import ast\nimport importlib.util\nimport json\nimport logging\nimport os\nimport time\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import Any, Literal\n\nimport numpy as np\nfrom bgym import AbstractActionSet\nfrom dataclasses_json import DataClassJsonMixin\nfrom PIL import Image\n\nfrom agentlab.benchmarks.abstract_env import (\n AbstractBenchmark,\n AbstractEnv,\n AbstractEnvArgs,","source_hash":"c9d01601d1e42a7119d50f151f9ea2c1ad3db146dd657c5d551000493811d9b4","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/debug_agent.py","uri":"program://AgentLab/file/src/agentlab/agents/debug_agent.py","kind":"file","name":"src/agentlab/agents/debug_agent.py","path":"src/agentlab/agents/debug_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from copy import deepcopy\nfrom dataclasses import asdict, dataclass\nfrom functools import partial\n\nimport bgym\nfrom browsergym.experiments.agent import Agent, AgentInfo\nfrom browsergym.utils.obs import flatten_axtree_to_str, flatten_dom_to_str, overlay_som, prune_html\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import ParseError, image_to_png_base64_url, parse_html_tags_raise, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\n@dataclass\nclass DebugAgentArgs(AgentArgs):\n\n def __post_init__(self):\n try: # some attributes might be temporarily args.CrossProd for hyperparameter generation\n self.agent_name = f\"debug\".replace(\"/\", \"_\")\n except AttributeError:","source_hash":"48d9323219b0658be63c19e3f41255000da7c606419646f214814b733a4ef6ff","truncated":false} 
{"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/agent_utils.py","uri":"program://AgentLab/file/src/agentlab/agents/agent_utils.py","kind":"file","name":"src/agentlab/agents/agent_utils.py","path":"src/agentlab/agents/agent_utils.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import copy\n\nfrom PIL import Image, ImageDraw\nfrom playwright.sync_api import Page\n\nfrom agentlab.analyze import overlay_utils\nfrom agentlab.llm.llm_utils import img_to_base_64\n\n\ndef draw_mouse_pointer(image: Image.Image, x: int, y: int) -> Image.Image:\n \"\"\"\n Draws a semi-transparent mouse pointer at (x, y) on the image.\n Returns a new image with the pointer drawn.\n\n Args:\n image: The image to draw the mouse pointer on.\n x: The x coordinate for the mouse pointer.\n y: The y coordinate for the mouse pointer.\n\n Returns:\n A new image with the mouse pointer drawn.","source_hash":"4462d17e470b9949b50ce3d5d90d0e1bb0197e37747f67e2d3c56f4f25a87451","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/agent_args.py","uri":"program://AgentLab/file/src/agentlab/agents/agent_args.py","kind":"file","name":"src/agentlab/agents/agent_args.py","path":"src/agentlab/agents/agent_args.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import bgym\nfrom bgym import AbstractAgentArgs, Benchmark\n\n\nclass AgentArgs(AbstractAgentArgs):\n \"\"\"Base class for agent arguments for instantiating an agent.\n\n Define agent arguments as dataclass variables of this class. For example:\n\n class MyAgentArgs(AgentArgs):\n my_arg: str = \"default_value\"\n my_other_arg: int = 42\n\n Note: for working properly with AgentXRay, the arguments need to be serializable and hasable.\n \"\"\"\n\n def set_benchmark(self, benchmark: Benchmark, demo_mode: bool):\n \"\"\"Optional method to set benchmark specific flags.\n\n This allows the agent to have minor adjustments based on the benchmark.\n E.g. using a benchmark specific action space. 
Or letting the agent see","source_hash":"d03b7139142f9e096b89377ebb32196c2727f77c251de82f9ae26f189add791a","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/dynamic_prompting.py","uri":"program://AgentLab/file/src/agentlab/agents/dynamic_prompting.py","kind":"file","name":"src/agentlab/agents/dynamic_prompting.py","path":"src/agentlab/agents/dynamic_prompting.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import abc\nimport logging\nimport platform\nimport time\nfrom copy import copy, deepcopy\nfrom dataclasses import asdict, dataclass\nfrom textwrap import dedent\nfrom typing import Literal\nfrom warnings import warn\n\nimport bgym\nfrom bgym import HighLevelActionSetArgs\nfrom browsergym.core.action.base import AbstractActionSet\nfrom browsergym.utils.obs import flatten_axtree_to_str, flatten_dom_to_str, overlay_som, prune_html\n\nfrom agentlab.llm.llm_utils import (\n BaseMessage,\n ParseError,\n count_tokens,\n extract_code_blocks,\n image_to_jpg_base64_url,","source_hash":"2ff2b702dfe1b18217dedd423ab941d3b8822982bcd3f0711f52cc8bd6666202","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/__init__.py","uri":"program://AgentLab/file/src/agentlab/agents/__init__.py","kind":"file","name":"src/agentlab/agents/__init__.py","path":"src/agentlab/agents/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":13,"code":"\"\"\"\nAgentLab's pre-implemented agents.\n\nThis module contains the agent implementations for AgentLab. With currently:\n\n- GenericAgent: Our baseline agent for evaluation\n\n- MostBasicAgent: A basic agent for learning our framework\n\n- TapeAgent: An agent that uses the Tape data structure to perform actions\n\n- VisualWebArenaAgent: An implementation of the agent used in WebArena and VisualWebArena\n\"\"\"","source_hash":"c3d8fe96df3a586a91843c85c9f3958ae1568e4fc63e8883b96127815453c652","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/generic_agent/agent_configs.py","uri":"program://AgentLab/file/src/agentlab/agents/generic_agent/agent_configs.py","kind":"file","name":"src/agentlab/agents/generic_agent/agent_configs.py","path":"src/agentlab/agents/generic_agent/agent_configs.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nBasic flags and agent configurations for generic agents.\n\"\"\"\n\nimport bgym\nfrom bgym import HighLevelActionSetArgs\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.experiments import args\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\nfrom .generic_agent import GenericAgentArgs\nfrom .generic_agent_prompt import GenericPromptFlags\nfrom .tmlr_config import BASE_FLAGS\n\nFLAGS_CUSTOM = GenericPromptFlags(\n obs=dp.ObsFlags(\n use_html=False,\n use_ax_tree=True,\n use_focused_element=True,\n use_error_logs=True,","source_hash":"6020025b89c9e1cc61602abf8aa62eb226f93863096f465504f3e2c8b4320153","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/generic_agent/tmlr_config.py","uri":"program://AgentLab/file/src/agentlab/agents/generic_agent/tmlr_config.py","kind":"file","name":"src/agentlab/agents/generic_agent/tmlr_config.py","path":"src/agentlab/agents/generic_agent/tmlr_config.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nSpecific configurations for our 2024 TMLR 
submission.\n\"\"\"\n\nfrom copy import deepcopy\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.experiments import args\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\nfrom .generic_agent import GenericAgentArgs\nfrom .generic_agent_prompt import GenericPromptFlags\n\nBASE_FLAGS = GenericPromptFlags(\n obs=dp.ObsFlags(\n use_html=False,\n use_ax_tree=True,\n use_focused_element=True,\n use_error_logs=True,\n use_history=True,\n use_past_error_logs=False,","source_hash":"06211236905e1cd069c121dedb6f097aa004ae9a3a2d70db8443e52c710b4e5a","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/generic_agent/generic_agent_prompt.py","uri":"program://AgentLab/file/src/agentlab/agents/generic_agent/generic_agent_prompt.py","kind":"file","name":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","path":"src/agentlab/agents/generic_agent/generic_agent_prompt.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nPrompt builder for GenericAgent\n\nIt is based on the dynamic_prompting module from the agentlab package.\n\"\"\"\n\nimport logging\nfrom dataclasses import dataclass\n\nfrom browsergym.core import action\nfrom browsergym.core.action.base import AbstractActionSet\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.llm.llm_utils import HumanMessage, parse_html_tags_raise\n\n\n@dataclass\nclass GenericPromptFlags(dp.Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n","source_hash":"e690d7ed2be97e8ca025ab9ad9919704d985e04dc4a0085e862e7cfb6da199c7","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/generic_agent/__init__.py","uri":"program://AgentLab/file/src/agentlab/agents/generic_agent/__init__.py","kind":"file","name":"src/agentlab/agents/generic_agent/__init__.py","path":"src/agentlab/agents/generic_agent/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nBaseline agent for all ServiceNow papers\n\nThis module contains the GenericAgent class, which is the baseline agent for all ServiceNow papers. \\\nIt is a simple agent that can be ran OOB on all BrowserGym environments. It is also shipped with \\\na few configurations that can be used to run it on different environments.\n\"\"\"\n\nfrom .agent_configs import (\n AGENT_3_5,\n AGENT_8B,\n AGENT_37_SONNET,\n AGENT_CLAUDE_SONNET_35,\n AGENT_CLAUDE_SONNET_35_VISION,\n AGENT_CUSTOM,\n AGENT_LLAMA3_70B,\n AGENT_LLAMA4_17B_INSTRUCT,\n AGENT_LLAMA31_70B,\n CHAT_MODEL_ARGS_DICT,\n RANDOM_SEARCH_AGENT,\n AGENT_4o,","source_hash":"2a5d9ed5c01a3ecf108ab597a6d59f0a21f1cd0688540785eacaef6ae7b1081b","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/generic_agent/reproducibility_agent.py","uri":"program://AgentLab/file/src/agentlab/agents/generic_agent/reproducibility_agent.py","kind":"file","name":"src/agentlab/agents/generic_agent/reproducibility_agent.py","path":"src/agentlab/agents/generic_agent/reproducibility_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nAn agent that reproduces exactly the same traces as GenericAgent, to compare the results.\n\n\nThis module contains the classes and functions to reproduce the results of a\nstudy. 
It is used to create a new study that will run the same experiments as\nthe original study, but with a reproducibility agent that will mimic the same\nanswers as the original agent.\n\nStats are collected to compare the original agent's answers with the new agent's\nanswers. Load this reproducibility study in agent-xray to compare the results.\n\"\"\"\n\nimport difflib\nimport logging\nimport time\nfrom copy import copy\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nimport bgym","source_hash":"4795ed880ced937c515cd3e8ddf827383ae64e84c23a3ca7ddb43c6677e307b3","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/generic_agent/generic_agent.py","uri":"program://AgentLab/file/src/agentlab/agents/generic_agent/generic_agent.py","kind":"file","name":"src/agentlab/agents/generic_agent/generic_agent.py","path":"src/agentlab/agents/generic_agent/generic_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nGenericAgent implementation for AgentLab\n\nThis module defines a `GenericAgent` class and its associated arguments for use in the AgentLab framework. \\\nThe `GenericAgent` class is designed to interact with a chat-based model to determine actions based on \\\nobservations. It includes methods for preprocessing observations, generating actions, and managing internal \\\nstate such as plans, memories, and thoughts. The `GenericAgentArgs` class provides configuration options for \\\nthe agent, including model arguments and flags for various behaviors.\n\"\"\"\n\nfrom copy import deepcopy\nfrom dataclasses import asdict, dataclass\nfrom functools import partial\nfrom warnings import warn\n\nimport bgym\nfrom bgym import Benchmark\nfrom browsergym.experiments.agent import Agent, AgentInfo\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_args import AgentArgs","source_hash":"9e02ab608fc8c0e5ecb5f89846584a35c120ce97e2c5af5dfc39a9532f51895c","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","uri":"program://AgentLab/file/src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","kind":"file","name":"src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","path":"src/agentlab/agents/hitl_agent/base_multi_candidate_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from typing_extensions import Protocol\n\nfrom agentlab.agents.agent_args import AgentArgs\n\n\nclass MultiCandidateAgent(Protocol):\n \"\"\"\n Protocol for agents that generate multiple candidates for get_action.\n\n This protocol defines the contract for agents that can generate\n multiple candidate actions and allow selection of one of them for execution.\n \"\"\"\n\n def get_candidate_generations(\n self, obs: dict, hint: list[str] | None = None, n_candidates: int = 3\n ) -> \"list[dict]\":\n \"\"\"\n Generate multiple candidate actions for the given observation.\n\n You can pass extra info in agent_info to update internal state of the\n agent based on the selected candidate. 
Your internal state management","source_hash":"9be787fa4619fecac3c7f3d314dbf3a8b369797bfbcd9da134729e601b18a596","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/hitl_agent/launch_hint_ui.py","uri":"program://AgentLab/file/src/agentlab/agents/hitl_agent/launch_hint_ui.py","kind":"file","name":"src/agentlab/agents/hitl_agent/launch_hint_ui.py","path":"src/agentlab/agents/hitl_agent/launch_hint_ui.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nConsole launcher for the Human-in-the-Loop Generic Agent UI.\n\nUsage (installed entry point):\n agentlab-mentor --benchmark miniwob --task-name miniwob.book-flight --seed 123 --no-headless\n\nThis will run a Study with the MultipleProposalGenericAgent and the selected task.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport argparse\nimport logging\nfrom pathlib import Path\n\nimport bgym\n\nfrom agentlab.agents.hitl_agent.generic_human_guided_agent import get_base_agent\nfrom agentlab.experiments.exp_utils import RESULTS_DIR\nfrom agentlab.experiments.study import Study\n","source_hash":"c683689e509a684a36e0b5ae00b55d64df79c43b22b3199c053399e2514f5a71","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","uri":"program://AgentLab/file/src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","kind":"file","name":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","path":"src/agentlab/agents/hitl_agent/multi_candidate_generic_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import re\nfrom dataclasses import asdict, dataclass\nfrom typing import Dict, List\n\nfrom browsergym.experiments.agent import AgentInfo\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgent, GenericAgentArgs\nfrom agentlab.agents.generic_agent.generic_agent_prompt import MainPrompt\nfrom agentlab.llm.llm_utils import Discussion, HumanMessage, SystemMessage\n\n\nclass CandidatesGeneration(dp.PromptElement):\n # Ask for multiple alternatives; each candidate must contain <think> and <action>.\n def __init__(self, hint: list[str] | None = None, n_candidates=3) -> None:\n self.hint = hint\n self.n_candidates = n_candidates\n self.hint_prompt = \"\\n\".join(f\"{i}. 
{c}\" for i, c in enumerate(hint, 1)) if hint else \"\"\n super().__init__(True)\n self._prompt = [\n dict(","source_hash":"5c1333972fbbdf23cbb8773feaacf67e1a818ae613d6adb17b19bb143073211d","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/hitl_agent/hint_labelling.py","uri":"program://AgentLab/file/src/agentlab/agents/hitl_agent/hint_labelling.py","kind":"file","name":"src/agentlab/agents/hitl_agent/hint_labelling.py","path":"src/agentlab/agents/hitl_agent/hint_labelling.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import json\nimport logging\nfrom importlib import resources\nfrom queue import Queue\nfrom typing import Dict, List, Optional\n\nimport playwright.sync_api\nfrom browsergym.core import _get_global_playwright\nfrom pydantic import BaseModel, Field\n\nlogger = logging.getLogger(__name__)\n\nHINT_LABELING_DIR = resources.files(\"agentlab.agents.hitl_agent.hint_labelling_ui_files\")\n\n\nclass HintLabelingInputs(BaseModel):\n goal: str\n error_feedback: str = \"\"\n screenshot: str # base64 screenshot (original/current)\n screenshots: List[str] = Field(default_factory=list) # list of base64 screenshots for hover\n axtree: str","source_hash":"de08d8c488bf308ab6d374a107442c6c9ee8f0a1be83dc1ddb3609458b7f0333","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/hitl_agent/hitl_agent.py","uri":"program://AgentLab/file/src/agentlab/agents/hitl_agent/hitl_agent.py","kind":"file","name":"src/agentlab/agents/hitl_agent/hitl_agent.py","path":"src/agentlab/agents/hitl_agent/hitl_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"from dataclasses import dataclass\nfrom typing import Optional\n\nimport bgym\nimport playwright\nfrom browsergym.experiments.agent import Agent\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.agents.agent_utils import overlay_action\nfrom agentlab.agents.hitl_agent.base_multi_candidate_agent import MultiCandidateAgent\nfrom agentlab.agents.hitl_agent.hint_labelling import (\n HintLabeling,\n HintLabelingInputs,\n)\nfrom agentlab.llm.llm_utils import img_to_base_64\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\n\nclass HumanInTheLoopAgent(Agent):\n\n def __init__(","source_hash":"caf3281cd0903c179066ffe16ff1af60ce1821b667e4368d4d92b80696f74396","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","uri":"program://AgentLab/file/src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","kind":"file","name":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","path":"src/agentlab/agents/hitl_agent/generic_human_guided_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import base64\nimport copy\nimport io\nimport re\nfrom dataclasses import Field, asdict, dataclass\nfrom typing import Dict, List\n\nimport bgym\nimport numpy as np\nfrom browsergym.experiments.agent import AgentInfo\nfrom PIL import Image\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_utils import overlay_action\nfrom agentlab.agents.generic_agent.generic_agent import GenericAgent, GenericAgentArgs\nfrom agentlab.agents.generic_agent.generic_agent_prompt import MainPrompt\nfrom agentlab.agents.hitl_agent.hint_labelling import (\n HintLabeling,\n HintLabelingInputs,\n)\nfrom agentlab.llm.llm_utils import 
(","source_hash":"f8460a9a650e76095028142556a146fcf58f38b76dcabb6d885d2ba81b245aea","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/tool_use_agent/__init__.py","uri":"program://AgentLab/file/src/agentlab/agents/tool_use_agent/__init__.py","kind":"file","name":"src/agentlab/agents/tool_use_agent/__init__.py","path":"src/agentlab/agents/tool_use_agent/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":6,"code":"import sys\n\nfrom agentlab.agents.tool_use_agent.tool_use_agent import *\n\n# for backward compatibility of unpickling\nsys.modules[__name__ + \".multi_tool_agent\"] = sys.modules[__name__]","source_hash":"7f3f213f972f4d7459846c62fdf8b9c09da91cd28610d3afd59cef384cfa1d3c","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/tool_use_agent/tool_use_agent.py","uri":"program://AgentLab/file/src/agentlab/agents/tool_use_agent/tool_use_agent.py","kind":"file","name":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","path":"src/agentlab/agents/tool_use_agent/tool_use_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import fnmatch\nimport json\nimport logging\nimport os\nimport random\nimport time\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom copy import copy\nfrom dataclasses import asdict, dataclass, field\nfrom pathlib import Path\nfrom typing import Any, Literal\n\nimport bgym\nimport numpy as np\nimport pandas as pd\nimport requests\nfrom bgym import Benchmark as BgymBenchmark\nfrom browsergym.core.observation import extract_screenshot\nfrom browsergym.utils.obs import (\n flatten_axtree_to_str,","source_hash":"0aaa22a2ebd8845f6a17e105ac4061ad48d951f42e10d09378bdea9927c9b8bb","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/visualwebarena/agent.py","uri":"program://AgentLab/file/src/agentlab/agents/visualwebarena/agent.py","kind":"file","name":"src/agentlab/agents/visualwebarena/agent.py","path":"src/agentlab/agents/visualwebarena/agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import base64\nimport importlib.resources\nimport io\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import partial\nfrom typing import Any, Literal\n\nimport numpy as np\nimport PIL.Image\nfrom browsergym.core.action.highlevel import HighLevelActionSet\nfrom browsergym.experiments import Agent, AgentInfo\nfrom browsergym.experiments.benchmark import Benchmark, HighLevelActionSetArgs\nfrom browsergym.utils.obs import overlay_som\n\nfrom agentlab.llm.base_api import AbstractChatModel\nfrom agentlab.llm.chat_api import BaseModelArgs, make_system_message, make_user_message\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.llm_utils import ParseError, extract_code_blocks, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator\n","source_hash":"912b863f7f974eecbc6c38dd320667aba7b66c5281d610f35992a3eb8abedeb5","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/visualwebarena/prompts.py","uri":"program://AgentLab/file/src/agentlab/agents/visualwebarena/prompts.py","kind":"file","name":"src/agentlab/agents/visualwebarena/prompts.py","path":"src/agentlab/agents/visualwebarena/prompts.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"# Best-attempt reproduction of original 
prompts used by the VisualWebArena agent\n\nTEMPLATES = {}\n\n# https://github.com/web-arena-x/visualwebarena/blob/89f5af29305c3d1e9f97ce4421462060a70c9a03/agent/prompts/raw/p_cot_id_actree_3s.py#L1\nTEMPLATES[\"axtree\"] = {\n \"intro\": \"\"\"\\\nYou are an autonomous intelligent agent tasked with navigating a web browser. You will be given web-based tasks. These tasks will be accomplished through the use of specific actions you can issue.\n\nHere's the information you'll have:\nThe user's objective: This is the task you're trying to complete.\nThe current web page's accessibility tree: This is a simplified representation of the webpage, providing key information.\nThe current web page's URL: This is the page you're currently navigating.\nThe open tabs: These are the tabs you have open.\nThe previous action: This is the action you just performed. It may be helpful to track your progress.\n\n{action_space_description}\n\nTo be successful, it is very important to follow the following rules:\n1. You should only issue an action that is valid given the current observation\n2. You should only issue one action at a time.","source_hash":"fe1c62488bb08638debc1156c94e20ff19342180acfe4c18eb51b96b74f2bc62","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/most_basic_agent/most_basic_agent.py","uri":"program://AgentLab/file/src/agentlab/agents/most_basic_agent/most_basic_agent.py","kind":"file","name":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","path":"src/agentlab/agents/most_basic_agent/most_basic_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nfrom dataclasses import asdict, dataclass\nfrom typing import TYPE_CHECKING, Any\n\nimport bgym\n\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.experiments.loop import ExpArgs\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.llm_utils import (\n Discussion,\n HumanMessage,\n ParseError,\n SystemMessage,\n extract_code_blocks,\n retry,\n)\nfrom agentlab.llm.tracking import cost_tracker_decorator\n\nif TYPE_CHECKING:\n from agentlab.llm.chat_api import BaseModelArgs","source_hash":"e5bb1cb531d9222de92644376a2b8c982cb1e1cbf6358da92daf2e88e56be510","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/visual_agent/agent_configs.py","uri":"program://AgentLab/file/src/agentlab/agents/visual_agent/agent_configs.py","kind":"file","name":"src/agentlab/agents/visual_agent/agent_configs.py","path":"src/agentlab/agents/visual_agent/agent_configs.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import bgym\nfrom bgym import HighLevelActionSetArgs\n\nimport agentlab.agents.dynamic_prompting as dp\nfrom agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\n\nfrom .visual_agent import VisualAgentArgs\nfrom .visual_agent_prompts import PromptFlags\n\n# the other flags are ignored for this agent.\nDEFAULT_OBS_FLAGS = dp.ObsFlags(\n use_tabs=True, # will be overridden by the benchmark when set_benchmark is called after initializing the agent\n use_error_logs=True,\n use_past_error_logs=False,\n use_screenshot=True,\n use_som=False,\n openai_vision_detail=\"auto\",\n)\n\nDEFAULT_ACTION_FLAGS = dp.ActionFlags(\n action_set=HighLevelActionSetArgs(subsets=[\"coord\"]),","source_hash":"4a2d34d262ffc699c7888d2fa972367b853b989de126d2e6159546d6f266e113","truncated":false} 
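[Editor's note] The visual_agent/agent_configs.py record above composes observation and action flags into agent defaults. The sketch below is a variant built only from field names visible in that record; the changed value (use_som=True) and the variable names are illustrative choices, not recommendations from the source.

import agentlab.agents.dynamic_prompting as dp
from bgym import HighLevelActionSetArgs

# Same fields as DEFAULT_OBS_FLAGS in the record above, with set-of-marks
# overlays enabled instead of disabled.
my_obs_flags = dp.ObsFlags(
    use_tabs=True,
    use_error_logs=True,
    use_past_error_logs=False,
    use_screenshot=True,
    use_som=True,  # the record's default is False; flipped here for illustration
    openai_vision_detail="auto",
)

# Coordinate-based action space, as in DEFAULT_ACTION_FLAGS in the record above.
my_action_flags = dp.ActionFlags(
    action_set=HighLevelActionSetArgs(subsets=["coord"]),
)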
{"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/visual_agent/visual_agent.py","uri":"program://AgentLab/file/src/agentlab/agents/visual_agent/visual_agent.py","kind":"file","name":"src/agentlab/agents/visual_agent/visual_agent.py","path":"src/agentlab/agents/visual_agent/visual_agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nGenericAgent implementation for AgentLab\n\nThis module defines a `GenericAgent` class and its associated arguments for use in the AgentLab framework. \\\nThe `GenericAgent` class is designed to interact with a chat-based model to determine actions based on \\\nobservations. It includes methods for preprocessing observations, generating actions, and managing internal \\\nstate such as plans, memories, and thoughts. The `GenericAgentArgs` class provides configuration options for \\\nthe agent, including model arguments and flags for various behaviors.\n\"\"\"\n\nfrom dataclasses import asdict, dataclass\n\nimport bgym\nfrom bgym import Benchmark\nfrom browsergym.experiments.agent import Agent, AgentInfo\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.agents.agent_args import AgentArgs\nfrom agentlab.llm.chat_api import BaseModelArgs\nfrom agentlab.llm.llm_utils import Discussion, ParseError, SystemMessage, retry\nfrom agentlab.llm.tracking import cost_tracker_decorator","source_hash":"4f085c9089a1abb38ca6509bb2a1119a10d59abd733edff938f95988bc7a0b5e","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/visual_agent/visual_agent_prompts.py","uri":"program://AgentLab/file/src/agentlab/agents/visual_agent/visual_agent_prompts.py","kind":"file","name":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","path":"src/agentlab/agents/visual_agent/visual_agent_prompts.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"\"\"\"\nPrompt builder for GenericAgent\n\nIt is based on the dynamic_prompting module from the agentlab package.\n\"\"\"\n\nimport logging\nfrom dataclasses import dataclass\nimport bgym\n\nfrom browsergym.core.action.base import AbstractActionSet\n\nfrom agentlab.agents import dynamic_prompting as dp\nfrom agentlab.llm.llm_utils import BaseMessage, HumanMessage, image_to_jpg_base64_url\n\n\n@dataclass\nclass PromptFlags(dp.Flags):\n \"\"\"\n A class to represent various flags used to control features in an application.\n \"\"\"","source_hash":"8a892029aea421778ab9e274dcbea31ed91baaca2bf89aec1d229c18c6bb58f5","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/tapeagent/agent.py","uri":"program://AgentLab/file/src/agentlab/agents/tapeagent/agent.py","kind":"file","name":"src/agentlab/agents/tapeagent/agent.py","path":"src/agentlab/agents/tapeagent/agent.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nfrom dataclasses import dataclass\nfrom typing import Literal\n\nimport bgym\nimport hydra\nfrom omegaconf import DictConfig\nfrom pydantic import Field\nfrom tapeagents.agent import Agent\nfrom tapeagents.core import Action, Observation, StopStep, TapeMetadata, Thought\nfrom tapeagents.core import Tape as BaseTape\n\nfrom agentlab.agents.agent_args import AgentArgs\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass ExtendedMetadata(TapeMetadata):\n name: str = \"\"\n task: dict = 
{}","source_hash":"cbbcba9b006e6d3ccc039bd74c29ec4ef9240b940dff78a509e2b75c3e29c461","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/tapeagent/__init__.py","uri":"program://AgentLab/file/src/agentlab/agents/tapeagent/__init__.py","kind":"file","name":"src/agentlab/agents/tapeagent/__init__.py","path":"src/agentlab/agents/tapeagent/__init__.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import json\nfrom dataclasses import asdict, is_dataclass\n\nimport numpy as np\nfrom tapeagents.core import Step, StepMetadata\nfrom tapeagents.dialog_tape import AssistantStep, AssistantThought\nfrom tapeagents.io import save_json_tape, save_tape_images\n\nfrom agentlab.agents.tapeagent.agent import DictObservation, Tape, TapeAgent\n\n__all__ = [\"as_tape\", \"save_tape\", \"TapeAgent\", \"Tape\"]\n\n\ndef as_tape(steps_info: list) -> Tape:\n \"\"\"\n Create a Tape object from the steps info.\n\n Args:\n steps_info: list of StepInfo objects.\n\n Returns:","source_hash":"b118ce2e7080663f1e85f030ef14f6b23da49b6f41df61f9b68cc9f5ee239873","truncated":false} {"repo_id":"AgentLab","entity_id":"file:src/agentlab/agents/tapeagent/experiments/run_gaia.py","uri":"program://AgentLab/file/src/agentlab/agents/tapeagent/experiments/run_gaia.py","kind":"file","name":"src/agentlab/agents/tapeagent/experiments/run_gaia.py","path":"src/agentlab/agents/tapeagent/experiments/run_gaia.py","language":"python","start_line":1,"end_line":1,"context_start_line":1,"context_end_line":21,"code":"import logging\nimport os\n\nfrom agentlab.agents.tapeagent.agent import TapeAgentArgs, load_config\nfrom agentlab.benchmarks.gaia import GaiaBenchmark, stop_old_sandbox\nfrom agentlab.experiments.study import make_study\n\nfmt = \"%(asctime)s - %(levelname)s - %(name)s:%(lineno)d - %(funcName)s() - %(message)s\"\nlogging.basicConfig(level=logging.INFO, force=True, format=fmt, handlers=[logging.StreamHandler()])\n\nif __name__ == \"__main__\":\n config = load_config(\"gaia_l1\")\n study = make_study(\n benchmark=GaiaBenchmark.from_config(config), # type: ignore\n agent_args=TapeAgentArgs(agent_name=config.name, config=config),\n comment=config.comment,\n logging_level=logging.INFO,\n logging_level_stdout=logging.INFO,\n )\n stop_old_sandbox()\n if os.environ.get(\"AGENTLAB_DEBUG\"):","source_hash":"6c5bf10626ab7864ca131189e4922c4c8edb78f40023f34855b45955da463ab0","truncated":false}