{"repo_id":"AgentLab","entity_id":"py:main","uri":"program://AgentLab/module/main#L1-L74","kind":"module","name":"main","path":"main.py","language":"python","start_line":1,"end_line":74,"context_start_line":1,"context_end_line":74,"code":"\"\"\"\nNote: This script is a convenience script to launch experiments instead of using\nthe command line.\n\nCopy this script and modify at will, but don't push your changes to the\nrepository.\n\"\"\"\n\nimport logging\n\nfrom agentlab.agents.generic_agent import (\n AGENT_LLAMA3_70B,\n AGENT_LLAMA31_70B,\n RANDOM_SEARCH_AGENT,\n AGENT_4o,\n AGENT_4o_MINI,\n AGENT_o3_MINI,\n AGENT_37_SONNET,\n AGENT_CLAUDE_SONNET_35,\n AGENT_GPT5_MINI,\n)\nfrom agentlab.experiments.study import Study\n\nlogging.getLogger().setLevel(logging.INFO)\n\n# choose your agent or provide a new agent\nagent_args = [AGENT_4o_MINI]\n# agent_args = [AGENT_4o]\n\n\n# ## select the benchmark to run on\nbenchmark = \"miniwob_tiny_test\"\n# benchmark = \"miniwob\"\n# benchmark = \"workarena_l1\"\n# benchmark = \"workarena_l2\"\n# benchmark = \"workarena_l3\"\n# benchmark = \"webarena\"\n\n# Set reproducibility_mode = True for reproducibility\n# this will \"ask\" agents to be deterministic. Also, it will prevent you from launching if you have\n# local changes. For your custom agents you need to implement set_reproducibility_mode\nreproducibility_mode = False\n\n# Set relaunch = True to relaunch an existing study, this will continue incomplete\n# experiments and relaunch errored experiments\nrelaunch = False\n\n## Number of parallel jobs\nn_jobs = 4 # Make sure to use 1 job when debugging in VSCode\n# n_jobs = -1 # to use all available cores\n\n\nif __name__ == \"__main__\": # necessary for dask backend\n\n if reproducibility_mode:\n [a.set_reproducibility_mode() for a in agent_args]\n\n if relaunch:\n # relaunch an existing study\n study = Study.load_most_recent(contains=None)\n study.find_incomplete(include_errors=True)\n\n else:\n study = Study(agent_args, benchmark, logging_level_stdout=logging.WARNING)\n\n study.run(\n n_jobs=n_jobs,\n parallel_backend=\"ray\",\n strict_reproducibility=reproducibility_mode,\n n_relaunch=3,\n )\n\n if reproducibility_mode:\n study.append_to_journal(strict_reproducibility=True)","source_hash":"cf52282bd08c6ac5ba93fcfc1ccacc25b04c07ed59f1f8a8ad630906d8499366","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:main_workarena_debug","uri":"program://AgentLab/module/main_workarena_debug#L1-L77","kind":"module","name":"main_workarena_debug","path":"main_workarena_debug.py","language":"python","start_line":1,"end_line":77,"context_start_line":1,"context_end_line":77,"code":"\"\"\"\nNote: This script is a convenience script to launch experiments instead of using\nthe command line.\n\nCopy this script and modify at will, but don't push your changes to the\nrepository.\n\"\"\"\n\nimport logging\nfrom copy import deepcopy\n\nimport bgym\n\nfrom agentlab.agents.tool_use_agent.tool_use_agent import (\n DEFAULT_PROMPT_CONFIG,\n GPT_4_1,\n ToolUseAgentArgs,\n)\nfrom agentlab.experiments.study import Study\n\nlogging.getLogger().setLevel(logging.INFO)\n\nconfig = deepcopy(DEFAULT_PROMPT_CONFIG)\n# config.keep_last_n_obs = 1\nconfig.obs.use_som = True\n\n\nagent_configs = [\n ToolUseAgentArgs(\n model_args=GPT_4_1,\n config=config,\n ),\n # ToolUseAgentArgs(\n # model_args=GPT_4_1,\n # config=config,\n # ),\n]\n\nfor agent_config in agent_configs:\n agent_config.config.action_subsets = (\"workarena\",) # use the workarena action set\n\n\n# ## select the benchmark to run on\n# benchmark = \"miniwob_tiny_test\"\nbenchmark = \"workarena_l1\"\n\n\nbenchmark = bgym.DEFAULT_BENCHMARKS[benchmark](n_repeats=4) # type: bgym.Benchmark\nbenchmark = benchmark.subset_from_glob(\"task_name\", \"*create*\")\n\n# for env_args in benchmark.env_args_list:\n# print(env_args.task_name)\n# env_args.max_steps = 15\n\nrelaunch = False\n\n## Number of parallel jobs\nn_jobs = 10 # Make sure to use 1 job when debugging in VSCode\nparallel_backend = \"ray\"\n# parallel_backend = \"sequential\" # activate sequential backend for debugging in VSCode\n\nif __name__ == \"__main__\": # necessary for dask backend\n\n if relaunch:\n # relaunch an existing study\n study = Study.load_most_recent(contains=None)\n study.find_incomplete(include_errors=True)\n\n else:\n study = Study(agent_configs, benchmark, logging_level_stdout=logging.WARNING)\n\n study.run(\n n_jobs=n_jobs,\n parallel_backend=parallel_backend, # \"ray\", \"joblib\" or \"sequential\"\n strict_reproducibility=False,\n n_relaunch=3,\n )","source_hash":"92a99e0d2b6b2fb8c18fb1060abf2c17ba8d9a100132f242959c7fb8ac18a4d8","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:add_study_to_repro_journal","uri":"program://AgentLab/module/add_study_to_repro_journal#L1-L18","kind":"module","name":"add_study_to_repro_journal","path":"add_study_to_repro_journal.py","language":"python","start_line":1,"end_line":18,"context_start_line":1,"context_end_line":18,"code":"import os\nfrom pathlib import Path\nfrom agentlab.experiments.study import Study\n\n\nbase_dir = \"/home/toolkit/ui_copilot_results\"\n\nexp_paths = [\n \"2025-01-31_22-08-34_genericagent-o3-mini-2025-01-31-on-workarena-l1\",\n # '2025-02-02_01-53-45_genericagent-openai-o1-mini-2024-09-12-on-workarena-l1',\n \"2025-02-02_01-55-04_genericagent-openai-o1-mini-2024-09-12-on-workarena-l1\",\n]\nfull_paths = [os.path.join(base_dir, exp_path) for exp_path in exp_paths]\n\nfor full_path in full_paths:\n study = Study.load(Path(full_path))\n\n study.append_to_journal(strict_reproducibility=False)","source_hash":"dc9b4b94f8f744a3656b875dd287be370af8551604785b276bd041bd6ba5b408","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.test_main","uri":"program://AgentLab/module/tests.test_main#L1-L28","kind":"module","name":"tests.test_main","path":"tests/test_main.py","language":"python","start_line":1,"end_line":28,"context_start_line":1,"context_end_line":28,"code":"import subprocess\nimport sys\nfrom pathlib import Path\n\nimport pytest\n\n\n@pytest.mark.pricy\ndef test_main_script_execution():\n # this should trigger agent_4o_mini on miniwob_tiny_test unless this was\n # reconfigured differently.\n path = Path(__file__).parent.parent / \"main.py\"\n\n sys.path.insert(0, str(path.parent))\n\n # just make sure it's in the right state\n main = __import__(path.stem)\n assert main.benchmark == \"miniwob_tiny_test\"\n assert main.reproducibility_mode == False\n assert main.relaunch == False\n assert main.n_jobs <= 10\n\n result = subprocess.run([\"python\", str(path)], capture_output=True, text=True, timeout=5 * 60)\n assert result.returncode == 0\n\n\nif __name__ == \"__main__\":\n test_main_script_execution()","source_hash":"1c339f46889f59d74a302b20ab4529bc84b49bcb05ef73f1d278f62ac5758348","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.test_main.test_main_script_execution","uri":"program://AgentLab/function/tests.test_main.test_main_script_execution#L9-L24","kind":"function","name":"test_main_script_execution","path":"tests/test_main.py","language":"python","start_line":9,"end_line":24,"context_start_line":1,"context_end_line":28,"code":"import subprocess\nimport sys\nfrom pathlib import Path\n\nimport pytest\n\n\n@pytest.mark.pricy\ndef test_main_script_execution():\n # this should trigger agent_4o_mini on miniwob_tiny_test unless this was\n # reconfigured differently.\n path = Path(__file__).parent.parent / \"main.py\"\n\n sys.path.insert(0, str(path.parent))\n\n # just make sure it's in the right state\n main = __import__(path.stem)\n assert main.benchmark == \"miniwob_tiny_test\"\n assert main.reproducibility_mode == False\n assert main.relaunch == False\n assert main.n_jobs <= 10\n\n result = subprocess.run([\"python\", str(path)], capture_output=True, text=True, timeout=5 * 60)\n assert result.returncode == 0\n\n\nif __name__ == \"__main__\":\n test_main_script_execution()","source_hash":"1c339f46889f59d74a302b20ab4529bc84b49bcb05ef73f1d278f62ac5758348","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.test_ui_assistant","uri":"program://AgentLab/module/tests.test_ui_assistant#L1-L9","kind":"module","name":"tests.test_ui_assistant","path":"tests/test_ui_assistant.py","language":"python","start_line":1,"end_line":9,"context_start_line":1,"context_end_line":9,"code":"from agentlab.ui_assistant import make_exp_args\nfrom agentlab.agents.generic_agent import AGENT_4o\n\n\ndef test_make_exp_args():\n \"\"\"Basic unit test to detect refactoring errors.\"\"\"\n exp_args = make_exp_args(AGENT_4o, \"https://www.google.com\")\n\n assert exp_args.agent_args.flags.action.demo_mode == \"default\"","source_hash":"3a084bf5c64c372102deec1fae59a53a3c7f99ae7b2bb77735960cb40568a15a","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.test_ui_assistant.test_make_exp_args","uri":"program://AgentLab/function/tests.test_ui_assistant.test_make_exp_args#L5-L9","kind":"function","name":"test_make_exp_args","path":"tests/test_ui_assistant.py","language":"python","start_line":5,"end_line":9,"context_start_line":1,"context_end_line":9,"code":"from agentlab.ui_assistant import make_exp_args\nfrom agentlab.agents.generic_agent import AGENT_4o\n\n\ndef test_make_exp_args():\n \"\"\"Basic unit test to detect refactoring errors.\"\"\"\n exp_args = make_exp_args(AGENT_4o, \"https://www.google.com\")\n\n assert exp_args.agent_args.flags.action.demo_mode == \"default\"","source_hash":"3a084bf5c64c372102deec1fae59a53a3c7f99ae7b2bb77735960cb40568a15a","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic","uri":"program://AgentLab/module/tests.verify_rate_limit_anthropic#L1-L89","kind":"module","name":"tests.verify_rate_limit_anthropic","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":1,"end_line":89,"context_start_line":1,"context_end_line":89,"code":"import os\nimport time\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nimport anthropic\n\nclient = anthropic.Anthropic(api_key=os.environ[\"ANTHROPIC_API_KEY\"])\n\n\ndef make_request(messages):\n response = client.messages.create(\n model=\"claude-3-5-sonnet-20241022\", max_tokens=10, messages=messages\n )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:\n del message[\"content\"][0][\"cache_control\"]\n\n\ndef test_rate_limit_single(thread_id):\n # Create ~100k token message that will be cached\n big_text = \"This is a large block of text for caching. \" * 10000 # ~100k tokens\n medium_text = \"This is a large block of text for caching. \" * 2000 # ~10k tokens\n\n print(f\"Thread {thread_id}: Starting rate limit test with cached content...\")\n\n # Rebuild conversation each time (simulating web agent)\n messages = []\n\n # Add all previous conversation turns\n for i in range(5):\n if i == 0:\n messages.append(make_message(big_text))\n t0 = time.time()\n else:\n messages.append(make_message(medium_text))\n add_cache_control(messages[-1])\n try:\n usage = make_request(messages)\n dt = time.time() - t0\n print(f\"{dt:.2f}: Thread {thread_id}: {usage}\")\n except Exception as e:\n print(f\"Thread {thread_id}: Error - {e}\")\n break\n remove_cache_control(messages[-1])\n\n\ndef test_rate_limit_parallel(num_threads=3):\n print(f\"Starting parallel rate limit test with {num_threads} threads...\")\n\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\n futures = [executor.submit(test_rate_limit_single, i) for i in range(num_threads)]\n\n for future in as_completed(futures):\n try:\n future.result()\n except Exception as e:\n print(f\"Thread completed with error: {e}\")\n\n\ndef test_rate_limit():\n # Original single-threaded version\n test_rate_limit_single(0)\n\n\nif __name__ == \"__main__\":\n # Use parallel version to quickly exhaust rate limits\n test_rate_limit_parallel(num_threads=3)\n\n # Or use original single-threaded version\n # test_rate_limit()","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.make_request","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.make_request#L10-L14","kind":"function","name":"make_request","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":10,"end_line":14,"context_start_line":1,"context_end_line":34,"code":"import os\nimport time\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nimport anthropic\n\nclient = anthropic.Anthropic(api_key=os.environ[\"ANTHROPIC_API_KEY\"])\n\n\ndef make_request(messages):\n response = client.messages.create(\n model=\"claude-3-5-sonnet-20241022\", max_tokens=10, messages=messages\n )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.make_message","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.make_message#L17-L26","kind":"function","name":"make_message","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":17,"end_line":26,"context_start_line":1,"context_end_line":46,"code":"import os\nimport time\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nimport anthropic\n\nclient = anthropic.Anthropic(api_key=os.environ[\"ANTHROPIC_API_KEY\"])\n\n\ndef make_request(messages):\n response = client.messages.create(\n model=\"claude-3-5-sonnet-20241022\", max_tokens=10, messages=messages\n )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:\n del message[\"content\"][0][\"cache_control\"]\n\n\ndef test_rate_limit_single(thread_id):\n # Create ~100k token message that will be cached\n big_text = \"This is a large block of text for caching. \" * 10000 # ~100k tokens\n medium_text = \"This is a large block of text for caching. \" * 2000 # ~10k tokens\n\n print(f\"Thread {thread_id}: Starting rate limit test with cached content...\")\n\n # Rebuild conversation each time (simulating web agent)\n messages = []","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.add_cache_control","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.add_cache_control#L29-L30","kind":"function","name":"add_cache_control","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":29,"end_line":30,"context_start_line":9,"context_end_line":50,"code":"\ndef make_request(messages):\n response = client.messages.create(\n model=\"claude-3-5-sonnet-20241022\", max_tokens=10, messages=messages\n )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:\n del message[\"content\"][0][\"cache_control\"]\n\n\ndef test_rate_limit_single(thread_id):\n # Create ~100k token message that will be cached\n big_text = \"This is a large block of text for caching. \" * 10000 # ~100k tokens\n medium_text = \"This is a large block of text for caching. \" * 2000 # ~10k tokens\n\n print(f\"Thread {thread_id}: Starting rate limit test with cached content...\")\n\n # Rebuild conversation each time (simulating web agent)\n messages = []\n\n # Add all previous conversation turns\n for i in range(5):\n if i == 0:","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.remove_cache_control","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.remove_cache_control#L33-L35","kind":"function","name":"remove_cache_control","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":33,"end_line":35,"context_start_line":13,"context_end_line":55,"code":" )\n return response.usage\n\n\ndef make_message(text):\n return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:\n del message[\"content\"][0][\"cache_control\"]\n\n\ndef test_rate_limit_single(thread_id):\n # Create ~100k token message that will be cached\n big_text = \"This is a large block of text for caching. \" * 10000 # ~100k tokens\n medium_text = \"This is a large block of text for caching. \" * 2000 # ~10k tokens\n\n print(f\"Thread {thread_id}: Starting rate limit test with cached content...\")\n\n # Rebuild conversation each time (simulating web agent)\n messages = []\n\n # Add all previous conversation turns\n for i in range(5):\n if i == 0:\n messages.append(make_message(big_text))\n t0 = time.time()\n else:\n messages.append(make_message(medium_text))\n add_cache_control(messages[-1])","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.test_rate_limit_single","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.test_rate_limit_single#L38-L63","kind":"function","name":"test_rate_limit_single","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":38,"end_line":63,"context_start_line":18,"context_end_line":83,"code":" return {\n \"role\": \"user\",\n \"content\": [\n {\n \"type\": \"text\",\n \"text\": text,\n }\n ],\n }\n\n\ndef add_cache_control(message: dict, cache_type=\"ephemeral\"):\n message[\"content\"][0][\"cache_control\"] = {\"type\": cache_type}\n\n\ndef remove_cache_control(message: dict):\n if \"cache_control\" in message[\"content\"][0]:\n del message[\"content\"][0][\"cache_control\"]\n\n\ndef test_rate_limit_single(thread_id):\n # Create ~100k token message that will be cached\n big_text = \"This is a large block of text for caching. \" * 10000 # ~100k tokens\n medium_text = \"This is a large block of text for caching. \" * 2000 # ~10k tokens\n\n print(f\"Thread {thread_id}: Starting rate limit test with cached content...\")\n\n # Rebuild conversation each time (simulating web agent)\n messages = []\n\n # Add all previous conversation turns\n for i in range(5):\n if i == 0:\n messages.append(make_message(big_text))\n t0 = time.time()\n else:\n messages.append(make_message(medium_text))\n add_cache_control(messages[-1])\n try:\n usage = make_request(messages)\n dt = time.time() - t0\n print(f\"{dt:.2f}: Thread {thread_id}: {usage}\")\n except Exception as e:\n print(f\"Thread {thread_id}: Error - {e}\")\n break\n remove_cache_control(messages[-1])\n\n\ndef test_rate_limit_parallel(num_threads=3):\n print(f\"Starting parallel rate limit test with {num_threads} threads...\")\n\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\n futures = [executor.submit(test_rate_limit_single, i) for i in range(num_threads)]\n\n for future in as_completed(futures):\n try:\n future.result()\n except Exception as e:\n print(f\"Thread completed with error: {e}\")\n\n\ndef test_rate_limit():\n # Original single-threaded version\n test_rate_limit_single(0)\n\n","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.test_rate_limit_parallel","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.test_rate_limit_parallel#L66-L76","kind":"function","name":"test_rate_limit_parallel","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":66,"end_line":76,"context_start_line":46,"context_end_line":89,"code":" messages = []\n\n # Add all previous conversation turns\n for i in range(5):\n if i == 0:\n messages.append(make_message(big_text))\n t0 = time.time()\n else:\n messages.append(make_message(medium_text))\n add_cache_control(messages[-1])\n try:\n usage = make_request(messages)\n dt = time.time() - t0\n print(f\"{dt:.2f}: Thread {thread_id}: {usage}\")\n except Exception as e:\n print(f\"Thread {thread_id}: Error - {e}\")\n break\n remove_cache_control(messages[-1])\n\n\ndef test_rate_limit_parallel(num_threads=3):\n print(f\"Starting parallel rate limit test with {num_threads} threads...\")\n\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\n futures = [executor.submit(test_rate_limit_single, i) for i in range(num_threads)]\n\n for future in as_completed(futures):\n try:\n future.result()\n except Exception as e:\n print(f\"Thread completed with error: {e}\")\n\n\ndef test_rate_limit():\n # Original single-threaded version\n test_rate_limit_single(0)\n\n\nif __name__ == \"__main__\":\n # Use parallel version to quickly exhaust rate limits\n test_rate_limit_parallel(num_threads=3)\n\n # Or use original single-threaded version\n # test_rate_limit()","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.verify_rate_limit_anthropic.test_rate_limit","uri":"program://AgentLab/function/tests.verify_rate_limit_anthropic.test_rate_limit#L79-L81","kind":"function","name":"test_rate_limit","path":"tests/verify_rate_limit_anthropic.py","language":"python","start_line":79,"end_line":81,"context_start_line":59,"context_end_line":89,"code":" print(f\"{dt:.2f}: Thread {thread_id}: {usage}\")\n except Exception as e:\n print(f\"Thread {thread_id}: Error - {e}\")\n break\n remove_cache_control(messages[-1])\n\n\ndef test_rate_limit_parallel(num_threads=3):\n print(f\"Starting parallel rate limit test with {num_threads} threads...\")\n\n with ThreadPoolExecutor(max_workers=num_threads) as executor:\n futures = [executor.submit(test_rate_limit_single, i) for i in range(num_threads)]\n\n for future in as_completed(futures):\n try:\n future.result()\n except Exception as e:\n print(f\"Thread completed with error: {e}\")\n\n\ndef test_rate_limit():\n # Original single-threaded version\n test_rate_limit_single(0)\n\n\nif __name__ == \"__main__\":\n # Use parallel version to quickly exhaust rate limits\n test_rate_limit_parallel(num_threads=3)\n\n # Or use original single-threaded version\n # test_rate_limit()","source_hash":"a74fab2ec054dffa24e51f8b972c405769daca2f4af30651e2c9a3b558a387d8","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_overlay_utils","uri":"program://AgentLab/module/tests.analyze.test_overlay_utils#L1-L81","kind":"module","name":"tests.analyze.test_overlay_utils","path":"tests/analyze/test_overlay_utils.py","language":"python","start_line":1,"end_line":81,"context_start_line":1,"context_end_line":81,"code":"from PIL import Image\n\nfrom agentlab.analyze import overlay_utils\n\n\ndef test_parse_function_calls():\n\n test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nclick('b123', button=\"right\", modifiers=[\"Shift\", \"Control\"])\nselect_option(\"c456\", [\"option1\", \"option2\"])\n\"\"\"\n\n result = overlay_utils.parse_function_calls(test_code)\n\n assert result[1].function_name == \"mouse_click\"\n assert result[1].name == \"y\"\n assert test_code[result[1].start_index : result[1].stop_index] == \"59\"\n\n assert result[8].function_name == \"select_option\"\n assert result[8].name == \"options\"\n assert test_code[result[8].start_index : result[8].stop_index] == '[\"option1\", \"option2\"]'\n\n\ndef test_filtering_args():\n test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nmouse_drag_and_drop(34, 59, to_x=100, to_y=200)\ndrag_and_drop(\"a123\", \"b456\")\n\"\"\"\n result = overlay_utils.parse_function_calls(test_code)\n args = overlay_utils.find_bids_and_xy_pairs(result)\n\n assert len(args) == 6 # Expecting 4 args: 2 mouse clicks, 1 fill, 1 select_option\n\n assert args[0].function_name == \"mouse_click\"\n assert args[0].name == \"xy\"\n assert args[0].value == (34.0, 59.0)\n assert test_code[args[0].start_index : args[0].stop_index] == \"34, 59\"\n\n assert args[2].name == \"from_xy\"\n assert args[3].name == \"to_xy\"\n assert test_code[args[3].start_index : args[3].stop_index] == \"to_x=100, to_y=200\"\n\n\ndef manual_eval():\n \"\"\"Manual test function that displays the resulting image.\"\"\"\n import matplotlib.pyplot as plt\n\n # Create a white test image\n img = Image.new(\"RGB\", (400, 300), \"white\")\n\n # Test action string with multiple function calls\n action_string = \"\"\"mouse_click(100, 150)\nfill(\"search_box\", \"hello world\")\nclick(\"submit_btn\")\"\"\"\n\n # Mock properties mapping bids to bounding boxes\n properties = {\n \"search_box\": {\"bbox\": (50, 50, 100, 50)},\n \"submit_btn\": {\"bbox\": (150, 100, 120, 30)},\n }\n\n # Annotate the image and get colored HTML\n html_result = overlay_utils.annotate_action(img, action_string, properties, colormap=\"tab10\")\n\n # Display result\n plt.figure(figsize=(10, 6))\n plt.imshow(img)\n plt.axis(\"off\")\n plt.show()\n\n print(\"HTML with colored arguments:\")\n print(html_result)\n print(\"\\nManual test completed!\")\n\n\nif __name__ == \"__main__\":\n manual_eval()","source_hash":"9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_overlay_utils.test_parse_function_calls","uri":"program://AgentLab/function/tests.analyze.test_overlay_utils.test_parse_function_calls#L6-L23","kind":"function","name":"test_parse_function_calls","path":"tests/analyze/test_overlay_utils.py","language":"python","start_line":6,"end_line":23,"context_start_line":1,"context_end_line":43,"code":"from PIL import Image\n\nfrom agentlab.analyze import overlay_utils\n\n\ndef test_parse_function_calls():\n\n test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nclick('b123', button=\"right\", modifiers=[\"Shift\", \"Control\"])\nselect_option(\"c456\", [\"option1\", \"option2\"])\n\"\"\"\n\n result = overlay_utils.parse_function_calls(test_code)\n\n assert result[1].function_name == \"mouse_click\"\n assert result[1].name == \"y\"\n assert test_code[result[1].start_index : result[1].stop_index] == \"59\"\n\n assert result[8].function_name == \"select_option\"\n assert result[8].name == \"options\"\n assert test_code[result[8].start_index : result[8].stop_index] == '[\"option1\", \"option2\"]'\n\n\ndef test_filtering_args():\n test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nmouse_drag_and_drop(34, 59, to_x=100, to_y=200)\ndrag_and_drop(\"a123\", \"b456\")\n\"\"\"\n result = overlay_utils.parse_function_calls(test_code)\n args = overlay_utils.find_bids_and_xy_pairs(result)\n\n assert len(args) == 6 # Expecting 4 args: 2 mouse clicks, 1 fill, 1 select_option\n\n assert args[0].function_name == \"mouse_click\"\n assert args[0].name == \"xy\"\n assert args[0].value == (34.0, 59.0)\n assert test_code[args[0].start_index : args[0].stop_index] == \"34, 59\"\n\n assert args[2].name == \"from_xy\"","source_hash":"9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_overlay_utils.test_filtering_args","uri":"program://AgentLab/function/tests.analyze.test_overlay_utils.test_filtering_args#L26-L45","kind":"function","name":"test_filtering_args","path":"tests/analyze/test_overlay_utils.py","language":"python","start_line":26,"end_line":45,"context_start_line":6,"context_end_line":65,"code":"def test_parse_function_calls():\n\n test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nclick('b123', button=\"right\", modifiers=[\"Shift\", \"Control\"])\nselect_option(\"c456\", [\"option1\", \"option2\"])\n\"\"\"\n\n result = overlay_utils.parse_function_calls(test_code)\n\n assert result[1].function_name == \"mouse_click\"\n assert result[1].name == \"y\"\n assert test_code[result[1].start_index : result[1].stop_index] == \"59\"\n\n assert result[8].function_name == \"select_option\"\n assert result[8].name == \"options\"\n assert test_code[result[8].start_index : result[8].stop_index] == '[\"option1\", \"option2\"]'\n\n\ndef test_filtering_args():\n test_code = \"\"\"\nmouse_click(34, 59)\nfill(\"a234\", \"test\")\nmouse_drag_and_drop(34, 59, to_x=100, to_y=200)\ndrag_and_drop(\"a123\", \"b456\")\n\"\"\"\n result = overlay_utils.parse_function_calls(test_code)\n args = overlay_utils.find_bids_and_xy_pairs(result)\n\n assert len(args) == 6 # Expecting 4 args: 2 mouse clicks, 1 fill, 1 select_option\n\n assert args[0].function_name == \"mouse_click\"\n assert args[0].name == \"xy\"\n assert args[0].value == (34.0, 59.0)\n assert test_code[args[0].start_index : args[0].stop_index] == \"34, 59\"\n\n assert args[2].name == \"from_xy\"\n assert args[3].name == \"to_xy\"\n assert test_code[args[3].start_index : args[3].stop_index] == \"to_x=100, to_y=200\"\n\n\ndef manual_eval():\n \"\"\"Manual test function that displays the resulting image.\"\"\"\n import matplotlib.pyplot as plt\n\n # Create a white test image\n img = Image.new(\"RGB\", (400, 300), \"white\")\n\n # Test action string with multiple function calls\n action_string = \"\"\"mouse_click(100, 150)\nfill(\"search_box\", \"hello world\")\nclick(\"submit_btn\")\"\"\"\n\n # Mock properties mapping bids to bounding boxes\n properties = {\n \"search_box\": {\"bbox\": (50, 50, 100, 50)},\n \"submit_btn\": {\"bbox\": (150, 100, 120, 30)},\n }\n","source_hash":"9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_overlay_utils.manual_eval","uri":"program://AgentLab/function/tests.analyze.test_overlay_utils.manual_eval#L48-L77","kind":"function","name":"manual_eval","path":"tests/analyze/test_overlay_utils.py","language":"python","start_line":48,"end_line":77,"context_start_line":28,"context_end_line":81,"code":"mouse_click(34, 59)\nfill(\"a234\", \"test\")\nmouse_drag_and_drop(34, 59, to_x=100, to_y=200)\ndrag_and_drop(\"a123\", \"b456\")\n\"\"\"\n result = overlay_utils.parse_function_calls(test_code)\n args = overlay_utils.find_bids_and_xy_pairs(result)\n\n assert len(args) == 6 # Expecting 4 args: 2 mouse clicks, 1 fill, 1 select_option\n\n assert args[0].function_name == \"mouse_click\"\n assert args[0].name == \"xy\"\n assert args[0].value == (34.0, 59.0)\n assert test_code[args[0].start_index : args[0].stop_index] == \"34, 59\"\n\n assert args[2].name == \"from_xy\"\n assert args[3].name == \"to_xy\"\n assert test_code[args[3].start_index : args[3].stop_index] == \"to_x=100, to_y=200\"\n\n\ndef manual_eval():\n \"\"\"Manual test function that displays the resulting image.\"\"\"\n import matplotlib.pyplot as plt\n\n # Create a white test image\n img = Image.new(\"RGB\", (400, 300), \"white\")\n\n # Test action string with multiple function calls\n action_string = \"\"\"mouse_click(100, 150)\nfill(\"search_box\", \"hello world\")\nclick(\"submit_btn\")\"\"\"\n\n # Mock properties mapping bids to bounding boxes\n properties = {\n \"search_box\": {\"bbox\": (50, 50, 100, 50)},\n \"submit_btn\": {\"bbox\": (150, 100, 120, 30)},\n }\n\n # Annotate the image and get colored HTML\n html_result = overlay_utils.annotate_action(img, action_string, properties, colormap=\"tab10\")\n\n # Display result\n plt.figure(figsize=(10, 6))\n plt.imshow(img)\n plt.axis(\"off\")\n plt.show()\n\n print(\"HTML with colored arguments:\")\n print(html_result)\n print(\"\\nManual test completed!\")\n\n\nif __name__ == \"__main__\":\n manual_eval()","source_hash":"9cc9b9688d8e5c68972d712ecf778467e597d4632f32b2585c4ee95da238a7e1","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_inspect_results","uri":"program://AgentLab/module/tests.analyze.test_inspect_results#L1-L35","kind":"module","name":"tests.analyze.test_inspect_results","path":"tests/analyze/test_inspect_results.py","language":"python","start_line":1,"end_line":35,"context_start_line":1,"context_end_line":35,"code":"from pathlib import Path\nimport shutil\nimport tempfile\n\nimport pandas as pd\nfrom agentlab.analyze.inspect_results import get_study_summary\n\n\ndef test_get_study_summary():\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n study_dir = Path(tmp_dir) / \"test_study\"\n\n study_dir_original = Path(__file__).parent.parent / \"data\" / \"test_study\"\n\n # recursively copy the study to the temp dir using shutil\n shutil.copytree(study_dir_original, study_dir)\n\n sentinel = {}\n\n summary = get_study_summary(study_dir, sentinel=sentinel)\n assert isinstance(summary, pd.DataFrame)\n assert sentinel[\"from_cache\"] == False\n\n summary = get_study_summary(study_dir, sentinel=sentinel)\n assert isinstance(summary, pd.DataFrame)\n assert sentinel[\"from_cache\"] == True\n\n summary = get_study_summary(study_dir, ignore_cache=True, sentinel=sentinel)\n assert isinstance(summary, pd.DataFrame)\n assert sentinel[\"from_cache\"] == False\n\n\nif __name__ == \"__main__\":\n test_get_study_summary()","source_hash":"1b1e25989eb1e81a99bbac77634a76f275aa278a331ac3e97b1ddea85e224bd9","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.analyze.test_inspect_results.test_get_study_summary","uri":"program://AgentLab/function/tests.analyze.test_inspect_results.test_get_study_summary#L9-L31","kind":"function","name":"test_get_study_summary","path":"tests/analyze/test_inspect_results.py","language":"python","start_line":9,"end_line":31,"context_start_line":1,"context_end_line":35,"code":"from pathlib import Path\nimport shutil\nimport tempfile\n\nimport pandas as pd\nfrom agentlab.analyze.inspect_results import get_study_summary\n\n\ndef test_get_study_summary():\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n study_dir = Path(tmp_dir) / \"test_study\"\n\n study_dir_original = Path(__file__).parent.parent / \"data\" / \"test_study\"\n\n # recursively copy the study to the temp dir using shutil\n shutil.copytree(study_dir_original, study_dir)\n\n sentinel = {}\n\n summary = get_study_summary(study_dir, sentinel=sentinel)\n assert isinstance(summary, pd.DataFrame)\n assert sentinel[\"from_cache\"] == False\n\n summary = get_study_summary(study_dir, sentinel=sentinel)\n assert isinstance(summary, pd.DataFrame)\n assert sentinel[\"from_cache\"] == True\n\n summary = get_study_summary(study_dir, ignore_cache=True, sentinel=sentinel)\n assert isinstance(summary, pd.DataFrame)\n assert sentinel[\"from_cache\"] == False\n\n\nif __name__ == \"__main__\":\n test_get_study_summary()","source_hash":"1b1e25989eb1e81a99bbac77634a76f275aa278a331ac3e97b1ddea85e224bd9","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_huggingface_utils","uri":"program://AgentLab/module/tests.llm.test_huggingface_utils#L1-L32","kind":"module","name":"tests.llm.test_huggingface_utils","path":"tests/llm/test_huggingface_utils.py","language":"python","start_line":1,"end_line":32,"context_start_line":1,"context_end_line":32,"code":"import pytest\n\nfrom agentlab.llm.chat_api import HuggingFaceURLChatModel, make_system_message, make_user_message\nfrom agentlab.llm.llm_utils import download_and_save_model\nfrom agentlab.llm.prompt_templates import STARCHAT_PROMPT_TEMPLATE\n\n# TODO(optimass): figure out a good model for all tests\n\n\n@pytest.mark.skip(reason=\"Requires a local model checkpoint\")\ndef test_CustomLLMChatbot_locally():\n # model_path = \"google/flan-t5-base\" # remote model on HuggingFace Hub\n model_path = \"/mnt/ui_copilot/data_rw/models/starcoderbase-1b-ft\" # local model in shared volum\n\n chatbot = HuggingFaceURLChatModel(model_path=model_path, temperature=1e-3)\n\n messages = [\n make_system_message(\"Please tell me back the following word: \"),\n make_user_message(\"bird\"),\n ]\n\n answer = chatbot(messages)\n\n print(answer.content)\n\n\n@pytest.mark.skip(reason=\"Requires downloading a large file on disk local model checkpoint\")\ndef test_download_and_save_model():\n model_path = \"meta-llama/Llama-2-70b-chat\"\n save_dir = \"test_models\"\n\n download_and_save_model(model_path, save_dir)","source_hash":"edea9db32c39d22cd6c155ff848d563256bff43e05e0fc6f3282e583888fe9d3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_huggingface_utils.test_CustomLLMChatbot_locally","uri":"program://AgentLab/function/tests.llm.test_huggingface_utils.test_CustomLLMChatbot_locally#L11-L24","kind":"function","name":"test_CustomLLMChatbot_locally","path":"tests/llm/test_huggingface_utils.py","language":"python","start_line":11,"end_line":24,"context_start_line":1,"context_end_line":32,"code":"import pytest\n\nfrom agentlab.llm.chat_api import HuggingFaceURLChatModel, make_system_message, make_user_message\nfrom agentlab.llm.llm_utils import download_and_save_model\nfrom agentlab.llm.prompt_templates import STARCHAT_PROMPT_TEMPLATE\n\n# TODO(optimass): figure out a good model for all tests\n\n\n@pytest.mark.skip(reason=\"Requires a local model checkpoint\")\ndef test_CustomLLMChatbot_locally():\n # model_path = \"google/flan-t5-base\" # remote model on HuggingFace Hub\n model_path = \"/mnt/ui_copilot/data_rw/models/starcoderbase-1b-ft\" # local model in shared volum\n\n chatbot = HuggingFaceURLChatModel(model_path=model_path, temperature=1e-3)\n\n messages = [\n make_system_message(\"Please tell me back the following word: \"),\n make_user_message(\"bird\"),\n ]\n\n answer = chatbot(messages)\n\n print(answer.content)\n\n\n@pytest.mark.skip(reason=\"Requires downloading a large file on disk local model checkpoint\")\ndef test_download_and_save_model():\n model_path = \"meta-llama/Llama-2-70b-chat\"\n save_dir = \"test_models\"\n\n download_and_save_model(model_path, save_dir)","source_hash":"edea9db32c39d22cd6c155ff848d563256bff43e05e0fc6f3282e583888fe9d3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_huggingface_utils.test_download_and_save_model","uri":"program://AgentLab/function/tests.llm.test_huggingface_utils.test_download_and_save_model#L28-L32","kind":"function","name":"test_download_and_save_model","path":"tests/llm/test_huggingface_utils.py","language":"python","start_line":28,"end_line":32,"context_start_line":8,"context_end_line":32,"code":"\n\n@pytest.mark.skip(reason=\"Requires a local model checkpoint\")\ndef test_CustomLLMChatbot_locally():\n # model_path = \"google/flan-t5-base\" # remote model on HuggingFace Hub\n model_path = \"/mnt/ui_copilot/data_rw/models/starcoderbase-1b-ft\" # local model in shared volum\n\n chatbot = HuggingFaceURLChatModel(model_path=model_path, temperature=1e-3)\n\n messages = [\n make_system_message(\"Please tell me back the following word: \"),\n make_user_message(\"bird\"),\n ]\n\n answer = chatbot(messages)\n\n print(answer.content)\n\n\n@pytest.mark.skip(reason=\"Requires downloading a large file on disk local model checkpoint\")\ndef test_download_and_save_model():\n model_path = \"meta-llama/Llama-2-70b-chat\"\n save_dir = \"test_models\"\n\n download_and_save_model(model_path, save_dir)","source_hash":"edea9db32c39d22cd6c155ff848d563256bff43e05e0fc6f3282e583888fe9d3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_configs","uri":"program://AgentLab/module/tests.llm.test_llm_configs#L1-L8","kind":"module","name":"tests.llm.test_llm_configs","path":"tests/llm/test_llm_configs.py","language":"python","start_line":1,"end_line":8,"context_start_line":1,"context_end_line":8,"code":"from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.chat_api import BaseModelArgs\n\n\ndef test_llm_configs():\n\n for _, args in CHAT_MODEL_ARGS_DICT.items():\n assert isinstance(args, BaseModelArgs)","source_hash":"c905bf93d4a57f7fd18cc7000a71dbf31077cd1603c5b7f5641bd6000502b880","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_configs.test_llm_configs","uri":"program://AgentLab/function/tests.llm.test_llm_configs.test_llm_configs#L5-L8","kind":"function","name":"test_llm_configs","path":"tests/llm/test_llm_configs.py","language":"python","start_line":5,"end_line":8,"context_start_line":1,"context_end_line":8,"code":"from agentlab.llm.llm_configs import CHAT_MODEL_ARGS_DICT\nfrom agentlab.llm.chat_api import BaseModelArgs\n\n\ndef test_llm_configs():\n\n for _, args in CHAT_MODEL_ARGS_DICT.items():\n assert isinstance(args, BaseModelArgs)","source_hash":"c905bf93d4a57f7fd18cc7000a71dbf31077cd1603c5b7f5641bd6000502b880","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking","uri":"program://AgentLab/module/tests.llm.test_tracking#L1-L177","kind":"module","name":"tests.llm.test_tracking","path":"tests/llm/test_tracking.py","language":"python","start_line":1,"end_line":177,"context_start_line":1,"context_end_line":177,"code":"import os\nimport time\nfrom functools import partial\n\nimport pytest\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.chat_api import (\n AzureChatModel,\n OpenAIChatModel,\n OpenRouterChatModel,\n make_system_message,\n make_user_message,\n)\n\n\ndef test_get_action_decorator():\n action, agent_info = tracking.cost_tracker_decorator(lambda x, y: call_llm())(None, None)\n assert action == \"action\"\n assert agent_info[\"stats\"] == {\n \"input_tokens\": 1,\n \"output_tokens\": 1,\n \"cost\": 1.0,\n }\n\n\nOPENROUTER_API_KEY_AVAILABLE = os.environ.get(\"OPENROUTER_API_KEY\") is not None\n\nOPENROUTER_MODELS = (\n \"anthropic/claude-3.5-sonnet\",\n \"meta-llama/llama-3.1-405b-instruct\",\n \"meta-llama/llama-3.1-70b-instruct\",\n \"meta-llama/llama-3.1-8b-instruct\",\n \"google/gemini-pro-1.5\",\n)\n\n\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_get_pricing_openrouter():\n pricing = tracking.get_pricing_openrouter()\n assert isinstance(pricing, dict)\n assert all(isinstance(v, dict) for v in pricing.values())\n for model in OPENROUTER_MODELS:\n assert model in pricing\n assert isinstance(pricing[model], dict)\n assert all(isinstance(v, float) for v in pricing[model].values())\n\n\ndef test_get_pricing_openai():\n pricing = tracking.get_pricing_openai()\n assert isinstance(pricing, dict)\n assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:\n _, _ = call_llm()\n with tracking.set_tracker() as tracker3:\n _, _ = call_llm()\n _, _ = call_llm()\n with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1\n assert tracker1bis.stats[\"cost\"] == 1\n assert tracker3.stats[\"cost\"] == 3\n assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"\n import threading\n\n def thread_1(results=None):\n with tracking.set_tracker() as tracker:\n time.sleep(1)\n _, _ = call_llm()\n time.sleep(1)\n results[0] = tracker.stats\n\n def thread_2(results=None):\n time.sleep(1)\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n results[1] = tracker.stats\n\n results = [None] * 2\n threads = [\n threading.Thread(target=partial(thread_1, results=results)),\n threading.Thread(target=partial(thread_2, results=results)),\n ]\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n assert all(result[\"cost\"] == 1 for result in results)\n\n\nOPENAI_API_KEY_AVAILABLE = os.environ.get(\"OPENAI_API_KEY\") is not None\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENAI_API_KEY_AVAILABLE, reason=\"OpenAI API key is not available\")\ndef test_openai_chat_model():\n chat_model = OpenAIChatModel(\"gpt-4o-mini\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0\n\n\nAZURE_OPENAI_API_KEY_AVAILABLE = (\n os.environ.get(\"AZURE_OPENAI_API_KEY\") is not None\n and os.environ.get(\"AZURE_OPENAI_ENDPOINT\") is not None\n)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(\n not AZURE_OPENAI_API_KEY_AVAILABLE, reason=\"Azure OpenAI API key is not available\"\n)\ndef test_azure_chat_model():\n chat_model = AzureChatModel(model_name=\"gpt-4.1-nano\", deployment_name=\"gpt-4.1-nano\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_openrouter_chat_model():\n chat_model = OpenRouterChatModel(\"openai/gpt-4o-mini\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_get_action_decorator","uri":"program://AgentLab/function/tests.llm.test_tracking.test_get_action_decorator#L17-L24","kind":"function","name":"test_get_action_decorator","path":"tests/llm/test_tracking.py","language":"python","start_line":17,"end_line":24,"context_start_line":1,"context_end_line":44,"code":"import os\nimport time\nfrom functools import partial\n\nimport pytest\n\nimport agentlab.llm.tracking as tracking\nfrom agentlab.llm.chat_api import (\n AzureChatModel,\n OpenAIChatModel,\n OpenRouterChatModel,\n make_system_message,\n make_user_message,\n)\n\n\ndef test_get_action_decorator():\n action, agent_info = tracking.cost_tracker_decorator(lambda x, y: call_llm())(None, None)\n assert action == \"action\"\n assert agent_info[\"stats\"] == {\n \"input_tokens\": 1,\n \"output_tokens\": 1,\n \"cost\": 1.0,\n }\n\n\nOPENROUTER_API_KEY_AVAILABLE = os.environ.get(\"OPENROUTER_API_KEY\") is not None\n\nOPENROUTER_MODELS = (\n \"anthropic/claude-3.5-sonnet\",\n \"meta-llama/llama-3.1-405b-instruct\",\n \"meta-llama/llama-3.1-70b-instruct\",\n \"meta-llama/llama-3.1-8b-instruct\",\n \"google/gemini-pro-1.5\",\n)\n\n\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_get_pricing_openrouter():\n pricing = tracking.get_pricing_openrouter()\n assert isinstance(pricing, dict)\n assert all(isinstance(v, dict) for v in pricing.values())\n for model in OPENROUTER_MODELS:\n assert model in pricing","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_get_pricing_openrouter","uri":"program://AgentLab/function/tests.llm.test_tracking.test_get_pricing_openrouter#L39-L46","kind":"function","name":"test_get_pricing_openrouter","path":"tests/llm/test_tracking.py","language":"python","start_line":39,"end_line":46,"context_start_line":19,"context_end_line":66,"code":" assert action == \"action\"\n assert agent_info[\"stats\"] == {\n \"input_tokens\": 1,\n \"output_tokens\": 1,\n \"cost\": 1.0,\n }\n\n\nOPENROUTER_API_KEY_AVAILABLE = os.environ.get(\"OPENROUTER_API_KEY\") is not None\n\nOPENROUTER_MODELS = (\n \"anthropic/claude-3.5-sonnet\",\n \"meta-llama/llama-3.1-405b-instruct\",\n \"meta-llama/llama-3.1-70b-instruct\",\n \"meta-llama/llama-3.1-8b-instruct\",\n \"google/gemini-pro-1.5\",\n)\n\n\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_get_pricing_openrouter():\n pricing = tracking.get_pricing_openrouter()\n assert isinstance(pricing, dict)\n assert all(isinstance(v, dict) for v in pricing.values())\n for model in OPENROUTER_MODELS:\n assert model in pricing\n assert isinstance(pricing[model], dict)\n assert all(isinstance(v, float) for v in pricing[model].values())\n\n\ndef test_get_pricing_openai():\n pricing = tracking.get_pricing_openai()\n assert isinstance(pricing, dict)\n assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with tracking.set_tracker() as tracker:","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_get_pricing_openai","uri":"program://AgentLab/function/tests.llm.test_tracking.test_get_pricing_openai#L49-L54","kind":"function","name":"test_get_pricing_openai","path":"tests/llm/test_tracking.py","language":"python","start_line":49,"end_line":54,"context_start_line":29,"context_end_line":74,"code":"OPENROUTER_MODELS = (\n \"anthropic/claude-3.5-sonnet\",\n \"meta-llama/llama-3.1-405b-instruct\",\n \"meta-llama/llama-3.1-70b-instruct\",\n \"meta-llama/llama-3.1-8b-instruct\",\n \"google/gemini-pro-1.5\",\n)\n\n\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_get_pricing_openrouter():\n pricing = tracking.get_pricing_openrouter()\n assert isinstance(pricing, dict)\n assert all(isinstance(v, dict) for v in pricing.values())\n for model in OPENROUTER_MODELS:\n assert model in pricing\n assert isinstance(pricing[model], dict)\n assert all(isinstance(v, float) for v in pricing[model].values())\n\n\ndef test_get_pricing_openai():\n pricing = tracking.get_pricing_openai()\n assert isinstance(pricing, dict)\n assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.call_llm","uri":"program://AgentLab/function/tests.llm.test_tracking.call_llm#L57-L62","kind":"function","name":"call_llm","path":"tests/llm/test_tracking.py","language":"python","start_line":57,"end_line":62,"context_start_line":37,"context_end_line":82,"code":"\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_get_pricing_openrouter():\n pricing = tracking.get_pricing_openrouter()\n assert isinstance(pricing, dict)\n assert all(isinstance(v, dict) for v in pricing.values())\n for model in OPENROUTER_MODELS:\n assert model in pricing\n assert isinstance(pricing[model], dict)\n assert all(isinstance(v, float) for v in pricing[model].values())\n\n\ndef test_get_pricing_openai():\n pricing = tracking.get_pricing_openai()\n assert isinstance(pricing, dict)\n assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:\n _, _ = call_llm()\n with tracking.set_tracker() as tracker3:\n _, _ = call_llm()\n _, _ = call_llm()\n with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_tracker","uri":"program://AgentLab/function/tests.llm.test_tracking.test_tracker#L65-L69","kind":"function","name":"test_tracker","path":"tests/llm/test_tracking.py","language":"python","start_line":65,"end_line":69,"context_start_line":45,"context_end_line":89,"code":" assert isinstance(pricing[model], dict)\n assert all(isinstance(v, float) for v in pricing[model].values())\n\n\ndef test_get_pricing_openai():\n pricing = tracking.get_pricing_openai()\n assert isinstance(pricing, dict)\n assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:\n _, _ = call_llm()\n with tracking.set_tracker() as tracker3:\n _, _ = call_llm()\n _, _ = call_llm()\n with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1\n assert tracker1bis.stats[\"cost\"] == 1\n assert tracker3.stats[\"cost\"] == 3\n assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_imbricate_trackers","uri":"program://AgentLab/function/tests.llm.test_tracking.test_imbricate_trackers#L72-L85","kind":"function","name":"test_imbricate_trackers","path":"tests/llm/test_tracking.py","language":"python","start_line":72,"end_line":85,"context_start_line":52,"context_end_line":105,"code":" assert all(\"prompt\" in pricing[model] and \"completion\" in pricing[model] for model in pricing)\n assert all(isinstance(pricing[model][\"prompt\"], float) for model in pricing)\n assert all(isinstance(pricing[model][\"completion\"], float) for model in pricing)\n\n\ndef call_llm():\n if hasattr(tracking.TRACKER, \"instance\") and isinstance(\n tracking.TRACKER.instance, tracking.LLMTracker\n ):\n tracking.TRACKER.instance(1, 1, 1)\n return \"action\", {\"stats\": {}}\n\n\ndef test_tracker():\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:\n _, _ = call_llm()\n with tracking.set_tracker() as tracker3:\n _, _ = call_llm()\n _, _ = call_llm()\n with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1\n assert tracker1bis.stats[\"cost\"] == 1\n assert tracker3.stats[\"cost\"] == 3\n assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"\n import threading\n\n def thread_1(results=None):\n with tracking.set_tracker() as tracker:\n time.sleep(1)\n _, _ = call_llm()\n time.sleep(1)\n results[0] = tracker.stats\n\n def thread_2(results=None):\n time.sleep(1)\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n results[1] = tracker.stats\n\n results = [None] * 2","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_threaded_trackers","uri":"program://AgentLab/function/tests.llm.test_tracking.test_threaded_trackers#L88-L115","kind":"function","name":"test_threaded_trackers","path":"tests/llm/test_tracking.py","language":"python","start_line":88,"end_line":115,"context_start_line":68,"context_end_line":135,"code":"\n assert tracker.stats[\"cost\"] == 1\n\n\ndef test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:\n _, _ = call_llm()\n with tracking.set_tracker() as tracker3:\n _, _ = call_llm()\n _, _ = call_llm()\n with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1\n assert tracker1bis.stats[\"cost\"] == 1\n assert tracker3.stats[\"cost\"] == 3\n assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"\n import threading\n\n def thread_1(results=None):\n with tracking.set_tracker() as tracker:\n time.sleep(1)\n _, _ = call_llm()\n time.sleep(1)\n results[0] = tracker.stats\n\n def thread_2(results=None):\n time.sleep(1)\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n results[1] = tracker.stats\n\n results = [None] * 2\n threads = [\n threading.Thread(target=partial(thread_1, results=results)),\n threading.Thread(target=partial(thread_2, results=results)),\n ]\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n assert all(result[\"cost\"] == 1 for result in results)\n\n\nOPENAI_API_KEY_AVAILABLE = os.environ.get(\"OPENAI_API_KEY\") is not None\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENAI_API_KEY_AVAILABLE, reason=\"OpenAI API key is not available\")\ndef test_openai_chat_model():\n chat_model = OpenAIChatModel(\"gpt-4o-mini\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_openai_chat_model","uri":"program://AgentLab/function/tests.llm.test_tracking.test_openai_chat_model#L123-L135","kind":"function","name":"test_openai_chat_model","path":"tests/llm/test_tracking.py","language":"python","start_line":123,"end_line":135,"context_start_line":103,"context_end_line":155,"code":" results[1] = tracker.stats\n\n results = [None] * 2\n threads = [\n threading.Thread(target=partial(thread_1, results=results)),\n threading.Thread(target=partial(thread_2, results=results)),\n ]\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n assert all(result[\"cost\"] == 1 for result in results)\n\n\nOPENAI_API_KEY_AVAILABLE = os.environ.get(\"OPENAI_API_KEY\") is not None\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENAI_API_KEY_AVAILABLE, reason=\"OpenAI API key is not available\")\ndef test_openai_chat_model():\n chat_model = OpenAIChatModel(\"gpt-4o-mini\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0\n\n\nAZURE_OPENAI_API_KEY_AVAILABLE = (\n os.environ.get(\"AZURE_OPENAI_API_KEY\") is not None\n and os.environ.get(\"AZURE_OPENAI_ENDPOINT\") is not None\n)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(\n not AZURE_OPENAI_API_KEY_AVAILABLE, reason=\"Azure OpenAI API key is not available\"\n)\ndef test_azure_chat_model():\n chat_model = AzureChatModel(model_name=\"gpt-4.1-nano\", deployment_name=\"gpt-4.1-nano\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_azure_chat_model","uri":"program://AgentLab/function/tests.llm.test_tracking.test_azure_chat_model#L148-L160","kind":"function","name":"test_azure_chat_model","path":"tests/llm/test_tracking.py","language":"python","start_line":148,"end_line":160,"context_start_line":128,"context_end_line":177,"code":" messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0\n\n\nAZURE_OPENAI_API_KEY_AVAILABLE = (\n os.environ.get(\"AZURE_OPENAI_API_KEY\") is not None\n and os.environ.get(\"AZURE_OPENAI_ENDPOINT\") is not None\n)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(\n not AZURE_OPENAI_API_KEY_AVAILABLE, reason=\"Azure OpenAI API key is not available\"\n)\ndef test_azure_chat_model():\n chat_model = AzureChatModel(model_name=\"gpt-4.1-nano\", deployment_name=\"gpt-4.1-nano\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_openrouter_chat_model():\n chat_model = OpenRouterChatModel(\"openai/gpt-4o-mini\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.test_openrouter_chat_model","uri":"program://AgentLab/function/tests.llm.test_tracking.test_openrouter_chat_model#L165-L177","kind":"function","name":"test_openrouter_chat_model","path":"tests/llm/test_tracking.py","language":"python","start_line":165,"end_line":177,"context_start_line":145,"context_end_line":177,"code":"@pytest.mark.skipif(\n not AZURE_OPENAI_API_KEY_AVAILABLE, reason=\"Azure OpenAI API key is not available\"\n)\ndef test_azure_chat_model():\n chat_model = AzureChatModel(model_name=\"gpt-4.1-nano\", deployment_name=\"gpt-4.1-nano\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENROUTER_API_KEY_AVAILABLE, reason=\"OpenRouter API key is not available\")\ndef test_openrouter_chat_model():\n chat_model = OpenRouterChatModel(\"openai/gpt-4o-mini\")\n assert chat_model.input_cost > 0\n assert chat_model.output_cost > 0\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n with tracking.set_tracker() as tracker:\n answer = chat_model(messages)\n assert \"5\" in answer.get(\"content\")\n assert tracker.stats[\"cost\"] > 0","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.thread_1","uri":"program://AgentLab/function/tests.llm.test_tracking.thread_1#L92-L97","kind":"function","name":"thread_1","path":"tests/llm/test_tracking.py","language":"python","start_line":92,"end_line":97,"context_start_line":72,"context_end_line":117,"code":"def test_imbricate_trackers():\n with tracking.set_tracker() as tracker4:\n with tracking.set_tracker() as tracker1:\n _, _ = call_llm()\n with tracking.set_tracker() as tracker3:\n _, _ = call_llm()\n _, _ = call_llm()\n with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1\n assert tracker1bis.stats[\"cost\"] == 1\n assert tracker3.stats[\"cost\"] == 3\n assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"\n import threading\n\n def thread_1(results=None):\n with tracking.set_tracker() as tracker:\n time.sleep(1)\n _, _ = call_llm()\n time.sleep(1)\n results[0] = tracker.stats\n\n def thread_2(results=None):\n time.sleep(1)\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n results[1] = tracker.stats\n\n results = [None] * 2\n threads = [\n threading.Thread(target=partial(thread_1, results=results)),\n threading.Thread(target=partial(thread_2, results=results)),\n ]\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n assert all(result[\"cost\"] == 1 for result in results)\n\n","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_tracking.thread_2","uri":"program://AgentLab/function/tests.llm.test_tracking.thread_2#L99-L103","kind":"function","name":"thread_2","path":"tests/llm/test_tracking.py","language":"python","start_line":99,"end_line":103,"context_start_line":79,"context_end_line":123,"code":" with tracking.set_tracker() as tracker1bis:\n _, _ = call_llm()\n\n assert tracker1.stats[\"cost\"] == 1\n assert tracker1bis.stats[\"cost\"] == 1\n assert tracker3.stats[\"cost\"] == 3\n assert tracker4.stats[\"cost\"] == 4\n\n\ndef test_threaded_trackers():\n \"\"\"thread_2 occurs in the middle of thread_1, results should be separate.\"\"\"\n import threading\n\n def thread_1(results=None):\n with tracking.set_tracker() as tracker:\n time.sleep(1)\n _, _ = call_llm()\n time.sleep(1)\n results[0] = tracker.stats\n\n def thread_2(results=None):\n time.sleep(1)\n with tracking.set_tracker() as tracker:\n _, _ = call_llm()\n results[1] = tracker.stats\n\n results = [None] * 2\n threads = [\n threading.Thread(target=partial(thread_1, results=results)),\n threading.Thread(target=partial(thread_2, results=results)),\n ]\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n\n assert all(result[\"cost\"] == 1 for result in results)\n\n\nOPENAI_API_KEY_AVAILABLE = os.environ.get(\"OPENAI_API_KEY\") is not None\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not OPENAI_API_KEY_AVAILABLE, reason=\"OpenAI API key is not available\")\ndef test_openai_chat_model():","source_hash":"216a1e92d2c1072a6f138def3517be4247855c2c6b9687a0b4c3f597044c5813","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_litellm_api","uri":"program://AgentLab/module/tests.llm.test_litellm_api#L1-L167","kind":"module","name":"tests.llm.test_litellm_api","path":"tests/llm/test_litellm_api.py","language":"python","start_line":1,"end_line":167,"context_start_line":1,"context_end_line":167,"code":"import os\nfrom functools import partial\n\nimport pytest\nfrom agentlab.llm.litellm_api import LiteLLMModelArgs\nfrom agentlab.llm.response_api import APIPayload, LLMOutput\n\nchat_api_tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },\n \"required\": [\"location\"],\n },\n },\n {\n \"type\": \"function\",\n \"name\": \"get_time\",\n \"description\": \"Get the current time in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the time for.\",\n }\n },\n \"required\": [\"location\"],\n },\n },\n]\n\n\n# test_config (setting name, BaseModelArgs, model_name, tools)\ntool_test_configs = [\n (\"gpt-4.1\", LiteLLMModelArgs, \"openai/gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", LiteLLMModelArgs, \"anthropic/claude-3-haiku-20240307\", anthropic_tools), # fails for parallel tool calls\n # (\"claude-3.7\", LiteLLMModelArgs, \"anthropic/claude-3-7-sonnet-20250219\", anthropic_tools), # fails for parallel tool calls\n (\"claude-4-sonnet\", LiteLLMModelArgs, \"anthropic/claude-sonnet-4-20250514\", chat_api_tools),\n # (\"gpt-o3\", LiteLLMModelArgs, \"openai/o3-2025-04-16\", chat_api_tools), # fails for parallel tool calls\n # add more models as needed\n]\n\n\ndef add_user_messages(msg_builder):\n return [\n msg_builder.user().add_text(\"What is the weather in Paris and Delhi?\"),\n msg_builder.user().add_text(\"You must call multiple tools to achieve the task.\"),\n ]\n\n\n## Test multiaction\n@pytest.mark.pricy\ndef test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Note: Remove assert and Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n res_df = []\n for tool_choice in [\n # \"none\",\n \"required\", # fails for Responses API\n \"any\", # fails for Responses API\n \"auto\",\n # \"get_weather\", # force a specific tool call\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n row = {\n \"model\": name,\n \"checkpoint\": checkpoint_name,\n \"tool_choice\": tool_choice,\n \"num_tool_calls\": num_tool_calls,\n \"action\": response.action,\n }\n res_df.append(row)\n assert (\n 
num_tool_calls == 2\n ), f\"Expected 2 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice}\"\n # import pandas as pd\n # print(pd.DataFrame(res_df))\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_single_tool_call():\n \"\"\"\n Test that the LLMOutput contains only one tool call when use_only_first_toolcall is True.\n \"\"\"\n for tool_choice in [\n # 'none',\n \"required\",\n \"any\",\n \"auto\",\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n print(name, \"tool choice:\", tool_choice, \"\\n\", \"**\" * 10)\n llm_class = partial(llm_class, use_only_first_toolcall=True)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n assert (\n num_tool_calls == 1\n ), f\"Expected 1 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice }\"\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_force_tool_call():\n \"\"\"\n Test that the model can produce a specific tool call when requested.\n The user message asks the 'weather' but we force call tool \"get_time\".\n We test if 'get_time' is present in the tool calls.\n Note: Model can have other tool calls as well.\n \"\"\"\n force_call_tool = \"get_time\"\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder) # asks weather in Paris and Delhi\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=force_call_tool)\n )\n called_fn_names = [call.name for call in response.tool_calls] if response.tool_calls else []\n assert response.tool_calls is not None\n assert any(\n fn_name == \"get_time\" for fn_name in called_fn_names\n ), f\"Model:{name},Expected all tool calls to be 'get_time', but got {called_fn_names} with force call {force_call_tool}\"\n\n\nif __name__ == \"__main__\":\n test_multi_action_tool_calls()\n test_force_tool_call()\n test_single_tool_call()","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_litellm_api.add_user_messages","uri":"program://AgentLab/function/tests.llm.test_litellm_api.add_user_messages#L58-L62","kind":"function","name":"add_user_messages","path":"tests/llm/test_litellm_api.py","language":"python","start_line":58,"end_line":62,"context_start_line":38,"context_end_line":82,"code":" \"description\": \"The location to get the time for.\",\n }\n },\n \"required\": [\"location\"],\n },\n },\n]\n\n\n# test_config (setting name, BaseModelArgs, model_name, tools)\ntool_test_configs = [\n (\"gpt-4.1\", LiteLLMModelArgs, \"openai/gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", LiteLLMModelArgs, \"anthropic/claude-3-haiku-20240307\", anthropic_tools), # fails for parallel tool calls\n # (\"claude-3.7\", LiteLLMModelArgs, \"anthropic/claude-3-7-sonnet-20250219\", anthropic_tools), # fails for parallel tool calls\n (\"claude-4-sonnet\", LiteLLMModelArgs, \"anthropic/claude-sonnet-4-20250514\", chat_api_tools),\n # (\"gpt-o3\", LiteLLMModelArgs, \"openai/o3-2025-04-16\", chat_api_tools), # fails for parallel tool calls\n # add more models as needed\n]\n\n\ndef add_user_messages(msg_builder):\n return [\n msg_builder.user().add_text(\"What is the weather in Paris and Delhi?\"),\n msg_builder.user().add_text(\"You must call multiple tools to achieve the task.\"),\n ]\n\n\n## Test multiaction\n@pytest.mark.pricy\ndef test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Note: Remove assert and Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n res_df = []\n for tool_choice in [\n # \"none\",\n \"required\", # fails for Responses API\n \"any\", # fails for Responses API\n \"auto\",\n # \"get_weather\", # force a specific tool call\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_litellm_api.test_multi_action_tool_calls","uri":"program://AgentLab/function/tests.llm.test_litellm_api.test_multi_action_tool_calls#L67-L103","kind":"function","name":"test_multi_action_tool_calls","path":"tests/llm/test_litellm_api.py","language":"python","start_line":67,"end_line":103,"context_start_line":47,"context_end_line":123,"code":"# test_config (setting name, BaseModelArgs, model_name, tools)\ntool_test_configs = [\n (\"gpt-4.1\", LiteLLMModelArgs, \"openai/gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", LiteLLMModelArgs, \"anthropic/claude-3-haiku-20240307\", anthropic_tools), # fails for parallel tool calls\n # (\"claude-3.7\", LiteLLMModelArgs, \"anthropic/claude-3-7-sonnet-20250219\", anthropic_tools), # fails for parallel tool calls\n (\"claude-4-sonnet\", LiteLLMModelArgs, \"anthropic/claude-sonnet-4-20250514\", chat_api_tools),\n # (\"gpt-o3\", LiteLLMModelArgs, \"openai/o3-2025-04-16\", chat_api_tools), # fails for parallel tool calls\n # add more models as needed\n]\n\n\ndef add_user_messages(msg_builder):\n return [\n msg_builder.user().add_text(\"What is the weather in Paris and Delhi?\"),\n msg_builder.user().add_text(\"You must call multiple tools to achieve the task.\"),\n ]\n\n\n## Test multiaction\n@pytest.mark.pricy\ndef test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Note: Remove assert and Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n res_df = []\n for tool_choice in [\n # \"none\",\n \"required\", # fails for Responses API\n \"any\", # fails for Responses API\n \"auto\",\n # \"get_weather\", # force a specific tool call\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n row = {\n \"model\": name,\n \"checkpoint\": checkpoint_name,\n \"tool_choice\": tool_choice,\n \"num_tool_calls\": num_tool_calls,\n \"action\": response.action,\n }\n res_df.append(row)\n assert (\n num_tool_calls == 2\n ), f\"Expected 2 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice}\"\n # import pandas as pd\n # print(pd.DataFrame(res_df))\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_single_tool_call():\n \"\"\"\n Test that the LLMOutput contains only one tool call when use_only_first_toolcall is True.\n \"\"\"\n for tool_choice in [\n # 'none',\n \"required\",\n \"any\",\n \"auto\",\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n print(name, \"tool choice:\", tool_choice, \"\\n\", \"**\" * 10)\n llm_class = partial(llm_class, use_only_first_toolcall=True)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_litellm_api.test_single_tool_call","uri":"program://AgentLab/function/tests.llm.test_litellm_api.test_single_tool_call#L110-L137","kind":"function","name":"test_single_tool_call","path":"tests/llm/test_litellm_api.py","language":"python","start_line":110,"end_line":137,"context_start_line":90,"context_end_line":157,"code":" APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n row = {\n \"model\": name,\n \"checkpoint\": checkpoint_name,\n \"tool_choice\": tool_choice,\n \"num_tool_calls\": num_tool_calls,\n \"action\": response.action,\n }\n res_df.append(row)\n assert (\n num_tool_calls == 2\n ), f\"Expected 2 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice}\"\n # import pandas as pd\n # print(pd.DataFrame(res_df))\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_single_tool_call():\n \"\"\"\n Test that the LLMOutput contains only one tool call when use_only_first_toolcall is True.\n \"\"\"\n for tool_choice in [\n # 'none',\n \"required\",\n \"any\",\n \"auto\",\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n print(name, \"tool choice:\", tool_choice, \"\\n\", \"**\" * 10)\n llm_class = partial(llm_class, use_only_first_toolcall=True)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n assert (\n num_tool_calls == 1\n ), f\"Expected 1 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice }\"\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_force_tool_call():\n \"\"\"\n Test that the model can produce a specific tool call when requested.\n The user message asks the 'weather' but we force call tool \"get_time\".\n We test if 'get_time' is present in the tool calls.\n Note: Model can have other tool calls as well.\n \"\"\"\n force_call_tool = \"get_time\"\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder) # asks weather in Paris and Delhi\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=force_call_tool)\n )\n called_fn_names = [call.name for call in response.tool_calls] if response.tool_calls else []","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_litellm_api.test_force_tool_call","uri":"program://AgentLab/function/tests.llm.test_litellm_api.test_force_tool_call#L142-L161","kind":"function","name":"test_force_tool_call","path":"tests/llm/test_litellm_api.py","language":"python","start_line":142,"end_line":161,"context_start_line":122,"context_end_line":167,"code":" llm_class = partial(llm_class, use_only_first_toolcall=True)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n assert (\n num_tool_calls == 1\n ), f\"Expected 1 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice }\"\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_force_tool_call():\n \"\"\"\n Test that the model can produce a specific tool call when requested.\n The user message asks the 'weather' but we force call tool \"get_time\".\n We test if 'get_time' is present in the tool calls.\n Note: Model can have other tool calls as well.\n \"\"\"\n force_call_tool = \"get_time\"\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder) # asks weather in Paris and Delhi\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=force_call_tool)\n )\n called_fn_names = [call.name for call in response.tool_calls] if response.tool_calls else []\n assert response.tool_calls is not None\n assert any(\n fn_name == \"get_time\" for fn_name in called_fn_names\n ), f\"Model:{name},Expected all tool calls to be 'get_time', but got {called_fn_names} with force call {force_call_tool}\"\n\n\nif __name__ == \"__main__\":\n test_multi_action_tool_calls()\n test_force_tool_call()\n test_single_tool_call()","source_hash":"3d536844800bbc74d0b13ec4b727a271d2cc56690613d4878114cf35d293b527","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api","uri":"program://AgentLab/module/tests.llm.test_response_api#L1-L803","kind":"module","name":"tests.llm.test_response_api","path":"tests/llm/test_response_api.py","language":"python","start_line":1,"end_line":803,"context_start_line":1,"context_end_line":803,"code":"import os\nfrom typing import Any, Dict, List, Optional\nfrom unittest.mock import MagicMock, patch\n\nimport anthropic\nimport openai\nimport pytest\n\nfrom agentlab.llm import tracking\nfrom agentlab.llm.response_api import (\n AnthropicAPIMessageBuilder,\n APIPayload,\n ClaudeResponseModelArgs,\n LLMOutput,\n OpenAIChatCompletionAPIMessageBuilder,\n OpenAIChatModelArgs,\n OpenAIResponseAPIMessageBuilder,\n OpenAIResponseModelArgs,\n)\n\n\n# Helper to create a mock OpenAI ChatCompletion response\ndef create_mock_openai_chat_completion(\n content=None, tool_calls=None, prompt_tokens=10, completion_tokens=20\n):\n completion = MagicMock(spec=openai.types.chat.ChatCompletion)\n choice = MagicMock()\n message = MagicMock(spec=openai.types.chat.ChatCompletionMessage)\n message.content = content\n message.tool_calls = None\n if tool_calls:\n message.tool_calls = []\n for tc in tool_calls:\n tool_call_mock = MagicMock(\n spec=openai.types.chat.chat_completion_message_tool_call.ChatCompletionMessageToolCall\n )\n tool_call_mock.id = tc[\"id\"]\n tool_call_mock.type = tc[\"type\"]\n tool_call_mock.function = MagicMock()\n tool_call_mock.function.name = tc[\"function\"][\"name\"]\n tool_call_mock.function.arguments = tc[\"function\"][\"arguments\"]\n message.tool_calls.append(tool_call_mock)\n\n choice.message = message\n completion.choices = [choice]\n\n completion.usage = MagicMock()\n # Explicitly set the attributes that get_tokens_counts_from_response will try first.\n # These are the generic names.\n completion.usage.input_tokens = prompt_tokens\n completion.usage.output_tokens = completion_tokens\n\n # Also set the OpenAI-specific names if any other part of the code might look for them directly,\n # or if get_tokens_counts_from_response had different fallback logic.\n completion.usage.prompt_tokens = prompt_tokens\n completion.usage.completion_tokens = completion_tokens\n prompt_tokens_details_mock = MagicMock()\n prompt_tokens_details_mock.cached_tokens = 0\n completion.usage.prompt_tokens_details = prompt_tokens_details_mock\n\n completion.model_dump.return_value = {\n \"id\": \"chatcmpl-xxxx\",\n \"choices\": [\n {\"message\": {\"role\": \"assistant\", \"content\": content, \"tool_calls\": tool_calls}}\n ],\n # Ensure the usage dict in model_dump also reflects the token counts accurately.\n # The get_tokens_counts_from_response also has a path for dict style.\n \"usage\": {\n \"input_tokens\": prompt_tokens, # Generic name\n \"output_tokens\": completion_tokens, # Generic name\n \"prompt_tokens\": prompt_tokens, # OpenAI specific\n \"completion_tokens\": completion_tokens, # OpenAI specific\n \"prompt_tokens_details\": {\"cached_tokens\": 0},\n },\n }\n message.to_dict.return_value = {\n \"role\": \"assistant\",\n \"content\": content,\n \"tool_calls\": tool_calls,\n }\n return completion\n\n\nresponses_api_tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": 
[\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n]\n\nchat_api_tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n]\nanthropic_tools = [\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n]\n\n\n# Helper to create a mock Anthropic response\ndef create_mock_anthropic_response(\n text_content=None, tool_use=None, input_tokens=15, output_tokens=25\n):\n\n response = MagicMock(spec=anthropic.types.Message)\n response.type = \"message\" # Explicitly set the type attribute\n response.content = []\n response.content = []\n if text_content:\n text_block = MagicMock(spec=anthropic.types.TextBlock)\n text_block.type = \"text\"\n text_block.text = text_content\n response.content.append(text_block)\n if tool_use:\n tool_use_block = MagicMock(spec=anthropic.types.ToolUseBlock)\n tool_use_block.type = \"tool_use\"\n tool_use_block.id = tool_use[\"id\"]\n tool_use_block.name = tool_use[\"name\"]\n tool_use_block.input = tool_use[\"input\"]\n response.content.append(tool_use_block)\n response.usage = MagicMock()\n response.usage.input_tokens = input_tokens\n response.usage.output_tokens = output_tokens\n response.usage.cache_input_tokens = 0\n response.usage.cache_creation_input_tokens = 0\n return response\n\n\ndef create_mock_openai_responses_api_response(\n outputs: Optional[List[Dict[str, Any]]] = None, input_tokens: int = 10, output_tokens: int = 20\n) -> MagicMock:\n \"\"\"\n Helper to create a mock response object similar to what\n openai.resources.Responses.create() would return.\n Compatible with OpenAIResponseModel and TrackAPIPricingMixin.\n \"\"\"\n\n response_mock = MagicMock(spec=openai.types.responses.response.Response)\n response_mock.type = \"response\"\n response_mock.output = []\n\n if outputs:\n for out_data in outputs:\n output_item_mock = MagicMock()\n output_item_mock.type = out_data.get(\"type\")\n\n if output_item_mock.type == \"function_call\":\n # You can adapt this depending on your expected object structure\n output_item_mock.name = out_data.get(\"name\")\n output_item_mock.arguments = out_data.get(\"arguments\")\n output_item_mock.call_id = out_data.get(\"call_id\")\n elif output_item_mock.type == \"reasoning\":\n output_item_mock.summary = []\n for text_content in out_data.get(\"summary\", []):\n summary_text_mock = MagicMock()\n summary_text_mock.text = text_content\n output_item_mock.summary.append(summary_text_mock)\n\n response_mock.output.append(output_item_mock)\n\n # Token usage for pricing tracking\n response_mock.usage = MagicMock(spec=openai.types.responses.response.ResponseUsage)\n response_mock.usage.input_tokens = input_tokens\n response_mock.usage.output_tokens = output_tokens\n response_mock.usage.prompt_tokens = input_tokens\n response_mock.usage.completion_tokens = 
output_tokens\n input_tokens_details_mock = MagicMock()\n input_tokens_details_mock.cached_tokens = 0\n response_mock.usage.input_tokens_details = input_tokens_details_mock\n\n return response_mock\n\n\n# --- Test MessageBuilders ---\n\n\ndef test_openai_response_api_message_builder_text():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_text(\"Hello, world!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"input_text\", \"text\": \"Hello, world!\"}]\n\n\ndef test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"input_image\", \"image_url\": \"data:image/png;base64,SIMPLEBASE64STRING\"}\n ]\n\n\ndef test_anthropic_api_message_builder_text():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_text(\"Hello, Anthropic!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]\n\n\ndef test_anthropic_api_message_builder_image():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,ANTHROPICBASE64\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"\n assert image_content[\"source\"][\"type\"] == \"base64\"\n assert image_content[\"source\"][\"media_type\"] == \"image/png\"\n assert image_content[\"source\"][\"data\"] == \"ANTHROPICBASE64\" # Base64 prefix should be stripped\n\n\ndef test_openai_chat_completion_api_message_builder_text():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, ChatCompletion!\"}]\n\n\ndef test_openai_chat_completion_api_message_builder_image():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_image(\"data:image/jpeg;base64,CHATCOMPLETIONBASE64\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"image_url\", \"image_url\": {\"url\": \"data:image/jpeg;base64,CHATCOMPLETIONBASE64\"}}\n ]\n\n\ndef test_openai_chat_completion_model_parse_and_cost():\n args = OpenAIChatModelArgs(model_name=\"gpt-3.5-turbo\")\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()\n\n mock_response = create_mock_openai_chat_completion(\n content=\"This is a test thought.\",\n tool_calls=[\n {\n \"id\": \"call_123\",\n \"type\": \"function\",\n \"function\": {\"name\": \"get_weather\", \"arguments\": '{\"location\": \"Paris\"}'},\n }\n ],\n prompt_tokens=50,\n completion_tokens=30,\n )\n\n with patch.object(\n model.client.chat.completions, \"create\", return_value=mock_response\n ) as mock_create:\n with 
tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\n \"What's the weather in Paris?\"\n )\n ]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n assert parsed_output.raw_response.choices[0].message.content == \"This is a test thought.\"\n assert parsed_output.action == \"\"\"get_weather(location='Paris')\"\"\"\n assert parsed_output.raw_response.choices[0].message.tool_calls[0].id == \"call_123\"\n # Check cost tracking (token counts)\n assert global_tracker.stats[\"input_tokens\"] == 50\n assert global_tracker.stats[\"output_tokens\"] == 30\n assert global_tracker.stats[\"cost\"] > 0\n\n\ndef test_claude_response_model_parse_and_cost():\n args = ClaudeResponseModelArgs(model_name=\"claude-3-haiku-20240307\")\n model = args.make_model()\n\n mock_anthropic_api_response = create_mock_anthropic_response(\n text_content=\"Thinking about the request.\",\n tool_use={\"id\": \"tool_abc\", \"name\": \"search_web\", \"input\": {\"query\": \"latest news\"}},\n input_tokens=40,\n output_tokens=20,\n )\n\n with patch.object(\n model.client.messages, \"create\", return_value=mock_anthropic_api_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"Search for latest news\")]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n fn_call = next(iter(parsed_output.tool_calls))\n\n assert \"Thinking about the request.\" in parsed_output.think\n assert parsed_output.action == \"\"\"search_web(query='latest news')\"\"\"\n assert fn_call.name == \"search_web\"\n assert global_tracker.stats[\"input_tokens\"] == 40\n assert global_tracker.stats[\"output_tokens\"] == 20\n\n\ndef test_openai_response_model_parse_and_cost():\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\")\n\n mock_function_call_output = {\n \"type\": \"function_call\",\n \"name\": \"get_current_weather\",\n \"arguments\": '{\"location\": \"Boston, MA\", \"unit\": \"celsius\"}',\n \"call_id\": \"call_abc123\",\n }\n\n mock_api_resp = create_mock_openai_responses_api_response(\n outputs=[mock_function_call_output],\n input_tokens=70,\n output_tokens=40,\n )\n\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()\n\n with patch.object(\n model.client.responses, \"create\", return_value=mock_api_resp\n ) as mock_create_method:\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What's the weather in Boston?\")\n ]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create_method.assert_called_once()\n fn_calls = [\n content\n for content in parsed_output.tool_calls.raw_calls.output\n if content.type == \"function_call\"\n ]\n assert parsed_output.action == \"get_current_weather(location='Boston, MA', unit='celsius')\"\n assert fn_calls[0].call_id == \"call_abc123\"\n assert parsed_output.raw_response == mock_api_resp\n assert global_tracker.stats[\"input_tokens\"] == 70\n assert global_tracker.stats[\"output_tokens\"] == 40\n\n\n# --- Test Response Models (Pricy - require API keys and actual calls) ---\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef 
test_openai_chat_completion_model_pricy_call():\n \"\"\"Tests OpenAIChatCompletionModel with a real API call.\"\"\"\n args = OpenAIChatModelArgs(\n model_name=\"gpt-4.1\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n\n tools = chat_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\" Expected get_weather(location='Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"ANTHROPIC_API_KEY not set\")\ndef test_claude_response_model_pricy_call():\n \"\"\"Tests ClaudeResponseModel with a real API call.\"\"\"\n\n args = ClaudeResponseModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n tools = anthropic_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\"Expected get_weather('Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_pricy_call():\n \"\"\"\n Tests OpenAIResponseModel output parsing and cost tracking with both\n function_call and reasoning outputs.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"\"\"get_weather(location='Paris', unit='celsius')\"\"\"\n ), f\"\"\" Expected get_weather(location='Paris', unit='celsius') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_with_multiple_messages_and_cost_tracking():\n \"\"\"\n Test OpenAIResponseModel's output parsing and cost tracking\n with a tool-using assistant and follow-up interaction.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n builder = args.get_message_builder()\n\n messages = [builder.user().add_text(\"What is 
the weather in Paris?\")]\n\n with tracking.set_tracker() as tracker:\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n prev_input = tracker.stats[\"input_tokens\"]\n prev_output = tracker.stats[\"output_tokens\"]\n prev_cost = tracker.stats[\"cost\"]\n\n assert parsed.tool_calls, \"Expected tool calls in the response\"\n # Set tool responses\n for tool_call in parsed.tool_calls:\n tool_call.response_text(\"Its sunny! 25°C\")\n # Simulate tool execution and user follow-up\n messages += [\n builder.add_responded_tool_calls(parsed.tool_calls),\n builder.user().add_text(\"What is the weather in Delhi?\"),\n ]\n\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n\n delta_input = tracker.stats[\"input_tokens\"] - prev_input\n delta_output = tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0\n assert prev_output > 0\n assert prev_cost > 0\n assert parsed.raw_response is not None\n assert (\n parsed.action == \"\"\"get_weather(location='Delhi', unit='celsius')\"\"\"\n ), f\n# ... truncated ...","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":true}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.create_mock_openai_chat_completion","uri":"program://AgentLab/function/tests.llm.test_response_api.create_mock_openai_chat_completion#L23-L81","kind":"function","name":"create_mock_openai_chat_completion","path":"tests/llm/test_response_api.py","language":"python","start_line":23,"end_line":81,"context_start_line":3,"context_end_line":101,"code":"from unittest.mock import MagicMock, patch\n\nimport anthropic\nimport openai\nimport pytest\n\nfrom agentlab.llm import tracking\nfrom agentlab.llm.response_api import (\n AnthropicAPIMessageBuilder,\n APIPayload,\n ClaudeResponseModelArgs,\n LLMOutput,\n OpenAIChatCompletionAPIMessageBuilder,\n OpenAIChatModelArgs,\n OpenAIResponseAPIMessageBuilder,\n OpenAIResponseModelArgs,\n)\n\n\n# Helper to create a mock OpenAI ChatCompletion response\ndef create_mock_openai_chat_completion(\n content=None, tool_calls=None, prompt_tokens=10, completion_tokens=20\n):\n completion = MagicMock(spec=openai.types.chat.ChatCompletion)\n choice = MagicMock()\n message = MagicMock(spec=openai.types.chat.ChatCompletionMessage)\n message.content = content\n message.tool_calls = None\n if tool_calls:\n message.tool_calls = []\n for tc in tool_calls:\n tool_call_mock = MagicMock(\n spec=openai.types.chat.chat_completion_message_tool_call.ChatCompletionMessageToolCall\n )\n tool_call_mock.id = tc[\"id\"]\n tool_call_mock.type = tc[\"type\"]\n tool_call_mock.function = MagicMock()\n tool_call_mock.function.name = tc[\"function\"][\"name\"]\n tool_call_mock.function.arguments = tc[\"function\"][\"arguments\"]\n message.tool_calls.append(tool_call_mock)\n\n choice.message = message\n completion.choices = [choice]\n\n completion.usage = MagicMock()\n # Explicitly set the attributes that get_tokens_counts_from_response will try first.\n # These are the generic names.\n completion.usage.input_tokens = prompt_tokens\n completion.usage.output_tokens = completion_tokens\n\n # Also set the OpenAI-specific names if any other part of the code might look for them directly,\n # or if get_tokens_counts_from_response had different fallback logic.\n completion.usage.prompt_tokens = prompt_tokens\n completion.usage.completion_tokens = completion_tokens\n prompt_tokens_details_mock = MagicMock()\n prompt_tokens_details_mock.cached_tokens = 0\n completion.usage.prompt_tokens_details = prompt_tokens_details_mock\n\n completion.model_dump.return_value = {\n \"id\": \"chatcmpl-xxxx\",\n \"choices\": [\n {\"message\": {\"role\": \"assistant\", \"content\": content, \"tool_calls\": tool_calls}}\n ],\n # Ensure the usage dict in model_dump also reflects the token counts accurately.\n # The get_tokens_counts_from_response also has a path for dict style.\n \"usage\": {\n \"input_tokens\": prompt_tokens, # Generic name\n \"output_tokens\": completion_tokens, # Generic name\n \"prompt_tokens\": prompt_tokens, # OpenAI specific\n \"completion_tokens\": completion_tokens, # OpenAI specific\n \"prompt_tokens_details\": {\"cached_tokens\": 0},\n },\n }\n message.to_dict.return_value = {\n \"role\": \"assistant\",\n \"content\": content,\n \"tool_calls\": tool_calls,\n }\n return completion\n\n\nresponses_api_tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": 
\"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.create_mock_anthropic_response","uri":"program://AgentLab/function/tests.llm.test_response_api.create_mock_anthropic_response#L148-L173","kind":"function","name":"create_mock_anthropic_response","path":"tests/llm/test_response_api.py","language":"python","start_line":148,"end_line":173,"context_start_line":128,"context_end_line":193,"code":"]\nanthropic_tools = [\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n]\n\n\n# Helper to create a mock Anthropic response\ndef create_mock_anthropic_response(\n text_content=None, tool_use=None, input_tokens=15, output_tokens=25\n):\n\n response = MagicMock(spec=anthropic.types.Message)\n response.type = \"message\" # Explicitly set the type attribute\n response.content = []\n response.content = []\n if text_content:\n text_block = MagicMock(spec=anthropic.types.TextBlock)\n text_block.type = \"text\"\n text_block.text = text_content\n response.content.append(text_block)\n if tool_use:\n tool_use_block = MagicMock(spec=anthropic.types.ToolUseBlock)\n tool_use_block.type = \"tool_use\"\n tool_use_block.id = tool_use[\"id\"]\n tool_use_block.name = tool_use[\"name\"]\n tool_use_block.input = tool_use[\"input\"]\n response.content.append(tool_use_block)\n response.usage = MagicMock()\n response.usage.input_tokens = input_tokens\n response.usage.output_tokens = output_tokens\n response.usage.cache_input_tokens = 0\n response.usage.cache_creation_input_tokens = 0\n return response\n\n\ndef create_mock_openai_responses_api_response(\n outputs: Optional[List[Dict[str, Any]]] = None, input_tokens: int = 10, output_tokens: int = 20\n) -> MagicMock:\n \"\"\"\n Helper to create a mock response object similar to what\n openai.resources.Responses.create() would return.\n Compatible with OpenAIResponseModel and TrackAPIPricingMixin.\n \"\"\"\n\n response_mock = MagicMock(spec=openai.types.responses.response.Response)\n response_mock.type = \"response\"\n response_mock.output = []\n\n if outputs:\n for out_data in outputs:\n output_item_mock = MagicMock()\n output_item_mock.type = out_data.get(\"type\")\n","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.create_mock_openai_responses_api_response","uri":"program://AgentLab/function/tests.llm.test_response_api.create_mock_openai_responses_api_response#L176-L218","kind":"function","name":"create_mock_openai_responses_api_response","path":"tests/llm/test_response_api.py","language":"python","start_line":176,"end_line":218,"context_start_line":156,"context_end_line":238,"code":" if text_content:\n text_block = MagicMock(spec=anthropic.types.TextBlock)\n text_block.type = \"text\"\n text_block.text = text_content\n response.content.append(text_block)\n if tool_use:\n tool_use_block = MagicMock(spec=anthropic.types.ToolUseBlock)\n tool_use_block.type = \"tool_use\"\n tool_use_block.id = tool_use[\"id\"]\n tool_use_block.name = tool_use[\"name\"]\n tool_use_block.input = tool_use[\"input\"]\n response.content.append(tool_use_block)\n response.usage = MagicMock()\n response.usage.input_tokens = input_tokens\n response.usage.output_tokens = output_tokens\n response.usage.cache_input_tokens = 0\n response.usage.cache_creation_input_tokens = 0\n return response\n\n\ndef create_mock_openai_responses_api_response(\n outputs: Optional[List[Dict[str, Any]]] = None, input_tokens: int = 10, output_tokens: int = 20\n) -> MagicMock:\n \"\"\"\n Helper to create a mock response object similar to what\n openai.resources.Responses.create() would return.\n Compatible with OpenAIResponseModel and TrackAPIPricingMixin.\n \"\"\"\n\n response_mock = MagicMock(spec=openai.types.responses.response.Response)\n response_mock.type = \"response\"\n response_mock.output = []\n\n if outputs:\n for out_data in outputs:\n output_item_mock = MagicMock()\n output_item_mock.type = out_data.get(\"type\")\n\n if output_item_mock.type == \"function_call\":\n # You can adapt this depending on your expected object structure\n output_item_mock.name = out_data.get(\"name\")\n output_item_mock.arguments = out_data.get(\"arguments\")\n output_item_mock.call_id = out_data.get(\"call_id\")\n elif output_item_mock.type == \"reasoning\":\n output_item_mock.summary = []\n for text_content in out_data.get(\"summary\", []):\n summary_text_mock = MagicMock()\n summary_text_mock.text = text_content\n output_item_mock.summary.append(summary_text_mock)\n\n response_mock.output.append(output_item_mock)\n\n # Token usage for pricing tracking\n response_mock.usage = MagicMock(spec=openai.types.responses.response.ResponseUsage)\n response_mock.usage.input_tokens = input_tokens\n response_mock.usage.output_tokens = output_tokens\n response_mock.usage.prompt_tokens = input_tokens\n response_mock.usage.completion_tokens = output_tokens\n input_tokens_details_mock = MagicMock()\n input_tokens_details_mock.cached_tokens = 0\n response_mock.usage.input_tokens_details = input_tokens_details_mock\n\n return response_mock\n\n\n# --- Test MessageBuilders ---\n\n\ndef test_openai_response_api_message_builder_text():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_text(\"Hello, world!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"input_text\", \"text\": \"Hello, world!\"}]\n\n\ndef test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == 
\"user\"","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_response_api_message_builder_text","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_response_api_message_builder_text#L224-L230","kind":"function","name":"test_openai_response_api_message_builder_text","path":"tests/llm/test_response_api.py","language":"python","start_line":224,"end_line":230,"context_start_line":204,"context_end_line":250,"code":" output_item_mock.summary.append(summary_text_mock)\n\n response_mock.output.append(output_item_mock)\n\n # Token usage for pricing tracking\n response_mock.usage = MagicMock(spec=openai.types.responses.response.ResponseUsage)\n response_mock.usage.input_tokens = input_tokens\n response_mock.usage.output_tokens = output_tokens\n response_mock.usage.prompt_tokens = input_tokens\n response_mock.usage.completion_tokens = output_tokens\n input_tokens_details_mock = MagicMock()\n input_tokens_details_mock.cached_tokens = 0\n response_mock.usage.input_tokens_details = input_tokens_details_mock\n\n return response_mock\n\n\n# --- Test MessageBuilders ---\n\n\ndef test_openai_response_api_message_builder_text():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_text(\"Hello, world!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"input_text\", \"text\": \"Hello, world!\"}]\n\n\ndef test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"input_image\", \"image_url\": \"data:image/png;base64,SIMPLEBASE64STRING\"}\n ]\n\n\ndef test_anthropic_api_message_builder_text():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_text(\"Hello, Anthropic!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_response_api_message_builder_image","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_response_api_message_builder_image#L233-L241","kind":"function","name":"test_openai_response_api_message_builder_image","path":"tests/llm/test_response_api.py","language":"python","start_line":233,"end_line":241,"context_start_line":213,"context_end_line":261,"code":" response_mock.usage.completion_tokens = output_tokens\n input_tokens_details_mock = MagicMock()\n input_tokens_details_mock.cached_tokens = 0\n response_mock.usage.input_tokens_details = input_tokens_details_mock\n\n return response_mock\n\n\n# --- Test MessageBuilders ---\n\n\ndef test_openai_response_api_message_builder_text():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_text(\"Hello, world!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"input_text\", \"text\": \"Hello, world!\"}]\n\n\ndef test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"input_image\", \"image_url\": \"data:image/png;base64,SIMPLEBASE64STRING\"}\n ]\n\n\ndef test_anthropic_api_message_builder_text():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_text(\"Hello, Anthropic!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]\n\n\ndef test_anthropic_api_message_builder_image():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,ANTHROPICBASE64\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_anthropic_api_message_builder_text","uri":"program://AgentLab/function/tests.llm.test_response_api.test_anthropic_api_message_builder_text#L244-L250","kind":"function","name":"test_anthropic_api_message_builder_text","path":"tests/llm/test_response_api.py","language":"python","start_line":244,"end_line":250,"context_start_line":224,"context_end_line":270,"code":"def test_openai_response_api_message_builder_text():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_text(\"Hello, world!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"input_text\", \"text\": \"Hello, world!\"}]\n\n\ndef test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"input_image\", \"image_url\": \"data:image/png;base64,SIMPLEBASE64STRING\"}\n ]\n\n\ndef test_anthropic_api_message_builder_text():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_text(\"Hello, Anthropic!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]\n\n\ndef test_anthropic_api_message_builder_image():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,ANTHROPICBASE64\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"\n assert image_content[\"source\"][\"type\"] == \"base64\"\n assert image_content[\"source\"][\"media_type\"] == \"image/png\"\n assert image_content[\"source\"][\"data\"] == \"ANTHROPICBASE64\" # Base64 prefix should be stripped\n\n\ndef test_openai_chat_completion_api_message_builder_text():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_anthropic_api_message_builder_image","uri":"program://AgentLab/function/tests.llm.test_response_api.test_anthropic_api_message_builder_image#L253-L264","kind":"function","name":"test_anthropic_api_message_builder_image","path":"tests/llm/test_response_api.py","language":"python","start_line":253,"end_line":264,"context_start_line":233,"context_end_line":284,"code":"def test_openai_response_api_message_builder_image():\n builder = OpenAIResponseAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,SIMPLEBASE64STRING\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"input_image\", \"image_url\": \"data:image/png;base64,SIMPLEBASE64STRING\"}\n ]\n\n\ndef test_anthropic_api_message_builder_text():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_text(\"Hello, Anthropic!\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]\n\n\ndef test_anthropic_api_message_builder_image():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,ANTHROPICBASE64\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"\n assert image_content[\"source\"][\"type\"] == \"base64\"\n assert image_content[\"source\"][\"media_type\"] == \"image/png\"\n assert image_content[\"source\"][\"data\"] == \"ANTHROPICBASE64\" # Base64 prefix should be stripped\n\n\ndef test_openai_chat_completion_api_message_builder_text():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, ChatCompletion!\"}]\n\n\ndef test_openai_chat_completion_api_message_builder_image():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_image(\"data:image/jpeg;base64,CHATCOMPLETIONBASE64\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_text","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_text#L267-L274","kind":"function","name":"test_openai_chat_completion_api_message_builder_text","path":"tests/llm/test_response_api.py","language":"python","start_line":267,"end_line":274,"context_start_line":247,"context_end_line":294,"code":" messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, Anthropic!\"}]\n\n\ndef test_anthropic_api_message_builder_image():\n builder = AnthropicAPIMessageBuilder.user()\n builder.add_image(\"data:image/png;base64,ANTHROPICBASE64\")\n messages = builder.prepare_message()\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"\n assert image_content[\"source\"][\"type\"] == \"base64\"\n assert image_content[\"source\"][\"media_type\"] == \"image/png\"\n assert image_content[\"source\"][\"data\"] == \"ANTHROPICBASE64\" # Base64 prefix should be stripped\n\n\ndef test_openai_chat_completion_api_message_builder_text():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, ChatCompletion!\"}]\n\n\ndef test_openai_chat_completion_api_message_builder_image():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_image(\"data:image/jpeg;base64,CHATCOMPLETIONBASE64\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"image_url\", \"image_url\": {\"url\": \"data:image/jpeg;base64,CHATCOMPLETIONBASE64\"}}\n ]\n\n\ndef test_openai_chat_completion_model_parse_and_cost():\n args = OpenAIChatModelArgs(model_name=\"gpt-3.5-turbo\")\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_image","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_api_message_builder_image#L277-L286","kind":"function","name":"test_openai_chat_completion_api_message_builder_image","path":"tests/llm/test_response_api.py","language":"python","start_line":277,"end_line":286,"context_start_line":257,"context_end_line":306,"code":" assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert len(messages[0][\"content\"]) == 1\n image_content = messages[0][\"content\"][0]\n assert image_content[\"type\"] == \"image\"\n assert image_content[\"source\"][\"type\"] == \"base64\"\n assert image_content[\"source\"][\"media_type\"] == \"image/png\"\n assert image_content[\"source\"][\"data\"] == \"ANTHROPICBASE64\" # Base64 prefix should be stripped\n\n\ndef test_openai_chat_completion_api_message_builder_text():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, ChatCompletion!\"}]\n\n\ndef test_openai_chat_completion_api_message_builder_image():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_image(\"data:image/jpeg;base64,CHATCOMPLETIONBASE64\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"image_url\", \"image_url\": {\"url\": \"data:image/jpeg;base64,CHATCOMPLETIONBASE64\"}}\n ]\n\n\ndef test_openai_chat_completion_model_parse_and_cost():\n args = OpenAIChatModelArgs(model_name=\"gpt-3.5-turbo\")\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()\n\n mock_response = create_mock_openai_chat_completion(\n content=\"This is a test thought.\",\n tool_calls=[\n {\n \"id\": \"call_123\",\n \"type\": \"function\",\n \"function\": {\"name\": \"get_weather\", \"arguments\": '{\"location\": \"Paris\"}'},\n }\n ],\n prompt_tokens=50,\n completion_tokens=30,","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_chat_completion_model_parse_and_cost","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_model_parse_and_cost#L289-L328","kind":"function","name":"test_openai_chat_completion_model_parse_and_cost","path":"tests/llm/test_response_api.py","language":"python","start_line":289,"end_line":328,"context_start_line":269,"context_end_line":348,"code":" builder.add_text(\"Hello, ChatCompletion!\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [{\"type\": \"text\", \"text\": \"Hello, ChatCompletion!\"}]\n\n\ndef test_openai_chat_completion_api_message_builder_image():\n builder = OpenAIChatCompletionAPIMessageBuilder.user()\n builder.add_image(\"data:image/jpeg;base64,CHATCOMPLETIONBASE64\")\n messages = builder.prepare_message()\n\n assert len(messages) == 1\n assert messages[0][\"role\"] == \"user\"\n assert messages[0][\"content\"] == [\n {\"type\": \"image_url\", \"image_url\": {\"url\": \"data:image/jpeg;base64,CHATCOMPLETIONBASE64\"}}\n ]\n\n\ndef test_openai_chat_completion_model_parse_and_cost():\n args = OpenAIChatModelArgs(model_name=\"gpt-3.5-turbo\")\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()\n\n mock_response = create_mock_openai_chat_completion(\n content=\"This is a test thought.\",\n tool_calls=[\n {\n \"id\": \"call_123\",\n \"type\": \"function\",\n \"function\": {\"name\": \"get_weather\", \"arguments\": '{\"location\": \"Paris\"}'},\n }\n ],\n prompt_tokens=50,\n completion_tokens=30,\n )\n\n with patch.object(\n model.client.chat.completions, \"create\", return_value=mock_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\n \"What's the weather in Paris?\"\n )\n ]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n assert parsed_output.raw_response.choices[0].message.content == \"This is a test thought.\"\n assert parsed_output.action == \"\"\"get_weather(location='Paris')\"\"\"\n assert parsed_output.raw_response.choices[0].message.tool_calls[0].id == \"call_123\"\n # Check cost tracking (token counts)\n assert global_tracker.stats[\"input_tokens\"] == 50\n assert global_tracker.stats[\"output_tokens\"] == 30\n assert global_tracker.stats[\"cost\"] > 0\n\n\ndef test_claude_response_model_parse_and_cost():\n args = ClaudeResponseModelArgs(model_name=\"claude-3-haiku-20240307\")\n model = args.make_model()\n\n mock_anthropic_api_response = create_mock_anthropic_response(\n text_content=\"Thinking about the request.\",\n tool_use={\"id\": \"tool_abc\", \"name\": \"search_web\", \"input\": {\"query\": \"latest news\"}},\n input_tokens=40,\n output_tokens=20,\n )\n\n with patch.object(\n model.client.messages, \"create\", return_value=mock_anthropic_api_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"Search for latest news\")]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_claude_response_model_parse_and_cost","uri":"program://AgentLab/function/tests.llm.test_response_api.test_claude_response_model_parse_and_cost#L331-L357","kind":"function","name":"test_claude_response_model_parse_and_cost","path":"tests/llm/test_response_api.py","language":"python","start_line":331,"end_line":357,"context_start_line":311,"context_end_line":377,"code":" ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\n \"What's the weather in Paris?\"\n )\n ]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n assert parsed_output.raw_response.choices[0].message.content == \"This is a test thought.\"\n assert parsed_output.action == \"\"\"get_weather(location='Paris')\"\"\"\n assert parsed_output.raw_response.choices[0].message.tool_calls[0].id == \"call_123\"\n # Check cost tracking (token counts)\n assert global_tracker.stats[\"input_tokens\"] == 50\n assert global_tracker.stats[\"output_tokens\"] == 30\n assert global_tracker.stats[\"cost\"] > 0\n\n\ndef test_claude_response_model_parse_and_cost():\n args = ClaudeResponseModelArgs(model_name=\"claude-3-haiku-20240307\")\n model = args.make_model()\n\n mock_anthropic_api_response = create_mock_anthropic_response(\n text_content=\"Thinking about the request.\",\n tool_use={\"id\": \"tool_abc\", \"name\": \"search_web\", \"input\": {\"query\": \"latest news\"}},\n input_tokens=40,\n output_tokens=20,\n )\n\n with patch.object(\n model.client.messages, \"create\", return_value=mock_anthropic_api_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"Search for latest news\")]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n fn_call = next(iter(parsed_output.tool_calls))\n\n assert \"Thinking about the request.\" in parsed_output.think\n assert parsed_output.action == \"\"\"search_web(query='latest news')\"\"\"\n assert fn_call.name == \"search_web\"\n assert global_tracker.stats[\"input_tokens\"] == 40\n assert global_tracker.stats[\"output_tokens\"] == 20\n\n\ndef test_openai_response_model_parse_and_cost():\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\")\n\n mock_function_call_output = {\n \"type\": \"function_call\",\n \"name\": \"get_current_weather\",\n \"arguments\": '{\"location\": \"Boston, MA\", \"unit\": \"celsius\"}',\n \"call_id\": \"call_abc123\",\n }\n\n mock_api_resp = create_mock_openai_responses_api_response(\n outputs=[mock_function_call_output],\n input_tokens=70,\n output_tokens=40,\n )\n\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_response_model_parse_and_cost","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_response_model_parse_and_cost#L360-L401","kind":"function","name":"test_openai_response_model_parse_and_cost","path":"tests/llm/test_response_api.py","language":"python","start_line":360,"end_line":401,"context_start_line":340,"context_end_line":421,"code":" )\n\n with patch.object(\n model.client.messages, \"create\", return_value=mock_anthropic_api_response\n ) as mock_create:\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"Search for latest news\")]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create.assert_called_once()\n fn_call = next(iter(parsed_output.tool_calls))\n\n assert \"Thinking about the request.\" in parsed_output.think\n assert parsed_output.action == \"\"\"search_web(query='latest news')\"\"\"\n assert fn_call.name == \"search_web\"\n assert global_tracker.stats[\"input_tokens\"] == 40\n assert global_tracker.stats[\"output_tokens\"] == 20\n\n\ndef test_openai_response_model_parse_and_cost():\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\")\n\n mock_function_call_output = {\n \"type\": \"function_call\",\n \"name\": \"get_current_weather\",\n \"arguments\": '{\"location\": \"Boston, MA\", \"unit\": \"celsius\"}',\n \"call_id\": \"call_abc123\",\n }\n\n mock_api_resp = create_mock_openai_responses_api_response(\n outputs=[mock_function_call_output],\n input_tokens=70,\n output_tokens=40,\n )\n\n with patch(\"agentlab.llm.response_api.OpenAI\") as mock_openai_class:\n mock_client = MagicMock()\n mock_openai_class.return_value = mock_client\n model = args.make_model()\n\n with patch.object(\n model.client.responses, \"create\", return_value=mock_api_resp\n ) as mock_create_method:\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What's the weather in Boston?\")\n ]\n payload = APIPayload(messages=messages)\n parsed_output = model(payload)\n\n mock_create_method.assert_called_once()\n fn_calls = [\n content\n for content in parsed_output.tool_calls.raw_calls.output\n if content.type == \"function_call\"\n ]\n assert parsed_output.action == \"get_current_weather(location='Boston, MA', unit='celsius')\"\n assert fn_calls[0].call_id == \"call_abc123\"\n assert parsed_output.raw_response == mock_api_resp\n assert global_tracker.stats[\"input_tokens\"] == 70\n assert global_tracker.stats[\"output_tokens\"] == 40\n\n\n# --- Test Response Models (Pricy - require API keys and actual calls) ---\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_chat_completion_model_pricy_call():\n \"\"\"Tests OpenAIChatCompletionModel with a real API call.\"\"\"\n args = OpenAIChatModelArgs(\n model_name=\"gpt-4.1\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n\n tools = chat_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_chat_completion_model_pricy_call","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_model_pricy_call#L409-L433","kind":"function","name":"test_openai_chat_completion_model_pricy_call","path":"tests/llm/test_response_api.py","language":"python","start_line":409,"end_line":433,"context_start_line":389,"context_end_line":453,"code":" parsed_output = model(payload)\n\n mock_create_method.assert_called_once()\n fn_calls = [\n content\n for content in parsed_output.tool_calls.raw_calls.output\n if content.type == \"function_call\"\n ]\n assert parsed_output.action == \"get_current_weather(location='Boston, MA', unit='celsius')\"\n assert fn_calls[0].call_id == \"call_abc123\"\n assert parsed_output.raw_response == mock_api_resp\n assert global_tracker.stats[\"input_tokens\"] == 70\n assert global_tracker.stats[\"output_tokens\"] == 40\n\n\n# --- Test Response Models (Pricy - require API keys and actual calls) ---\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_chat_completion_model_pricy_call():\n \"\"\"Tests OpenAIChatCompletionModel with a real API call.\"\"\"\n args = OpenAIChatModelArgs(\n model_name=\"gpt-4.1\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n\n tools = chat_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\" Expected get_weather(location='Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"ANTHROPIC_API_KEY not set\")\ndef test_claude_response_model_pricy_call():\n \"\"\"Tests ClaudeResponseModel with a real API call.\"\"\"\n\n args = ClaudeResponseModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n tools = anthropic_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_claude_response_model_pricy_call","uri":"program://AgentLab/function/tests.llm.test_response_api.test_claude_response_model_pricy_call#L438-L460","kind":"function","name":"test_claude_response_model_pricy_call","path":"tests/llm/test_response_api.py","language":"python","start_line":438,"end_line":460,"context_start_line":418,"context_end_line":480,"code":" model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIChatCompletionAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\" Expected get_weather(location='Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"ANTHROPIC_API_KEY not set\")\ndef test_claude_response_model_pricy_call():\n \"\"\"Tests ClaudeResponseModel with a real API call.\"\"\"\n\n args = ClaudeResponseModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n temperature=1e-5,\n max_new_tokens=100,\n )\n tools = anthropic_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\"Expected get_weather('Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_pricy_call():\n \"\"\"\n Tests OpenAIResponseModel output parsing and cost tracking with both\n function_call and reasoning outputs.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_response_model_pricy_call","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_response_model_pricy_call#L465-L488","kind":"function","name":"test_openai_response_model_pricy_call","path":"tests/llm/test_response_api.py","language":"python","start_line":465,"end_line":488,"context_start_line":445,"context_end_line":508,"code":" )\n tools = anthropic_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [AnthropicAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"get_weather(location='Paris')\"\n ), f\"\"\"Expected get_weather('Paris') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_pricy_call():\n \"\"\"\n Tests OpenAIResponseModel output parsing and cost tracking with both\n function_call and reasoning outputs.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"\"\"get_weather(location='Paris', unit='celsius')\"\"\"\n ), f\"\"\" Expected get_weather(location='Paris', unit='celsius') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_with_multiple_messages_and_cost_tracking():\n \"\"\"\n Test OpenAIResponseModel's output parsing and cost tracking\n with a tool-using assistant and follow-up interaction.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n builder = args.get_message_builder()\n\n messages = [builder.user().add_text(\"What is the weather in Paris?\")]\n\n with tracking.set_tracker() as tracker:\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_response_model_with_multiple_messages_and_cost_tracking","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_response_model_with_multiple_messages_and_cost_tracking#L493-L542","kind":"function","name":"test_openai_response_model_with_multiple_messages_and_cost_tracking","path":"tests/llm/test_response_api.py","language":"python","start_line":493,"end_line":542,"context_start_line":473,"context_end_line":562,"code":" model = args.make_model()\n\n with tracking.set_tracker() as global_tracker:\n messages = [\n OpenAIResponseAPIMessageBuilder.user().add_text(\"What is the weather in Paris?\")\n ]\n payload = APIPayload(messages=messages, tools=tools)\n parsed_output = model(payload)\n\n assert parsed_output.raw_response is not None\n assert (\n parsed_output.action == \"\"\"get_weather(location='Paris', unit='celsius')\"\"\"\n ), f\"\"\" Expected get_weather(location='Paris', unit='celsius') but got {parsed_output.action}\"\"\"\n assert global_tracker.stats[\"input_tokens\"] > 0\n assert global_tracker.stats[\"output_tokens\"] > 0\n assert global_tracker.stats[\"cost\"] > 0\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_response_model_with_multiple_messages_and_cost_tracking():\n \"\"\"\n Test OpenAIResponseModel's output parsing and cost tracking\n with a tool-using assistant and follow-up interaction.\n \"\"\"\n args = OpenAIResponseModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = responses_api_tools\n model = args.make_model()\n builder = args.get_message_builder()\n\n messages = [builder.user().add_text(\"What is the weather in Paris?\")]\n\n with tracking.set_tracker() as tracker:\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n prev_input = tracker.stats[\"input_tokens\"]\n prev_output = tracker.stats[\"output_tokens\"]\n prev_cost = tracker.stats[\"cost\"]\n\n assert parsed.tool_calls, \"Expected tool calls in the response\"\n # Set tool responses\n for tool_call in parsed.tool_calls:\n tool_call.response_text(\"Its sunny! 
25°C\")\n # Simulate tool execution and user follow-up\n messages += [\n builder.add_responded_tool_calls(parsed.tool_calls),\n builder.user().add_text(\"What is the weather in Delhi?\"),\n ]\n\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n\n delta_input = tracker.stats[\"input_tokens\"] - prev_input\n delta_output = tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0\n assert prev_output > 0\n assert prev_cost > 0\n assert parsed.raw_response is not None\n assert (\n parsed.action == \"\"\"get_weather(location='Delhi', unit='celsius')\"\"\"\n ), f\"Unexpected action: {parsed.action}\"\n assert delta_input > 0\n assert delta_output > 0\n assert delta_cost > 0\n assert tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking():\n \"\"\"\n Test OpenAIResponseModel's output parsing and cost tracking\n with a tool-using assistant and follow-up interaction.\n \"\"\"\n args = OpenAIChatModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking","uri":"program://AgentLab/function/tests.llm.test_response_api.test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking#L547-L617","kind":"function","name":"test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking","path":"tests/llm/test_response_api.py","language":"python","start_line":547,"end_line":617,"context_start_line":527,"context_end_line":637,"code":" delta_output = tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0\n assert prev_output > 0\n assert prev_cost > 0\n assert parsed.raw_response is not None\n assert (\n parsed.action == \"\"\"get_weather(location='Delhi', unit='celsius')\"\"\"\n ), f\"Unexpected action: {parsed.action}\"\n assert delta_input > 0\n assert delta_output > 0\n assert delta_cost > 0\n assert tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"OPENAI_API_KEY not set\")\ndef test_openai_chat_completion_model_with_multiple_messages_and_cost_tracking():\n \"\"\"\n Test OpenAIResponseModel's output parsing and cost tracking\n with a tool-using assistant and follow-up interaction.\n \"\"\"\n args = OpenAIChatModelArgs(model_name=\"gpt-4.1\", temperature=1e-5, max_new_tokens=100)\n\n tools = [\n {\n \"type\": \"function\",\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n ]\n\n model = args.make_model()\n builder = args.get_message_builder()\n\n messages = [builder.user().add_text(\"What is the weather in Paris?\")]\n\n with tracking.set_tracker() as tracker:\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n prev_input = tracker.stats[\"input_tokens\"]\n prev_output = tracker.stats[\"output_tokens\"]\n prev_cost = tracker.stats[\"cost\"]\n\n for tool_call in parsed.tool_calls:\n tool_call.response_text(\"Its sunny! 
25°C\")\n # Simulate tool execution and user follow-up\n messages += [\n builder.add_responded_tool_calls(parsed.tool_calls),\n builder.user().add_text(\"What is the weather in Delhi?\"),\n ]\n # Set tool responses\n\n payload = APIPayload(messages=messages, tools=tools, tool_choice=\"required\")\n parsed = model(payload)\n\n delta_input = tracker.stats[\"input_tokens\"] - prev_input\n delta_output = tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0\n assert prev_output > 0\n assert prev_cost > 0\n assert parsed.raw_response is not None\n assert (\n parsed.action == \"\"\"get_weather(location='Delhi')\"\"\"\n ), f\"Unexpected action: {parsed.action}\"\n assert delta_input > 0\n assert delta_output > 0\n assert delta_cost > 0\n assert tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"ANTHROPIC_API_KEY not set\")\ndef test_claude_model_with_multiple_messages_pricy_call():\n model_factory = ClaudeResponseModelArgs(\n model_name=\"claude-3-haiku-20240307\", temperature=1e-5, max_new_tokens=100\n )\n tools = [\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_claude_model_with_multiple_messages_pricy_call","uri":"program://AgentLab/function/tests.llm.test_response_api.test_claude_model_with_multiple_messages_pricy_call#L622-L684","kind":"function","name":"test_claude_model_with_multiple_messages_pricy_call","path":"tests/llm/test_response_api.py","language":"python","start_line":622,"end_line":684,"context_start_line":602,"context_end_line":704,"code":" delta_output = tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0\n assert prev_output > 0\n assert prev_cost > 0\n assert parsed.raw_response is not None\n assert (\n parsed.action == \"\"\"get_weather(location='Delhi')\"\"\"\n ), f\"Unexpected action: {parsed.action}\"\n assert delta_input > 0\n assert delta_output > 0\n assert delta_cost > 0\n assert tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"ANTHROPIC_API_KEY not set\")\ndef test_claude_model_with_multiple_messages_pricy_call():\n model_factory = ClaudeResponseModelArgs(\n model_name=\"claude-3-haiku-20240307\", temperature=1e-5, max_new_tokens=100\n )\n tools = [\n {\n \"name\": \"get_weather\",\n \"description\": \"Get the current weather in a given location.\",\n \"input_schema\": {\n \"type\": \"object\",\n \"properties\": {\n \"location\": {\n \"type\": \"string\",\n \"description\": \"The location to get the weather for.\",\n },\n \"unit\": {\n \"type\": \"string\",\n \"enum\": [\"celsius\", \"fahrenheit\"],\n \"description\": \"The unit of temperature.\",\n },\n },\n \"required\": [\"location\"],\n },\n }\n ]\n model = model_factory.make_model()\n msg_builder = model_factory.get_message_builder()\n messages = []\n\n messages.append(msg_builder.user().add_text(\"What is the weather in Paris?\"))\n with tracking.set_tracker() as global_tracker:\n payload = APIPayload(messages=messages, tools=tools)\n llm_output1 = model(payload)\n\n prev_input = global_tracker.stats[\"input_tokens\"]\n prev_output = global_tracker.stats[\"output_tokens\"]\n prev_cost = global_tracker.stats[\"cost\"]\n\n for tool_call in llm_output1.tool_calls:\n tool_call.response_text(\"It's sunny! 
25°C\")\n messages += [\n msg_builder.add_responded_tool_calls(llm_output1.tool_calls),\n msg_builder.user().add_text(\"What is the weather in Delhi?\"),\n ]\n payload = APIPayload(messages=messages, tools=tools)\n llm_output2 = model(payload)\n delta_input = global_tracker.stats[\"input_tokens\"] - prev_input\n delta_output = global_tracker.stats[\"output_tokens\"] - prev_output\n delta_cost = global_tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0, \"Expected previous input tokens to be greater than 0\"\n assert prev_output > 0, \"Expected previous output tokens to be greater than 0\"\n assert prev_cost > 0, \"Expected previous cost value to be greater than 0\"\n assert llm_output2.raw_response is not None\n assert (\n llm_output2.action == \"\"\"get_weather(location='Delhi', unit='celsius')\"\"\"\n ), f\"\"\"Expected get_weather('Delhi') but got {llm_output2.action}\"\"\"\n assert delta_input > 0, \"Expected new input tokens to be greater than 0\"\n assert delta_output > 0, \"Expected new output tokens to be greater than 0\"\n assert delta_cost > 0, \"Expected new cost value to be greater than 0\"\n assert global_tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert global_tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert global_tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n## Test multiaction\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n # test_config (setting name, BaseModelArgs, model_name, tools)\n tool_test_configs = [\n (\n \"gpt-4.1-responses API\",\n OpenAIResponseModelArgs,\n \"gpt-4.1-2025-04-14\",\n responses_api_tools,\n ),\n (\"gpt-4.1-chat Completions API\", OpenAIChatModelArgs, \"gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", ClaudeResponseModelArgs, \"claude-3-haiku-20240307\", anthropic_tools), # fails","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_multi_action_tool_calls","uri":"program://AgentLab/function/tests.llm.test_response_api.test_multi_action_tool_calls#L690-L750","kind":"function","name":"test_multi_action_tool_calls","path":"tests/llm/test_response_api.py","language":"python","start_line":690,"end_line":750,"context_start_line":670,"context_end_line":770,"code":" delta_cost = global_tracker.stats[\"cost\"] - prev_cost\n\n assert prev_input > 0, \"Expected previous input tokens to be greater than 0\"\n assert prev_output > 0, \"Expected previous output tokens to be greater than 0\"\n assert prev_cost > 0, \"Expected previous cost value to be greater than 0\"\n assert llm_output2.raw_response is not None\n assert (\n llm_output2.action == \"\"\"get_weather(location='Delhi', unit='celsius')\"\"\"\n ), f\"\"\"Expected get_weather('Delhi') but got {llm_output2.action}\"\"\"\n assert delta_input > 0, \"Expected new input tokens to be greater than 0\"\n assert delta_output > 0, \"Expected new output tokens to be greater than 0\"\n assert delta_cost > 0, \"Expected new cost value to be greater than 0\"\n assert global_tracker.stats[\"input_tokens\"] == prev_input + delta_input\n assert global_tracker.stats[\"output_tokens\"] == prev_output + delta_output\n assert global_tracker.stats[\"cost\"] == pytest.approx(prev_cost + delta_cost)\n\n\n## Test multiaction\n@pytest.mark.pricy\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n # test_config (setting name, BaseModelArgs, model_name, tools)\n tool_test_configs = [\n (\n \"gpt-4.1-responses API\",\n OpenAIResponseModelArgs,\n \"gpt-4.1-2025-04-14\",\n responses_api_tools,\n ),\n (\"gpt-4.1-chat Completions API\", OpenAIChatModelArgs, \"gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", ClaudeResponseModelArgs, \"claude-3-haiku-20240307\", anthropic_tools), # fails\n # (\"claude-3.7\", ClaudeResponseModelArgs, \"claude-3-7-sonnet-20250219\", anthropic_tools), # fails\n (\"claude-4-sonnet\", ClaudeResponseModelArgs, \"claude-sonnet-4-20250514\", anthropic_tools),\n # add more models as needed\n ]\n\n def add_user_messages(msg_builder):\n return [\n msg_builder.user().add_text(\"What is the weather in Paris and Delhi?\"),\n msg_builder.user().add_text(\"You must call multiple tools to achieve the task.\"),\n ]\n\n res_df = []\n\n for tool_choice in [\n # 'none',\n # 'required', # fails for Responses API\n # 'any', # fails for Responses API\n \"auto\",\n # 'get_weather'\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n print(name, \"tool choice:\", tool_choice, \"\\n\", \"**\" * 10)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, tool_choice=tool_choice)\n )\n num_tool_calls = len(response.tool_calls) if response.tool_calls else 0\n res_df.append(\n {\n \"model\": name,\n \"checkpoint\": checkpoint_name,\n 
\"tool_choice\": tool_choice,\n \"num_tool_calls\": num_tool_calls,\n \"action\": response.action,\n }\n )\n assert (\n num_tool_calls == 2\n ), f\"Expected 2 tool calls, but got {num_tool_calls} for {name} with tool choice {tool_choice}\"\n # import pandas as pd\n # print(pd.DataFrame(res_df))\n\n\nEDGE_CASES = [\n # 1. Empty kwargs dict\n (\"valid_function\", {}, \"valid_function()\"),\n # 2. Kwargs with problematic string values (quotes, escapes, unicode)\n (\n \"send_message\",\n {\n \"text\": 'He said \"Hello!\" and used a backslash: \\\\',\n \"unicode\": \"Café naïve résumé 🚀\",\n \"newlines\": \"Line1\\nLine2\\tTabbed\",\n },\n \"send_message(text='He said \\\"Hello!\\\" and used a backslash: \\\\\\\\', unicode='Café naïve résumé 🚀', newlines='Line1\\\\nLine2\\\\tTabbed')\",\n ),\n # 3. Mixed types including problematic float values\n (\n \"complex_call\",","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.test_tool_call_to_python_code","uri":"program://AgentLab/function/tests.llm.test_response_api.test_tool_call_to_python_code#L791-L798","kind":"function","name":"test_tool_call_to_python_code","path":"tests/llm/test_response_api.py","language":"python","start_line":791,"end_line":798,"context_start_line":771,"context_end_line":803,"code":" {\n \"infinity\": float(\"inf\"),\n \"nan\": float(\"nan\"),\n \"negative_zero\": -0.0,\n \"scientific\": 1.23e-45,\n },\n \"complex_call(infinity=inf, nan=nan, negative_zero=-0.0, scientific=1.23e-45)\",\n ),\n # 4. Deeply nested structures that could stress repr()\n (\n \"process_data\",\n {\n \"nested\": {\"level1\": {\"level2\": {\"level3\": [1, 2, {\"deep\": True}]}}},\n \"circular_ref_like\": {\"a\": {\"b\": {\"c\": \"back_to_start\"}}},\n },\n \"process_data(nested={'level1': {'level2': {'level3': [1, 2, {'deep': True}]}}}, circular_ref_like={'a': {'b': {'c': 'back_to_start'}}})\",\n ),\n]\n\n\ndef test_tool_call_to_python_code():\n from agentlab.llm.response_api import tool_call_to_python_code\n\n for edge_case in EDGE_CASES:\n func_name, kwargs, expected = edge_case\n result = tool_call_to_python_code(func_name, kwargs)\n print(result)\n assert result == expected, f\"Expected {expected} but got {result}\"\n\n\nif __name__ == \"__main__\":\n test_tool_call_to_python_code()\n # test_openai_chat_completion_model_parse_and_cost()","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_response_api.add_user_messages","uri":"program://AgentLab/function/tests.llm.test_response_api.add_user_messages#L710-L714","kind":"function","name":"add_user_messages","path":"tests/llm/test_response_api.py","language":"python","start_line":710,"end_line":714,"context_start_line":690,"context_end_line":734,"code":"def test_multi_action_tool_calls():\n \"\"\"\n Test that the model can produce multiple tool calls in parallel.\n Uncomment commented lines to see the full behaviour of models and tool choices.\n \"\"\"\n # test_config (setting name, BaseModelArgs, model_name, tools)\n tool_test_configs = [\n (\n \"gpt-4.1-responses API\",\n OpenAIResponseModelArgs,\n \"gpt-4.1-2025-04-14\",\n responses_api_tools,\n ),\n (\"gpt-4.1-chat Completions API\", OpenAIChatModelArgs, \"gpt-4.1-2025-04-14\", chat_api_tools),\n # (\"claude-3\", ClaudeResponseModelArgs, \"claude-3-haiku-20240307\", anthropic_tools), # fails\n # (\"claude-3.7\", ClaudeResponseModelArgs, \"claude-3-7-sonnet-20250219\", anthropic_tools), # fails\n (\"claude-4-sonnet\", ClaudeResponseModelArgs, \"claude-sonnet-4-20250514\", anthropic_tools),\n # add more models as needed\n ]\n\n def add_user_messages(msg_builder):\n return [\n msg_builder.user().add_text(\"What is the weather in Paris and Delhi?\"),\n msg_builder.user().add_text(\"You must call multiple tools to achieve the task.\"),\n ]\n\n res_df = []\n\n for tool_choice in [\n # 'none',\n # 'required', # fails for Responses API\n # 'any', # fails for Responses API\n \"auto\",\n # 'get_weather'\n ]:\n for name, llm_class, checkpoint_name, tools in tool_test_configs:\n print(name, \"tool choice:\", tool_choice, \"\\n\", \"**\" * 10)\n model_args = llm_class(model_name=checkpoint_name, max_new_tokens=200, temperature=None)\n llm, msg_builder = model_args.make_model(), model_args.get_message_builder()\n messages = add_user_messages(msg_builder)\n if tool_choice == \"get_weather\": # force a specific tool call\n response: LLMOutput = llm(\n APIPayload(messages=messages, tools=tools, force_call_tool=tool_choice)\n )\n else:","source_hash":"81eb847e19766c2021a4c0c2ec8d7a1ab6cee79d8197c44101314fa84848b8f3","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_chat_api","uri":"program://AgentLab/module/tests.llm.test_chat_api#L1-L93","kind":"module","name":"tests.llm.test_chat_api","path":"tests/llm/test_chat_api.py","language":"python","start_line":1,"end_line":93,"context_start_line":1,"context_end_line":93,"code":"import os\n\nimport pytest\n\nfrom agentlab.llm.chat_api import (\n AnthropicModelArgs,\n AzureModelArgs,\n OpenAIModelArgs,\n make_system_message,\n make_user_message,\n)\n\n# TODO(optimass): figure out a good model for all tests\n\n\nif \"AGENTLAB_LOCAL_TEST\" in os.environ:\n skip_tests = os.environ[\"AGENTLAB_LOCAL_TEST\"] != \"1\"\nelse:\n skip_tests = False\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Azure is pricy\")\n@pytest.mark.skipif(\n not os.getenv(\"AZURE_OPENAI_API_KEY\"), reason=\"Skipping as Azure API key not set\"\n)\ndef test_api_model_args_azure():\n model_args = AzureModelArgs(\n model_name=\"gpt-4.1-nano\",\n deployment_name=\"gpt-4.1-nano\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Azure is pricy\")\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_api_model_args_openai():\n model_args = OpenAIModelArgs(\n model_name=\"gpt-4o-mini\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Anthropic is pricy\")\n@pytest.mark.skipif(\n not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"Skipping as Anthropic API key not set\"\n)\ndef test_api_model_args_anthropic():\n model_args = AnthropicModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number. Just the number, no explanation.\"),\n ]\n answer = model(messages)\n assert \"5\" in answer.get(\"content\")\n\n\nif __name__ == \"__main__\":\n test_api_model_args_anthropic()","source_hash":"1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_chat_api.test_api_model_args_azure","uri":"program://AgentLab/function/tests.llm.test_chat_api.test_api_model_args_azure#L27-L44","kind":"function","name":"test_api_model_args_azure","path":"tests/llm/test_chat_api.py","language":"python","start_line":27,"end_line":44,"context_start_line":7,"context_end_line":64,"code":" AzureModelArgs,\n OpenAIModelArgs,\n make_system_message,\n make_user_message,\n)\n\n# TODO(optimass): figure out a good model for all tests\n\n\nif \"AGENTLAB_LOCAL_TEST\" in os.environ:\n skip_tests = os.environ[\"AGENTLAB_LOCAL_TEST\"] != \"1\"\nelse:\n skip_tests = False\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Azure is pricy\")\n@pytest.mark.skipif(\n not os.getenv(\"AZURE_OPENAI_API_KEY\"), reason=\"Skipping as Azure API key not set\"\n)\ndef test_api_model_args_azure():\n model_args = AzureModelArgs(\n model_name=\"gpt-4.1-nano\",\n deployment_name=\"gpt-4.1-nano\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Azure is pricy\")\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_api_model_args_openai():\n model_args = OpenAIModelArgs(\n model_name=\"gpt-4o-mini\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)","source_hash":"1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_chat_api.test_api_model_args_openai","uri":"program://AgentLab/function/tests.llm.test_chat_api.test_api_model_args_openai#L50-L66","kind":"function","name":"test_api_model_args_openai","path":"tests/llm/test_chat_api.py","language":"python","start_line":50,"end_line":66,"context_start_line":30,"context_end_line":86,"code":" deployment_name=\"gpt-4.1-nano\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Azure is pricy\")\n@pytest.mark.skipif(not os.getenv(\"OPENAI_API_KEY\"), reason=\"Skipping as OpenAI API key not set\")\ndef test_api_model_args_openai():\n model_args = OpenAIModelArgs(\n model_name=\"gpt-4o-mini\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Anthropic is pricy\")\n@pytest.mark.skipif(\n not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"Skipping as Anthropic API key not set\"\n)\ndef test_api_model_args_anthropic():\n model_args = AnthropicModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number. Just the number, no explanation.\"),","source_hash":"1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_chat_api.test_api_model_args_anthropic","uri":"program://AgentLab/function/tests.llm.test_chat_api.test_api_model_args_anthropic#L74-L89","kind":"function","name":"test_api_model_args_anthropic","path":"tests/llm/test_chat_api.py","language":"python","start_line":74,"end_line":89,"context_start_line":54,"context_end_line":93,"code":" max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number\"),\n ]\n answer = model(messages)\n\n assert \"5\" in answer.get(\"content\")\n\n\n@pytest.mark.pricy\n@pytest.mark.skipif(skip_tests, reason=\"Skipping on remote as Anthropic is pricy\")\n@pytest.mark.skipif(\n not os.getenv(\"ANTHROPIC_API_KEY\"), reason=\"Skipping as Anthropic API key not set\"\n)\ndef test_api_model_args_anthropic():\n model_args = AnthropicModelArgs(\n model_name=\"claude-3-haiku-20240307\",\n max_total_tokens=8192,\n max_input_tokens=8192 - 512,\n max_new_tokens=512,\n temperature=1e-1,\n )\n model = model_args.make_model()\n\n messages = [\n make_system_message(\"You are an helpful virtual assistant\"),\n make_user_message(\"Give the third prime number. Just the number, no explanation.\"),\n ]\n answer = model(messages)\n assert \"5\" in answer.get(\"content\")\n\n\nif __name__ == \"__main__\":\n test_api_model_args_anthropic()","source_hash":"1827bbbe5c8ea015a003ddf56dcabfe4f2dadd506ae591ed9bd0a089f03d664b","truncated":false}
{"repo_id":"AgentLab","entity_id":"py:tests.llm.test_llm_utils","uri":"program://AgentLab/module/tests.llm.test_llm_utils#L1-L280","kind":"module","name":"tests.llm.test_llm_utils","path":"tests/llm/test_llm_utils.py","language":"python","start_line":1,"end_line":280,"context_start_line":1,"context_end_line":280,"code":"import warnings\nfrom typing import Literal\nfrom unittest.mock import Mock\n\nimport httpx\nimport pytest\nfrom openai import RateLimitError\n\nfrom agentlab.llm import llm_utils\nfrom agentlab.llm.chat_api import make_system_message\n\nyaml_str = \"\"\"Analysis:\nThis is the analysis\n\nSummary: This is the summary\n\nConfidence Score: 7\n\"\"\"\n\n\ndef test_yaml_parser():\n ans, _, _ = llm_utils.yaml_parser(yaml_str)\n print(ans)\n assert ans[\"Analysis\"] == \"This is the analysis\"\n assert ans[\"Summary\"] == \"This is the summary\"\n assert ans[\"Confidence Score\"] == 7\n\n\ndef test_truncate_tokens():\n text = \"This is a simple test.\"\n truncated = llm_utils.truncate_tokens(text, max_tokens=3)\n assert truncated == \"This is a\"\n\n\ndef test_count_tokens():\n text = \"This is a simple test.\"\n assert llm_utils.count_tokens(text) == 6\n\n\ndef test_json_parser():\n # Testing valid JSON\n message = '{\"test\": \"Hello, World!\"}'\n\n # deactivate warnings\n warnings.filterwarnings(\"ignore\")\n\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {\"test\": \"Hello, World!\"}\n assert valid == True\n assert retry_message == \"\"\n\n # Testing invalid JSON\n message = '{\"test\": \"Hello, World!\"' # missing closing brace\n value, valid, retry_message = llm_utils.json_parser(message)\n assert value == {}\n assert valid == False\n assert len(retry_message) > 3\n\n # reactivate warnings\n warnings.filterwarnings(\"default\")\n\n\ndef test_compress_string():\n text = \"\"\"\nThis is a test\nfor paragraph.\n\nThis is a second test.\nhola\nThis is a second test.\n\nThis is a test\nfor paragraph.\n\"\"\"\n\n expected_output = \"\"\"\\\n