"""Quick test: does LongCat-Flash-Chat support tool calling?"""

import asyncio
import os

from dotenv import load_dotenv

# Load env from the script's own directory first, then the repo root two
# levels up. load_dotenv does not overwrite already-set variables, so the
# closer .env wins.
load_dotenv(os.path.join(os.path.dirname(__file__), ".env"))
load_dotenv(os.path.join(os.path.dirname(__file__), "../../.env"))

from openai import AsyncOpenAI


async def test():
    """Probe the LongCat API: force a tool call, then list available models.

    Prints results to stdout; raises SystemExit if LONGCAT_API_KEY is unset.
    """
    api_key = os.getenv("LONGCAT_API_KEY")
    if not api_key:
        # Fail fast with a readable message instead of an opaque 401 later.
        raise SystemExit("LONGCAT_API_KEY is not set; check your .env files")

    client = AsyncOpenAI(
        api_key=api_key,
        base_url="https://api.longcat.chat/openai",
        timeout=30.0,
    )

    tools = [{
        "type": "function",
        "function": {
            "name": "gmail_task",
            "description": "Delegate a Gmail task. Use for: searching/reading emails, sending/drafting messages, managing labels and filters.",
            "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The Gmail query to perform"}}, "required": ["query"]}
        }
    }]

    # Shared request payload — hoisted so the "required" attempt and the
    # "auto" fallback are guaranteed to send identical messages.
    messages = [{"role": "user", "content": "check my gmail aiwithjawadsaghir@gmail.com"}]

    # Test 1: tool_choice="required" to force a tool call. Some
    # OpenAI-compatible providers reject "required", so fall back to "auto".
    print("=== Test: tool_choice='required' ===")
    try:
        resp = await client.chat.completions.create(
            model="LongCat-Flash-Chat",
            messages=messages,
            tools=tools,
            tool_choice="required",
        )
    except Exception as e:
        print(f"Error with required: {e}")
        # fallback to auto
        resp = await client.chat.completions.create(
            model="LongCat-Flash-Chat",
            messages=messages,
            tools=tools,
            tool_choice="auto",
        )

    msg = resp.choices[0].message
    print(f"Role: {msg.role}")
    print(f"Content: {msg.content}")
    print(f"Tool calls: {msg.tool_calls}")
    if msg.tool_calls:
        for tc in msg.tool_calls:
            print(f"  -> {tc.function.name}({tc.function.arguments})")
    else:
        print("\n*** MODEL DID NOT CALL ANY TOOLS ***")

    # Test 2: list available models — also confirms the key/base_url work.
    print("\n=== Available Models ===")
    try:
        models = await client.models.list()
        for m in models.data:
            print(f"  - {m.id}")
    except Exception as e:
        print(f"Error listing models: {e}")


if __name__ == "__main__":
    asyncio.run(test())