Spaces:
No application file
No application file
| import os | |
| from fastapi import FastAPI, HTTPException | |
| from pydantic import BaseModel | |
| # LLM wrappers | |
| from langchain_google_genai import ChatGoogleGenerativeAI | |
| from langchain_openai import AzureChatOpenAI | |
| from browser_use import Agent | |
# Single FastAPI application instance for this service.
app = FastAPI()
class Task(BaseModel):
    """Request body for a browser-automation run."""

    # Natural-language description of what the agent should do.
    task: str
def make_llm():
    """Build and return a LangChain chat model.

    The provider is chosen via the LLM_PROVIDER environment variable:
    'gemini' (default), or 'azure' / 'azure-openai'.

    Returns:
        A configured ChatGoogleGenerativeAI or AzureChatOpenAI instance.

    Raises:
        ValueError: if LLM_PROVIDER names an unsupported provider.
    """
    provider = os.getenv("LLM_PROVIDER", "gemini").lower()

    # --- Google Gemini ------------------------------------------------ #
    if provider == "gemini":
        return ChatGoogleGenerativeAI(
            model=os.getenv("GEMINI_MODEL", "gemini-2.0-flash"),
            temperature=0.3,
            max_retries=3,
            timeout=60,
        )

    # --- Azure OpenAI ------------------------------------------------- #
    if provider in ("azure", "azure-openai"):
        deployment = os.getenv("AZURE_OPENAI_DEPLOYMENT", "gpt-35-turbo")
        version = os.getenv("AZURE_OPENAI_API_VERSION", "2024-05-01-preview")
        return AzureChatOpenAI(
            azure_deployment=deployment,
            api_version=version,
            temperature=float(os.getenv("AZURE_OPENAI_TEMPERATURE", "0.3")),
            max_retries=3,
            timeout=60,
        )

    raise ValueError(f"Unsupported LLM_PROVIDER '{provider}'")
async def run_task(t: Task):
    """Run a Browser-Use agent on the natural-language task in *t*.

    Builds an LLM via make_llm(), runs the agent up to AGENT_MAX_STEPS
    steps, and returns the agent's result.

    Raises:
        HTTPException: status 500 carrying the underlying error message
            when LLM construction or the agent run fails.
    """
    # NOTE(review): no route decorator is visible on this coroutine —
    # confirm it is registered with `app` (e.g. @app.post("/run")) elsewhere.
    try:
        llm = make_llm()
        # The Agent constructor takes no step limit; it is passed to run().
        agent = Agent(task=t.task, llm=llm)
        # Step budget from env, with a sane default.
        max_steps = int(os.getenv("AGENT_MAX_STEPS", "8"))
        # BUG FIX: browser_use.Agent has no run_async() method; its run()
        # is itself a coroutine and must be awaited directly.
        result = await agent.run(max_steps=max_steps)
        return result
    except Exception as e:
        # Chain the cause so the original traceback is preserved in logs.
        raise HTTPException(status_code=500, detail=str(e)) from e