browserUSEAPI / app.py
simoncck's picture
Update app.py
7199bdb verified
raw
history blame
2.02 kB
import os
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
# LLM wrappers
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import AzureChatOpenAI
from browser_use import Agent
app = FastAPI()  # FastAPI application instance served by this Space
class Task(BaseModel):
    """Request body for ``POST /run``."""

    task: str  # natural-language instruction forwarded to the browser agent
def make_llm():
    """
    Build the LangChain chat model selected by the ``LLM_PROVIDER`` env var.

    Supported values (case-insensitive):
      * ``gemini`` (default)         -> ChatGoogleGenerativeAI
      * ``azure`` / ``azure-openai`` -> AzureChatOpenAI

    Returns:
        A configured LangChain chat model instance.

    Raises:
        ValueError: if ``LLM_PROVIDER`` names an unsupported provider.
    """
    provider = os.getenv("LLM_PROVIDER", "gemini").lower()

    # --- Google Gemini ------------------------------------------- #
    if provider == "gemini":
        return ChatGoogleGenerativeAI(
            model=os.getenv("GEMINI_MODEL", "gemini-2.0-flash"),
            # Read temperature from env for parity with the Azure branch;
            # the default preserves the previously hard-coded 0.3.
            temperature=float(os.getenv("GEMINI_TEMPERATURE", "0.3")),
            max_retries=3,
            timeout=60,
        )

    # --- Azure OpenAI -------------------------------------------- #
    if provider in {"azure", "azure-openai"}:
        return AzureChatOpenAI(
            azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT", "gpt-35-turbo"),
            api_version=os.getenv("AZURE_OPENAI_API_VERSION", "2024-05-01-preview"),
            temperature=float(os.getenv("AZURE_OPENAI_TEMPERATURE", "0.3")),
            max_retries=3,
            timeout=60,
        )

    raise ValueError(f"Unsupported LLM_PROVIDER '{provider}'")
@app.post("/run")
async def run_task(t: Task):
    """
    Execute a natural-language browser task with a Browser-Use agent.

    Body: ``{"task": "<instruction>"}``.
    Returns the agent's run result; any failure surfaces as HTTP 500
    with the exception text in ``detail``.
    """
    try:
        llm = make_llm()
        # Agent takes the step limit at run time, not in the constructor.
        agent = Agent(task=t.task, llm=llm)
        # Step limit comes from env, with a sane default when unset.
        max_steps = int(os.getenv("AGENT_MAX_STEPS", "8"))
        # BUG FIX: browser_use.Agent exposes an async ``run`` coroutine;
        # ``run_async`` does not exist and raised AttributeError here.
        result = await agent.run(max_steps=max_steps)
        # NOTE(review): ``run`` returns the agent's history object, not a
        # plain dict — FastAPI must be able to serialize it; TODO confirm.
        return result
    except HTTPException:
        raise  # don't re-wrap deliberate HTTP errors
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e)) from e