# FastAPI wrapper around a Browser-Use agent with a configurable LLM backend
# (Google Gemini or Azure OpenAI, selected via environment variables).
import os
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
# LLM wrappers
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_openai import AzureChatOpenAI
from browser_use import Agent
app = FastAPI()  # ASGI application exposing the POST /run endpoint
class Task(BaseModel):
    """Request body for POST /run: a natural-language task for the agent."""

    task: str  # instruction the browser agent should carry out
def make_llm():
    """
    Build and return a LangChain chat model selected via environment.

    Env:
        LLM_PROVIDER: 'gemini' (default) | 'azure' | 'azure-openai'
        GEMINI_MODEL, AZURE_OPENAI_DEPLOYMENT, AZURE_OPENAI_API_VERSION,
        AZURE_OPENAI_TEMPERATURE: per-provider overrides with sane defaults.

    Returns:
        A configured ChatGoogleGenerativeAI or AzureChatOpenAI instance.

    Raises:
        ValueError: if LLM_PROVIDER names an unsupported backend.
    """
    provider = os.getenv("LLM_PROVIDER", "gemini").lower()

    # --- Google Gemini ------------------------------------------------- #
    if provider == "gemini":
        return ChatGoogleGenerativeAI(
            model=os.getenv("GEMINI_MODEL", "gemini-2.0-flash"),
            temperature=0.3,
            max_retries=3,
            timeout=60,
        )

    # --- Azure OpenAI --------------------------------------------------- #
    # AzureChatOpenAI expects `azure_deployment` and `api_version` kwargs
    # (not the plain-OpenAI `model` / `openai_api_version` spellings).
    if provider in {"azure", "azure-openai"}:
        return AzureChatOpenAI(
            azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT", "gpt-35-turbo"),
            api_version=os.getenv("AZURE_OPENAI_API_VERSION", "2024-05-01-preview"),
            temperature=float(os.getenv("AZURE_OPENAI_TEMPERATURE", "0.3")),
            max_retries=3,
            timeout=60,
        )

    raise ValueError(f"Unsupported LLM_PROVIDER '{provider}'")
@app.post("/run")
async def run_task(t: Task):
    """
    Run a Browser-Use agent on the posted task and return its result.

    Body:
        t: Task whose `task` field is a natural-language instruction.

    Returns:
        Whatever the agent's run produces (Browser-Use returns its run
        history/result object, serialized by FastAPI).

    Raises:
        HTTPException(500): on any failure while building the LLM or
        running the agent; the original error text goes into `detail`.
    """
    try:
        llm = make_llm()
        # The Agent constructor takes no max_steps — the step budget is
        # passed to run() instead.
        agent = Agent(task=t.task, llm=llm)
        # Step limit from env, falling back to a sane default.
        max_steps = int(os.getenv("AGENT_MAX_STEPS", "8"))
        # Agent.run is the async entry point (there is no run_async method).
        result = await agent.run(max_steps=max_steps)
        return result
    except Exception as e:
        # Surface the failure to the HTTP client with the original message.
        raise HTTPException(status_code=500, detail=str(e)) from e