simoncck committed on
Commit
afe2777
·
verified ·
1 Parent(s): 093485f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -8
app.py CHANGED
@@ -1,7 +1,11 @@
1
  import os
2
  from fastapi import FastAPI, HTTPException
3
  from pydantic import BaseModel
 
 
4
  from langchain_google_genai import ChatGoogleGenerativeAI
 
 
5
  from browser_use import Agent
6
 
7
  app = FastAPI()
@@ -10,22 +14,53 @@ class Task(BaseModel):
10
  task: str
11
 
12
  def make_llm():
 
 
 
 
 
 
13
  # LangChain helper; will raise if GOOGLE_API_KEY missing
14
  # return ChatGoogleGenerativeAI(model="gemini-2.0-flash-exp")
15
 
16
- # Allow override from HF Secrets, but fall back to a stable, supported model
17
- model_id = os.getenv("GEMINI_MODEL", "gemini-1.5-pro-latest")
18
- return ChatGoogleGenerativeAI(
19
- model=model_id,
20
- max_retries=3, # built-in exponential back-off
21
- temperature=0.2,
22
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
  @app.post("/run")
25
  async def run_task(t: Task):
26
  try:
27
  llm = make_llm()
28
- agent = Agent(task=t.task, llm=llm) # Browser-Use agent
 
 
 
 
 
29
  result = await agent.run_async() # async version
30
  return result # Browser-Use returns dict
31
  except Exception as e:
 
1
  import os
2
  from fastapi import FastAPI, HTTPException
3
  from pydantic import BaseModel
4
+
5
+ # LLM wrappers
6
  from langchain_google_genai import ChatGoogleGenerativeAI
7
+ from langchain_openai import ChatOpenAI
8
+
9
  from browser_use import Agent
10
 
11
  app = FastAPI()
 
14
  task: str
15
 
16
def make_llm():
    """
    Build and return a LangChain chat model selected by environment variables.

    Env:
        LLM_PROVIDER: 'gemini' (default) | 'azure' | 'azure-openai'
        GEMINI_MODEL: Gemini model id (default 'gemini-2.0-flash')
        AZURE_OPENAI_ENDPOINT / AZURE_OPENAI_DEPLOYMENT /
        AZURE_OPENAI_API_VERSION / AZURE_OPENAI_API_KEY /
        AZURE_OPENAI_TEMPERATURE: Azure OpenAI connection settings

    Returns:
        A LangChain chat model instance for the selected provider.

    Raises:
        ValueError: if LLM_PROVIDER names an unsupported provider.
    """
    provider = os.getenv("LLM_PROVIDER", "gemini").lower()

    # LangChain helper; will raise if GOOGLE_API_KEY missing
    # return ChatGoogleGenerativeAI(model="gemini-2.0-flash-exp")

    if provider == "gemini":
        # Google Gemini via the Generative AI API (needs GOOGLE_API_KEY).
        model = os.getenv("GEMINI_MODEL", "gemini-2.0-flash")  # or 1.5-pro, etc.
        return ChatGoogleGenerativeAI(
            model=model,
            max_retries=3,   # built-in exponential back-off
            timeout=60,
            temperature=0.3,
        )

    if provider in {"azure", "azure-openai"}:
        # Azure OpenAI requires the dedicated AzureChatOpenAI class:
        # plain ChatOpenAI does not accept azure_* keyword arguments, and the
        # correct parameter names are 'azure_deployment' and 'api_version'.
        from langchain_openai import AzureChatOpenAI  # local: Azure-only code path

        return AzureChatOpenAI(
            azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
            azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT", "gpt-35-turbo"),
            api_version=os.getenv("AZURE_OPENAI_API_VERSION", "2024-02-15-preview"),
            api_key=os.getenv("AZURE_OPENAI_API_KEY"),
            max_retries=3,
            timeout=60,
            temperature=float(os.getenv("AZURE_OPENAI_TEMPERATURE", "0.3")),
        )

    raise ValueError(
        f"Unsupported LLM_PROVIDER '{provider}'. "
        "Use 'gemini' or 'azure-openai'."
    )
53
 
54
  @app.post("/run")
55
  async def run_task(t: Task):
56
  try:
57
  llm = make_llm()
58
+ agent = Agent(
59
+ task=t.task,
60
+ llm=llm,
61
+ max_steps=int(os.getenv("AGENT_MAX_STEPS", 6)),
62
+ step_delay=float(os.getenv("AGENT_STEP_DELAY", 2.0)),
63
+ )
64
  result = await agent.run_async() # async version
65
  return result # Browser-Use returns dict
66
  except Exception as e: