mgbam committed on
Commit
d57d53a
·
verified ·
1 Parent(s): 6691b20

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -29
app.py CHANGED
@@ -1,12 +1,21 @@
1
  """
2
  app.py – Enterprise SQL Agent (Gradio + smolagents + MCP)
3
 
4
- Secrets / ENV
5
- ─────────────
6
- OPENAI_API_KEY → calls OpenAI (default model gpt-4o, override via OPENAI_MODEL)
7
- GOOGLE_API_KEY → calls Gemini (default model gemini-pro, override via GOOGLE_MODEL)
8
- HF_MODEL_ID → Hugging Face chat-completion model (fallback if no keys)
9
- HF_API_TOKEN → token for gated HF repo (optional)
 
 
 
 
 
 
 
 
 
10
  """
11
 
12
  import os, pathlib, json, pprint, gradio as gr
@@ -14,42 +23,37 @@ from mcp import StdioServerParameters
14
  from smolagents import MCPClient, CodeAgent
15
  from smolagents.models import LiteLLMModel, InferenceClientModel
16
 
17
- # ─── 1. Pick the base LLM ───────────────────────────────────────────────
18
  OPENAI_KEY = os.getenv("OPENAI_API_KEY")
19
- OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")
20
 
21
  GEMINI_KEY = os.getenv("GOOGLE_API_KEY")
22
  GEM_MODEL = os.getenv("GOOGLE_MODEL", "gemini-pro")
23
 
24
  HF_MODEL_ID = os.getenv("HF_MODEL_ID", "microsoft/Phi-3-mini-4k-instruct")
25
- HF_TOKEN = os.getenv("HF_API_TOKEN") # only if the repo is gated
26
 
27
  if OPENAI_KEY:
28
- BASE_MODEL = LiteLLMModel(model_id=f"openai/{OPENAI_MODEL}",
29
- api_key=OPENAI_KEY)
30
- ACTIVE = f"OpenAI · {OPENAI_MODEL}"
31
  elif GEMINI_KEY:
32
- BASE_MODEL = LiteLLMModel(model_id=f"google/{GEM_MODEL}",
33
- api_key=GEMINI_KEY)
34
- ACTIVE = f"Gemini · {GEM_MODEL}"
35
  else:
36
- BASE_MODEL = InferenceClientModel(model_id=HF_MODEL_ID,
37
- hf_api_token=HF_TOKEN,
38
- timeout=90)
39
- ACTIVE = f"Hugging Face · {HF_MODEL_ID}"
40
 
41
- # ─── 2. Path to MCP server ──────────────────────────────────────────────
42
  SERVER_PATH = pathlib.Path(__file__).with_name("mcp_server.py")
43
 
44
- # ─── 3. Chat callback ───────────────────────────────────────────────────
45
  def respond(message: str, history: list):
46
- """Prompt → CodeAgent → MCP tools → safe string reply."""
47
  params = StdioServerParameters(command="python", args=[str(SERVER_PATH)])
48
-
49
  with MCPClient(params) as tools:
50
  answer = CodeAgent(tools=tools, model=BASE_MODEL).run(message)
51
 
52
- # Always stringify for Gradio
53
  if not isinstance(answer, str):
54
  try:
55
  answer = json.dumps(answer, indent=2, ensure_ascii=False)
@@ -62,14 +66,16 @@ def respond(message: str, history: list):
62
  ]
63
  return history, history
64
 
65
- # ─── 4. Build UI ────────────────────────────────────────────────────────
66
  with gr.Blocks(title="Enterprise SQL Agent") as demo:
67
  state = gr.State([])
68
- gr.Markdown("## 🏢 Enterprise SQL Agent — ask questions about your data")
69
 
70
  chat = gr.Chatbot(type="messages", label="Conversation")
71
- box = gr.Textbox(placeholder="e.g. Who are my inactive Northeast customers?",
72
- show_label=False)
 
 
73
  box.submit(respond, [box, state], [chat, state])
74
 
75
  with gr.Accordion("Example prompts", open=False):
@@ -79,7 +85,7 @@ with gr.Blocks(title="Enterprise SQL Agent") as demo:
79
  "* Draft re-engagement emails for inactive accounts."
80
  )
81
 
82
- gr.Markdown(f"_Powered by MCP + smolagents + Gradio • Active model → **{ACTIVE}**_")
83
 
84
  if __name__ == "__main__":
85
  demo.launch()
 
1
  """
2
  app.py – Enterprise SQL Agent (Gradio + smolagents + MCP)
3
 
4
+ Provider priority
5
+ ──────────────────
6
+ 1. OpenAI → set OPENAI_API_KEY (override model with OPENAI_MODEL, default = gpt-4o)
7
+ 2. Gemini → set GOOGLE_API_KEY (override model with GOOGLE_MODEL, default = gemini-pro)
8
+ 3. Hugging Face Inference fallback
9
+ • HF_MODEL_ID (default = microsoft/Phi-3-mini-4k-instruct)
10
+ • HF_API_TOKEN (only if the repo is gated)
11
+
12
+ File layout
13
+ ────────────
14
+ app.py
15
+ mcp_server.py
16
+ connectors/
17
+ └─ salesforce_connector.py
18
+ requirements.txt
19
  """
20
 
21
  import os, pathlib, json, pprint, gradio as gr
 
23
  from smolagents import MCPClient, CodeAgent
24
  from smolagents.models import LiteLLMModel, InferenceClientModel
25
 
26
+ # ───────────────────────── 1. Choose base LLM ──────────────────────────
27
  OPENAI_KEY = os.getenv("OPENAI_API_KEY")
28
+ OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o") # stable id
29
 
30
  GEMINI_KEY = os.getenv("GOOGLE_API_KEY")
31
  GEM_MODEL = os.getenv("GOOGLE_MODEL", "gemini-pro")
32
 
33
  HF_MODEL_ID = os.getenv("HF_MODEL_ID", "microsoft/Phi-3-mini-4k-instruct")
34
+ HF_TOKEN = os.getenv("HF_API_TOKEN") # optional
35
 
36
  if OPENAI_KEY:
37
+ BASE_MODEL = LiteLLMModel(model_id=f"openai/{OPENAI_MODEL}", api_key=OPENAI_KEY)
38
+ ACTIVE = f"OpenAI · {OPENAI_MODEL}"
 
39
  elif GEMINI_KEY:
40
+ BASE_MODEL = LiteLLMModel(model_id=f"google/{GEM_MODEL}", api_key=GEMINI_KEY)
41
+ ACTIVE = f"Gemini · {GEM_MODEL}"
 
42
  else:
43
+ BASE_MODEL = InferenceClientModel(model_id=HF_MODEL_ID, hf_api_token=HF_TOKEN, timeout=90)
44
+ ACTIVE = f"Hugging Face · {HF_MODEL_ID}"
 
 
45
 
46
+ # ───────────────────────── 2. MCP server path ──────────────────────────
47
  SERVER_PATH = pathlib.Path(__file__).with_name("mcp_server.py")
48
 
49
+ # ───────────────────────── 3. Chat callback ────────────────────────────
50
  def respond(message: str, history: list):
51
+ """Prompt → CodeAgent → MCP tools → string reply."""
52
  params = StdioServerParameters(command="python", args=[str(SERVER_PATH)])
 
53
  with MCPClient(params) as tools:
54
  answer = CodeAgent(tools=tools, model=BASE_MODEL).run(message)
55
 
56
+ # ensure plain-text output
57
  if not isinstance(answer, str):
58
  try:
59
  answer = json.dumps(answer, indent=2, ensure_ascii=False)
 
66
  ]
67
  return history, history
68
 
69
+ # ───────────────────────── 4. Gradio UI ────────────────────────────────
70
  with gr.Blocks(title="Enterprise SQL Agent") as demo:
71
  state = gr.State([])
72
+ gr.Markdown("## 🏢 Enterprise SQL Agent — query your data with natural language")
73
 
74
  chat = gr.Chatbot(type="messages", label="Conversation")
75
+ box = gr.Textbox(
76
+ placeholder="e.g. Who are my inactive Northeast customers?",
77
+ show_label=False,
78
+ )
79
  box.submit(respond, [box, state], [chat, state])
80
 
81
  with gr.Accordion("Example prompts", open=False):
 
85
  "* Draft re-engagement emails for inactive accounts."
86
  )
87
 
88
+ gr.Markdown(f"_Powered by MCP · smolagents · Gradio • Active model → **{ACTIVE}**_")
89
 
90
  if __name__ == "__main__":
91
  demo.launch()