Humanlearning committed on
Commit
769cc9d
·
1 Parent(s): 8c16f30

feat: Add Gradio UI for interactive query and expiry sweep agents, integrated with FastAPI and uvicorn.

Browse files
Files changed (3) hide show
  1. app.py +27 -15
  2. pyproject.toml +3 -1
  3. src/credentialwatch_agent/main.py +9 -3
app.py CHANGED
@@ -1,24 +1,36 @@
1
- import sys
2
  import os
 
3
  import asyncio
 
 
 
4
 
5
  # Add src to path so we can import the package
6
  sys.path.append(os.path.join(os.path.dirname(__file__), "src"))
7
 
8
  from credentialwatch_agent.main import demo, mcp_client
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  if __name__ == "__main__":
11
- # Simple async wrapper to run the app
12
- loop = asyncio.new_event_loop()
13
- asyncio.set_event_loop(loop)
14
-
15
- try:
16
- print("Connecting to MCP servers...")
17
- loop.run_until_complete(mcp_client.connect())
18
- print("Starting Gradio app...")
19
- demo.launch()
20
- except KeyboardInterrupt:
21
- pass
22
- finally:
23
- print("Closing MCP connections...")
24
- loop.run_until_complete(mcp_client.close())
 
 
"""Serve the Gradio UI through FastAPI under a single uvicorn process.

Running everything inside one uvicorn server keeps the Gradio UI and the
MCP client on the same asyncio event loop, so the MCP connections can be
awaited directly during startup/shutdown.
"""

import os
import sys
import asyncio  # kept for parity with the original imports
from contextlib import asynccontextmanager

from fastapi import FastAPI
import gradio as gr
import uvicorn

# Add src to path so we can import the package
sys.path.append(os.path.join(os.path.dirname(__file__), "src"))

from credentialwatch_agent.main import demo, mcp_client


@asynccontextmanager
async def lifespan(_app: FastAPI):
    """Hold the MCP connections open for the app's lifetime.

    Replaces the deprecated ``@app.on_event("startup"/"shutdown")`` hooks:
    code before ``yield`` runs at startup, code after it at shutdown.
    """
    print("Connecting to MCP servers...")
    # The server's event loop is already running here, so we await
    # directly instead of using loop.run_until_complete().
    await mcp_client.connect()
    try:
        yield
    finally:
        # Runs on shutdown even if startup-dependent code raised.
        print("Closing MCP connections...")
        await mcp_client.close()


# Create the FastAPI app with the lifespan handler attached.
app = FastAPI(lifespan=lifespan)

# Mount the Gradio app at the root path; mount_gradio_app returns the
# same FastAPI application, now serving the UI at "/".
app = gr.mount_gradio_app(app, demo, path="/")

if __name__ == "__main__":
    # Run with uvicorn so everything shares one async loop.
    uvicorn.run(app, host="0.0.0.0", port=7860)
 
 
 
 
 
 
 
 
 
 
 
pyproject.toml CHANGED
@@ -11,7 +11,9 @@ dependencies = [
11
  "mcp>=0.1.0",
12
  "gradio[mcp]>=6.0.1",
13
  "python-dotenv>=1.0.0",
14
- "httpx>=0.25.0"
 
 
15
  ]
16
 
17
  [build-system]
 
11
  "mcp>=0.1.0",
12
  "gradio[mcp]>=6.0.1",
13
  "python-dotenv>=1.0.0",
14
+ "httpx>=0.25.0",
15
+ "fastapi>=0.100.0",
16
+ "uvicorn>=0.20.0"
17
  ]
18
 
19
  [build-system]
src/credentialwatch_agent/main.py CHANGED
@@ -47,9 +47,15 @@ async def run_chat_turn(message: str, history: List[List[str]]) -> str:
47
  """
48
  # Convert history to LangChain format
49
  messages = []
50
- for human, ai in history:
51
- messages.append(HumanMessage(content=human))
52
- messages.append(AIMessage(content=ai))
 
 
 
 
 
 
53
  messages.append(HumanMessage(content=message))
54
 
55
  initial_state = {"messages": messages}
 
47
  """
48
  # Convert history to LangChain format
49
  messages = []
50
+ for item in history:
51
+ if isinstance(item, (list, tuple)) and len(item) >= 2:
52
+ human = item[0]
53
+ ai = item[1]
54
+ messages.append(HumanMessage(content=str(human)))
55
+ messages.append(AIMessage(content=str(ai)))
56
+ else:
57
+ # Fallback for unexpected format
58
+ print(f"Warning: Skipping malformed history item: {item}")
59
  messages.append(HumanMessage(content=message))
60
 
61
  initial_state = {"messages": messages}