Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,54 +1,103 @@
|
|
| 1 |
import os
|
|
|
|
|
|
|
| 2 |
import gradio as gr
|
| 3 |
-
from
|
| 4 |
-
from
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
|
| 9 |
-
# Define the MCP server URL
|
| 10 |
MCP_URL = "https://oppaai-job-search-mcp-server.hf.space/gradio_api/mcp/sse"
|
| 11 |
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
)
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
#
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
)
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
history = history or []
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
|
|
|
|
|
|
|
|
|
| 39 |
return history, history
|
| 40 |
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
|
| 47 |
-
|
|
|
|
| 48 |
|
| 49 |
-
|
| 50 |
-
demo.launch()
|
| 51 |
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
atexit.register(mcp_client.disconnect)
|
|
|
|
| 1 |
import os
|
| 2 |
+
import asyncio
|
| 3 |
+
import json
|
| 4 |
import gradio as gr
|
| 5 |
+
from contextlib import AsyncExitStack
|
| 6 |
+
from typing import Optional
|
| 7 |
|
| 8 |
+
from huggingface_hub import AsyncInferenceClient
|
| 9 |
+
from mcp import ClientSession
|
| 10 |
+
from mcp.http import MCPServerHTTP # Use HTTP MCP client for remote server
|
| 11 |
+
|
| 12 |
+
SYSTEM_PROMPT = """Your name is Jobcy. You are an AI assistant designed to help users find remote jobs by searching through job listings from various sources, including the Jobicy API and other platforms.
|
| 13 |
+
You will list the job listings in a structured format, including the job title, company, location, and a google search link."""
|
| 14 |
|
|
|
|
| 15 |
MCP_URL = "https://oppaai-job-search-mcp-server.hf.space/gradio_api/mcp/sse"
|
| 16 |
|
| 17 |
+
class TinyToolCallingAgent:
    """Minimal tool-calling agent.

    Routes chat turns through a hosted LLM (via huggingface_hub) and executes
    any tool calls the model requests against a remote MCP server over SSE.
    """

    def __init__(self):
        # MCP session is established lazily in connect_to_server().
        self.mcp_client_session: Optional[ClientSession] = None
        # Owns the SSE transport and session lifetimes; unwound in cleanup().
        self.exit_stack = AsyncExitStack()
        self.model = AsyncInferenceClient(
            model="Qwen/Qwen3-8B", provider="hf-inference", token=os.getenv("HF_TOKEN")
        )
        self.system_prompt = SYSTEM_PROMPT
        self.tools = []

    async def connect_to_server(self):
        """Open an SSE connection to the MCP server and cache its tool list.

        Must be awaited once before process_query(); network/handshake errors
        propagate to the caller.
        """
        # Fix: the original used `MCPServerHTTP` from `mcp.http`, which does not
        # exist in the `mcp` SDK (the likely cause of the Space's runtime error).
        # The documented pattern is sse_client + ClientSession, both registered
        # on the exit stack so cleanup() can tear them down in order.
        from mcp.client.sse import sse_client  # local import replaces the invalid mcp.http one

        read_stream, write_stream = await self.exit_stack.enter_async_context(
            sse_client(MCP_URL)
        )
        self.mcp_client_session = await self.exit_stack.enter_async_context(
            ClientSession(read_stream, write_stream)
        )
        # The MCP handshake must complete before any other request is issued.
        await self.mcp_client_session.initialize()

        response = await self.mcp_client_session.list_tools()
        self.tools = response.tools
        print("Connected to MCP server with tools:", [tool.name for tool in self.tools])

    async def _call_model(self, messages, tools=None):
        """One chat-completion round trip; passing `tools` enables function calling."""
        return await self.model.chat_completion(messages, max_tokens=1000, tools=tools)

    async def process_query(self, query: str) -> str:
        """Answer one user query, executing MCP tool calls when requested.

        Args:
            query: the raw user message.

        Returns:
            The model's reply; when tools were invoked, a trace line per call
            followed by the model's summary, joined with newlines.
        """
        messages = [
            {"role": "system", "content": self.system_prompt},
            {"role": "user", "content": query},
        ]

        # Advertise the MCP tools to the model in OpenAI function-calling format.
        tools = [
            {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.inputSchema,
                },
            }
            for tool in self.tools
        ]

        response = await self._call_model(messages, tools=tools)
        final_text = []
        message = response.choices[0].message

        if message.tool_calls:
            if message.content:
                messages.append({"role": "assistant", "content": message.content})
            for tool_call in message.tool_calls:
                tool_name = tool_call.function.name
                # Tool arguments arrive as a JSON string from the model.
                tool_args = json.loads(tool_call.function.arguments)
                result = await self.mcp_client_session.call_tool(tool_name, tool_args)
                final_text.append(f"[Calling tool {tool_name} with args {tool_args}]")
                # Feed the tool output back and ask the model to summarize it.
                # NOTE(review): a {"role": "tool", ...} message keyed to the
                # tool call id would be the standard format — confirm the
                # hf-inference chat endpoint accepts it before switching.
                messages.append({"role": "user", "content": result.content[0].text})
                response = await self._call_model(messages)
                final_text.append(response.choices[0].message.content)
        elif message.content:
            final_text.append(message.content)

        return "\n".join(final_text)

    async def cleanup(self):
        """Release the MCP session and SSE transport.

        Fix: ClientSession has no aclose() method; closing the exit stack
        unwinds the session and transport contexts entered in
        connect_to_server() (a no-op if never connected).
        """
        await self.exit_stack.aclose()
|
| 74 |
+
|
| 75 |
+
# Module-level singleton agent shared by the Gradio callbacks below.
agent = TinyToolCallingAgent()
|
| 76 |
+
|
| 77 |
+
async def gradio_chat(user_input, history):
    """Process one chat turn through the agent and return the updated history.

    Errors from the agent are rendered as an in-chat "Error: ..." reply rather
    than crashing the UI. The history is returned twice: once for the Chatbot
    display and once for the gr.State carrying it to the next turn.
    """
    convo = history if history else []
    try:
        reply = await agent.process_query(user_input)
    except Exception as exc:  # surface failures to the user instead of raising
        reply = f"Error: {str(exc)}"
    convo.extend([("User", user_input), ("Jobcy", reply)])
    return convo, convo
|
| 86 |
|
| 87 |
+
async def main():
    """Connect the agent to the MCP server, build the Gradio UI, and serve."""
    # Connect first so the tool list is populated before any query arrives.
    await agent.connect_to_server()

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot()
        user_input = gr.Textbox(placeholder="Ask Jobcy about remote jobs...", label="Your Message")
        # Per-session chat history carried between turns.
        state = gr.State([])

        # Submitting the textbox routes the turn through gradio_chat, updating
        # both the visible chatbot and the stored history.
        user_input.submit(gradio_chat, inputs=[user_input, state], outputs=[chatbot, state])

    demo.title = "Jobcy Remote Job Search Assistant"
    # NOTE(review): demo.launch() is blocking and is being called inside a
    # coroutine driven by asyncio.run() — confirm this serves correctly, or
    # pass prevent_thread_lock=True and keep the loop alive explicitly.
    demo.launch()

    # Reached only after launch() returns (i.e. on server shutdown).
    await agent.cleanup()
|
|
|
|
| 101 |
|
| 102 |
+
if __name__ == "__main__":
    # Entry point: asyncio.run drives the async setup/serve/teardown in main().
    asyncio.run(main())
|
|
|