Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,103 +1,56 @@
|
|
| 1 |
-
import os
|
| 2 |
import asyncio
|
| 3 |
-
import
|
|
|
|
| 4 |
import gradio as gr
|
| 5 |
-
from
|
| 6 |
-
from
|
| 7 |
-
|
| 8 |
-
from
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
async def process_query(self, query: str) -> str:
|
| 37 |
-
messages = [
|
| 38 |
-
{"role": "system", "content": self.system_prompt},
|
| 39 |
-
{"role": "user", "content": query},
|
| 40 |
-
]
|
| 41 |
-
|
| 42 |
-
tools = [
|
| 43 |
-
{
|
| 44 |
-
"type": "function",
|
| 45 |
-
"function": {"name": tool.name, "description": tool.description, "parameters": tool.inputSchema},
|
| 46 |
-
}
|
| 47 |
-
for tool in self.tools
|
| 48 |
-
]
|
| 49 |
-
|
| 50 |
-
response = await self._call_model(messages, tools=tools)
|
| 51 |
-
final_text = []
|
| 52 |
-
message = response.choices[0].message
|
| 53 |
-
|
| 54 |
-
if message.tool_calls:
|
| 55 |
-
if message.content:
|
| 56 |
-
messages.append({"role": "assistant", "content": message.content})
|
| 57 |
-
for tool_call in message.tool_calls:
|
| 58 |
-
tool_name = tool_call.function.name
|
| 59 |
-
tool_args = json.loads(tool_call.function.arguments)
|
| 60 |
-
result = await self.mcp_client_session.call_tool(tool_name, tool_args)
|
| 61 |
-
final_text.append(f"[Calling tool {tool_name} with args {tool_args}]")
|
| 62 |
-
messages.append({"role": "user", "content": result.content[0].text})
|
| 63 |
-
response = await self._call_model(messages)
|
| 64 |
-
final_text.append(response.choices[0].message.content)
|
| 65 |
-
elif message.content:
|
| 66 |
-
final_text.append(message.content)
|
| 67 |
-
|
| 68 |
-
return "\n".join(final_text)
|
| 69 |
-
|
| 70 |
-
async def cleanup(self):
|
| 71 |
-
if self.mcp_client_session:
|
| 72 |
-
await self.mcp_client_session.aclose()
|
| 73 |
-
await self.exit_stack.aclose()
|
| 74 |
-
|
| 75 |
-
agent = TinyToolCallingAgent()
|
| 76 |
-
|
| 77 |
-
async def gradio_chat(user_input, history):
|
| 78 |
history = history or []
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
except Exception as e:
|
| 82 |
-
response = f"Error: {str(e)}"
|
| 83 |
history.append(("User", user_input))
|
| 84 |
-
history.append(("Jobcy",
|
| 85 |
return history, history
|
| 86 |
|
| 87 |
async def main():
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
state = gr.State([])
|
| 94 |
-
|
| 95 |
-
user_input.submit(gradio_chat, inputs=[user_input, state], outputs=[chatbot, state])
|
| 96 |
|
| 97 |
-
|
| 98 |
-
demo.launch()
|
| 99 |
|
| 100 |
-
|
|
|
|
| 101 |
|
| 102 |
if __name__ == "__main__":
|
| 103 |
asyncio.run(main())
|
|
|
|
|
|
|
import asyncio
import os
import re

import gradio as gr
from pydantic_ai import Agent
from pydantic_ai.mcp import MCPServerHTTP
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.providers.openai import OpenAIProvider

# MCP Server SSE URL (job-search tool server exposed by the companion Space).
SSE_URL = "https://oppaai-job-search-mcp-server.hf.space/gradio_api/mcp/sse"
server = MCPServerHTTP(url=SSE_URL)

# Make sure your HF_TOKEN is set in environment variables.
HF_TOKEN = os.getenv("HF_TOKEN")

# Use HuggingFace hosted Qwen3-30B-A3B model via an OpenAI-compatible client.
# NOTE(review): confirm this base_url actually exposes the OpenAI
# chat-completions route — HF's OpenAI-compatible endpoints normally end in
# "/v1"; a bad route would surface only at request time.
qwen3_model = OpenAIModel(
    model_name="Qwen/Qwen3-30B-A3B",
    provider=OpenAIProvider(
        base_url="https://api-inference.huggingface.co/models/Qwen/Qwen3-30B-A3B",
        # FIX: OpenAIProvider's keyword is `api_key`, not `token`; passing
        # `token=` raises TypeError as soon as this module is imported.
        api_key=HF_TOKEN,
    ),
)

# Create Agent with MCP Server
agent = Agent(
    model=qwen3_model,
    mcp_servers=[server],
    instructions="""
    Your name is Jobcy. You are an AI assistant designed to help users to find remote jobs by searching through job listings from various sources, including the Jobicy API and other platforms.
    You will list the job listings in a structured format, including the job title, company, location, and the google search link.
    """,
)
|
| 34 |
+
|
| 35 |
+
async def chat_with_agent(user_input, history):
    """Run one chat turn through the Jobcy agent and update the UI history.

    Args:
        user_input: The user's message text from the Textbox.
        history: Current chat history (list of (user, bot) tuples) or None
            on the first turn.

    Returns:
        The updated history twice — once for the Chatbot display and once
        for the gr.State that carries it across turns.
    """
    history = history or []
    try:
        result = await agent.run(user_input)
        # Strip the model's <think>...</think> reasoning block before display.
        reply = re.sub(r"<think>.*?</think>", "", result.output, flags=re.DOTALL).strip()
    except Exception as e:
        # Surface failures in the chat instead of crashing the UI callback.
        reply = f"Error: {str(e)}"
    # FIX: tuple-format Chatbot history is a list of (user_message,
    # bot_message) pairs. Appending ("User", ...) and ("Jobcy", ...) as two
    # separate entries rendered the literal labels as chat messages.
    history.append((user_input, reply))
    return history, history
|
| 42 |
|
| 43 |
async def main():
    """Start the MCP tool server connections and serve the Gradio chat UI."""
    # Keep the MCP server sessions open for the whole lifetime of the UI.
    async with agent.run_mcp_servers():
        # `title` is a documented gr.Blocks constructor argument; assigning
        # demo.title after construction is undocumented and fragile.
        with gr.Blocks(title="Jobcy Remote Job Search Assistant") as demo:
            chatbot = gr.Chatbot()
            user_input = gr.Textbox(
                placeholder="Ask Jobcy about remote jobs or anything else...",
                label="Your Message",
            )
            state = gr.State([])

            user_input.submit(
                chat_with_agent,
                inputs=[user_input, state],
                outputs=[chatbot, state],
            )

            # NOTE(review): launch() blocks this thread while we are inside a
            # running asyncio loop; if startup hangs or errors, consider
            # launch(prevent_thread_lock=True) plus an idle await loop so the
            # MCP context stays alive — confirm against Gradio's docs.
            demo.launch()
|
| 54 |
|
| 55 |
if __name__ == "__main__":
|
| 56 |
asyncio.run(main())
|