# Job-Application-Assistant / mcp / server_common.py
# Shared MCP tool registration for the Multi-Agent Job Application Assistant.
# (Header reconstructed from scraped page residue: author "Noo88ear",
#  commit 7498f2c "🚀 Initial deployment of Multi-Agent Job Application Assistant".)
from __future__ import annotations
import asyncio
from typing import Callable, Awaitable
from mcp.server import Server
from services.web_research import get_role_guidelines
from services.llm import llm
def create_common_tools(server: Server) -> None:
    """Register the tools shared by every MCP server in this project.

    Args:
        server: The MCP ``Server`` instance to attach the tools to.
    """

    @server.tool()
    async def research_guidelines(role_title: str, job_description: str) -> str:
        """Fetch latest best-practice guidance for a role (uses Tavily if configured)."""
        # get_role_guidelines is synchronous (it does network I/O); run it in a
        # worker thread so the MCP event loop is not blocked while it runs.
        return await asyncio.to_thread(get_role_guidelines, role_title, job_description)

    @server.tool()
    async def llm_refine(system_prompt: str, user_prompt: str, max_tokens: int = 800) -> str:
        """Refine a text snippet using the configured LLM provider (OpenAI/Anthropic/Gemini)."""
        # llm.generate blocks on a provider HTTP call; offload it likewise so
        # concurrent tool invocations stay responsive.
        return await asyncio.to_thread(llm.generate, system_prompt, user_prompt, max_tokens=max_tokens)
def run_server(server: Server, host: str = "127.0.0.1", port: int = 8765) -> None:
    """Run *server* over stdio, blocking until the server loop exits.

    NOTE(review): ``host`` and ``port`` are accepted but never used —
    ``run_stdio_async`` speaks stdio, not TCP. Confirm whether callers
    expect a network listener before relying on these parameters.
    """
    # Minimal run loop for development embedding
    asyncio.run(server.run_stdio_async())