abhishekrn commited on
Commit
979df15
·
1 Parent(s): 117809b

Clean up: Remove backup_v1 directory and update configuration files

Browse files
backup_v1/agent/__init__.py DELETED
File without changes
backup_v1/agent/model_client.py DELETED
@@ -1,75 +0,0 @@
1
- import aiohttp
2
- import json
3
- from app.config import settings
4
- from app.prompts.system_prompts import topcoder_system_prompt
5
-
6
- class LLMClient:
7
- def __init__(self):
8
- self.api_url = settings.LLM_API_URL or "https://router.huggingface.co/v1/chat/completions"
9
- self.headers = {
10
- "Authorization": f"Bearer {settings.HF_TOKEN}",
11
- "Content-Type": "application/json"
12
- }
13
- self.model = settings.HF_MODEL or "Qwen/Qwen2.5-7B-Instruct:together"
14
- self.temperature = settings.LLM_TEMPERATURE or 0.8
15
-
16
- async def chat(self, messages) -> str:
17
- system_msg = {"role": "system", "content": topcoder_system_prompt}
18
- payload = {
19
- "model": self.model,
20
- "messages": [system_msg] + messages,
21
- "temperature": self.temperature
22
- }
23
-
24
- async with aiohttp.ClientSession() as session:
25
- async with session.post(self.api_url, headers=self.headers, json=payload) as resp:
26
- if resp.status != 200:
27
- text = await resp.text()
28
- raise Exception(f"LLM error {resp.status}: {text}")
29
- data = await resp.json()
30
- return data["choices"][0]["message"]["content"]
31
-
32
- async def decide_tool(self, prompt: str) -> dict:
33
- """Decide which tool to use based on dynamic prompt."""
34
-
35
- system_msg = {
36
- "role": "system",
37
- "content": "You are a tool decision assistant. Analyze the user's request and available tools/resources, then respond ONLY with a JSON object containing the tool name and parameters."
38
- }
39
- user_msg = {
40
- "role": "user",
41
- "content": prompt
42
- }
43
-
44
- response = await self.chat([system_msg, user_msg])
45
-
46
- # Debug: Print the raw LLM response
47
- print(f"🔍 LLM raw decision response:\n{response}")
48
-
49
- # Try parsing the JSON
50
- try:
51
- return json.loads(response.strip())
52
- except json.JSONDecodeError:
53
- # Fallback: treat as chat if LLM fails
54
- return {
55
- "tool": "chat",
56
- "params": {}
57
- }
58
-
59
- async def complete_json(self, prompt: str) -> dict:
60
- """
61
- Calls the LLM to complete a JSON-only response based on a prompt.
62
- """
63
- raw_response = await self.chat([
64
- {"role": "system", "content": "You are a strict API assistant. Only return valid JSON."},
65
- {"role": "user", "content": prompt.strip()}
66
- ])
67
-
68
- try:
69
- # Try to parse JSON safely
70
- json_start = raw_response.find("{")
71
- json_text = raw_response[json_start:]
72
- return json.loads(json_text)
73
- except Exception as e:
74
- return {"error": f"Failed to parse JSON: {str(e)}", "raw": raw_response}
75
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/agent/prompt_engine.py DELETED
@@ -1,110 +0,0 @@
1
- from app.mcp.compact_utils import CompactSpecsUtils
2
-
3
- class PromptBuilder:
4
- def __init__(self):
5
- self.compact_utils = CompactSpecsUtils()
6
-
7
- def build_tool_decision_prompt(self, user_input: str) -> str:
8
- """Build dynamic prompt based on available compact specs."""
9
-
10
- # Get available tools and resources
11
- working_tools = self.compact_utils.get_working_tools()
12
- working_resources = self.compact_utils.get_working_resources()
13
-
14
- # Build tools section with clear usage instructions
15
- tools_section = ""
16
- if working_tools:
17
- tools_section = "Available tools:\n"
18
- for tool_name in working_tools:
19
- spec = self.compact_utils.get_tool_spec(tool_name)
20
- if spec:
21
- description = spec.get("description", "")
22
- parameters = spec.get("parameters", {})
23
-
24
- # Create clear usage instructions with comprehensive parameter details
25
- if tool_name == "query-tc-skills":
26
- tools_section += f"- {tool_name}: Use this when the user asks about skills, technologies, or competencies.\n"
27
- elif tool_name == "query-tc-challenges":
28
- tools_section += f"- {tool_name}: Use this when the user asks about challenges, competitions, or contests.\n"
29
- else:
30
- tools_section += f"- {tool_name}: {description}.\n"
31
-
32
- # Add detailed parameter information
33
- if parameters:
34
- tools_section += f" Parameters for {tool_name}:\n"
35
- for param_name, param_desc in parameters.items():
36
- # Truncate very long descriptions for readability
37
- short_desc = param_desc[:150] + "..." if len(param_desc) > 150 else param_desc
38
- tools_section += f" - {param_name}: {short_desc}\n"
39
-
40
- # Build resources section
41
- resources_section = ""
42
- if working_resources:
43
- resources_section = "Available API resources:\n"
44
- for resource_name in working_resources:
45
- spec = self.compact_utils.get_resource_spec(resource_name)
46
- if spec:
47
- description = spec.get("description", "")
48
- total_endpoints = spec.get("total_endpoints", 0)
49
- key_endpoints = spec.get("key_endpoints", [])
50
-
51
- # Create clear usage instructions
52
- if "challenge" in resource_name.lower():
53
- resources_section += f"- {resource_name}: Use this for challenge API calls. Use /challenges endpoint.\n"
54
- elif "member" in resource_name.lower():
55
- resources_section += f"- {resource_name}: Use this for member API calls. Use /members endpoint.\n"
56
- else:
57
- resources_section += f"- {resource_name}: {description[:100]}...\n"
58
-
59
- # Add detailed parameter information from key endpoints
60
- if key_endpoints:
61
- for endpoint in key_endpoints[:1]: # Show first endpoint parameters
62
- endpoint_params = endpoint.get("parameters", {})
63
- if endpoint_params:
64
- resources_section += f" Parameters for {endpoint.get('path', '')}:\n"
65
- for param_name, param_desc in endpoint_params.items():
66
- # Truncate very long descriptions for readability
67
- short_desc = param_desc[:150] + "..." if len(param_desc) > 150 else param_desc
68
- resources_section += f" - {param_name}: {short_desc}\n"
69
-
70
- prompt = f"""
71
- You are an assistant that ONLY works with Topcoder's Member Communication Platform (MCP).
72
-
73
- Your goal is to decide the correct tool or resource to call, based ONLY on the user input.
74
-
75
- {tools_section}
76
- {resources_section}
77
- - chat: Use for casual greetings, thanks, or general questions that don't need any tool.
78
- - reject: Use if the query is not related to Topcoder.
79
-
80
- IMPORTANT:
81
- - For tools: Only include parameters that have meaningful values from the user query. Do not include empty or default parameters.
82
- - For resources: Use the resource name and specify the endpoint path and method.
83
- - For member queries: Use /members with parameters like handle, page, perPage. For specific members, use /members/{{handle}}.
84
- - For challenge queries: Use /challenges with parameters like status, track, page, perPage.
85
- - Always include relevant parameters in the params object for API calls.
86
-
87
- Respond ONLY with a JSON object in this format:
88
- {{"tool": "<tool_name>", "params": {{"param1": "value1", "param2": "value2"}} }}
89
- OR
90
- {{"resource": "<resource_name>", "endpoint": "/path", "method": "GET", "params": {{"param1": "value1", "param2": "value2"}} }}
91
-
92
- User input: "{user_input}"
93
- """
94
- return prompt
95
-
96
- def get_available_tools(self) -> list:
97
- """Get list of available tools for reference."""
98
- return self.compact_utils.get_working_tools()
99
-
100
- def get_available_resources(self) -> list:
101
- """Get list of available resources for reference."""
102
- return self.compact_utils.get_working_resources()
103
-
104
- def get_tool_parameters(self, tool_name: str) -> dict:
105
- """Get available parameters for a specific tool."""
106
- return self.compact_utils.get_tool_parameters(tool_name)
107
-
108
- def get_resource_endpoints(self, resource_name: str) -> list:
109
- """Get available endpoints for a specific resource."""
110
- return self.compact_utils.get_resource_endpoints(resource_name)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/agent/schema.py DELETED
@@ -1,29 +0,0 @@
1
- from typing import Optional, Literal, Dict, Any, Union
2
- from dataclasses import dataclass
3
-
4
- # Dynamic tool types from compact specs
5
- ToolType = str # Will be populated from compact specs
6
-
7
- @dataclass
8
- class ToolRequest:
9
- tool: ToolType
10
- params: Optional[Dict[str, Any]] = None
11
-
12
- @dataclass
13
- class ToolResponse:
14
- status: Literal["success", "error"]
15
- data: Optional[Any] = None
16
- message: Optional[str] = None
17
-
18
- @dataclass
19
- class CompactToolSpec:
20
- """Compact tool specification from MCP catalog."""
21
- description: str
22
- parameters: Dict[str, str]
23
-
24
- @dataclass
25
- class CompactResourceSpec:
26
- """Compact resource specification from MCP catalog."""
27
- description: str
28
- total_endpoints: int
29
- key_endpoints: list
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/agent/tool_executor.py DELETED
@@ -1,127 +0,0 @@
1
- import asyncio
2
- from app.mcp.stream_client import load_server_config, StreamableHttpMCPClient
3
- from app.mcp.compact_utils import CompactSpecsUtils
4
- from app.agent.schema import ToolRequest, ToolResponse
5
-
6
- class ToolExecutor:
7
- def __init__(self):
8
- self.compact_utils = CompactSpecsUtils()
9
-
10
- async def execute(self, request: ToolRequest) -> ToolResponse:
11
- """Execute a tool or resource request using compact specs and MCP client."""
12
- try:
13
- tool_name = request.tool
14
- params = request.params or {}
15
-
16
- # Check if it's a tool
17
- tool_spec = self.compact_utils.get_tool_spec(tool_name)
18
- if tool_spec:
19
- return await self._execute_tool(tool_name, params)
20
-
21
- # Check if it's a resource
22
- resource_spec = self.compact_utils.get_resource_spec(tool_name)
23
- if resource_spec:
24
- return await self._execute_resource(tool_name, params)
25
-
26
- return ToolResponse(
27
- status="error",
28
- message=f"'{tool_name}' not found in available tools or resources"
29
- )
30
-
31
- except Exception as e:
32
- return ToolResponse(status="error", message=str(e))
33
-
34
- async def _execute_tool(self, tool_name: str, params: dict) -> ToolResponse:
35
- """Execute a tool request."""
36
- try:
37
- # Format the tool request using compact specs
38
- formatted_request = self.compact_utils.format_tool_request(tool_name, params)
39
-
40
- # Execute via MCP client
41
- server_config = load_server_config()
42
- async with StreamableHttpMCPClient(server_config) as client:
43
- await client.initialize()
44
-
45
- # Make the tool call
46
- result = await client.call("tools/call", formatted_request["params"])
47
-
48
- # Check for errors
49
- if "error" in result:
50
- error_msg = result["error"].get("message", "Unknown error")
51
- return ToolResponse(status="error", message=error_msg)
52
-
53
- # Return success with data
54
- return ToolResponse(status="success", data=result.get("result", result))
55
-
56
- except Exception as e:
57
- return ToolResponse(status="error", message=str(e))
58
-
59
- async def _execute_resource(self, resource_name: str, params: dict) -> ToolResponse:
60
- """Execute a resource (API) request using direct API calls."""
61
- try:
62
- # Extract endpoint and method from params
63
- endpoint = params.get("endpoint", "/")
64
- method = params.get("method", "GET")
65
- api_params = {k: v for k, v in params.items() if k not in ["endpoint", "method"]}
66
-
67
- # Substitute path parameters in the endpoint
68
- substituted_endpoint = endpoint
69
- params_to_remove = []
70
- for param_name, param_value in api_params.items():
71
- placeholder = f"{{{param_name}}}"
72
- if placeholder in substituted_endpoint:
73
- substituted_endpoint = substituted_endpoint.replace(placeholder, str(param_value))
74
- # Mark parameter for removal since it's now in the path
75
- params_to_remove.append(param_name)
76
-
77
- # Remove substituted parameters from api_params
78
- for param_name in params_to_remove:
79
- api_params.pop(param_name, None)
80
-
81
- # Make direct API call using compact specs
82
- result = await self.compact_utils.make_direct_api_call(
83
- resource_name, substituted_endpoint, method, api_params
84
- )
85
-
86
- # Check for errors
87
- if result.get("status") != 200:
88
- error_msg = result.get("error", "Unknown error")
89
- return ToolResponse(status="error", message=f"API error {result.get('status')}: {error_msg}")
90
-
91
- # Return success with data
92
- return ToolResponse(status="success", data=result.get("data", result))
93
-
94
- except Exception as e:
95
- return ToolResponse(status="error", message=str(e))
96
-
97
- def get_available_tools(self) -> list:
98
- """Get list of available tools."""
99
- return self.compact_utils.get_working_tools()
100
-
101
- def get_available_resources(self) -> list:
102
- """Get list of available resources."""
103
- return self.compact_utils.get_working_resources()
104
-
105
- def get_tool_parameters(self, tool_name: str) -> dict:
106
- """Get available parameters for a tool."""
107
- return self.compact_utils.get_tool_parameters(tool_name)
108
-
109
- def get_resource_endpoints(self, resource_name: str) -> list:
110
- """Get available endpoints for a resource."""
111
- return self.compact_utils.get_resource_endpoints(resource_name)
112
-
113
- def validate_tool_request(self, tool_name: str, params: dict) -> bool:
114
- """Validate if a tool request is valid."""
115
- try:
116
- self.compact_utils.format_tool_request(tool_name, params)
117
- return True
118
- except Exception:
119
- return False
120
-
121
- def validate_resource_request(self, resource_name: str, endpoint: str, method: str = "GET") -> bool:
122
- """Validate if a resource request is valid."""
123
- try:
124
- self.compact_utils.format_resource_request(resource_name, endpoint, method)
125
- return True
126
- except Exception:
127
- return False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/config/__init__.py DELETED
File without changes
backup_v1/config/api_endpoints.py DELETED
@@ -1,16 +0,0 @@
1
- # Hardcoded base URLs for Topcoder APIs
2
- # This file contains only the base URLs, parameters are fetched dynamically
3
-
4
- API_BASE_URLS = {
5
- "Member_V6_API_Swagger": "https://api.topcoder.com/v5",
6
- "Challenges_V6_API_Swagger": "https://api.topcoder.com/v5",
7
- "Identity_V6_API_Swagger": "https://api.topcoder.com/v5"
8
- }
9
-
10
- def get_base_url(resource_name: str) -> str:
11
- """Get base URL for a resource."""
12
- return API_BASE_URLS.get(resource_name, "")
13
-
14
- def get_all_base_urls() -> dict:
15
- """Get all available base URLs."""
16
- return API_BASE_URLS.copy()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/config/compact_api_specs.py DELETED
@@ -1,65 +0,0 @@
1
- # Generated compact API specs from MCP catalog
2
- # Only includes working tools and resources
3
-
4
- COMPACT_API_SPECS = {
5
- # Working Tools
6
- "query-tc-skills": {
7
- "description": "Returns a list of standardized skills from Topcoder platform, filtered and sorted based on the provided parameters.",
8
- "parameters": {
9
- "name": "Filter by skill names, exact match. Array of string. Optional",
10
- "skillId": "Filter by skill IDs, exact match. Array of string. Optional",
11
- "sortBy": "Sort skills by a specific field. Type: string. Optional. Valid values: \"name\", \"description\", \"created_at\", \"updated_at\"",
12
- "sortOrder": "Sort order. Type: string. Optional. Valid values: \"ASC\", \"DESC\"",
13
- "page": "Page number for pagination, starting from 1. Type: number. Optional. Default: 1",
14
- "perPage": "Number of standardized skills per page, between 1 and 100. Type: number. Optional. Default: 20",
15
- }
16
- },
17
- "query-tc-challenges": {
18
- "description": "Returns a list of Topcoder challenges based on the query parameters.",
19
- "parameters": {
20
- "id": "Filter by challenge ID, exact match. Type: string. Optional",
21
- "status": "Filter by challenge status. Type: string. Optional. Valid values: \"New\", \"Draft\", \"Cancelled\", \"Active\", \"Completed\", \"Deleted\", \"Cancelled - Failed Review\", \"Cancelled - Failed Screening\"... (and 5 more)",
22
- "type": "Filter by type abbreviation, exact match. Type: string. Optional",
23
- "track": "Filter by track, case-insensitive, partial matches are allowed. Type: string. Optional",
24
- "tag": "Filter by tag name, case-insensitive, partial matches are allowed. Type: string. Optional",
25
- "tags": "Filter by multiple tag names, case-insensitive, partial matches are allowed. Array of string. Optional",
26
- "search": "Filter by name, description and tags fields, case-insensitive, partial matches are allowed. Type: string. Optional",
27
- "startDateStart": "Filter by start date (lower bound of date range, ISO format). Type: string. Optional",
28
- "startDateEnd": "Filter by start date (upper bound of date range, ISO format). Type: string. Optional",
29
- "currentPhaseName": "Filter by current phase name. Type: string. Optional",
30
- "sortBy": "Sort challenges by a specific field. Type: string. Optional. Valid values: \"updatedBy\", \"updated\", \"createdBy\", \"created\", \"endDate\", \"startDate\", \"projectId\", \"name\"... (and 5 more)",
31
- "sortOrder": "Sort order. Type: string. Optional. Valid values: \"asc\", \"desc\"",
32
- "totalPrizesFrom": "Filter by the lowest amount of total prizes on the challenge. Type: number. Optional",
33
- "totalPrizesTo": "Filter by the highest amount of total prizes on the challenge. Type: number. Optional",
34
- "createdBy": "Filter by the user who created the challenge. Type: string. Optional",
35
- "page": "Page number for pagination, starting from 1. Type: number. Optional. Default: 1",
36
- "perPage": "Number of challenges per page, between 1 and 100. Type: number. Optional. Default: 20",
37
- }
38
- },
39
-
40
- # Working Resources
41
- "Challenges_V6_API_Swagger": {
42
- "description": "Challenge V5 API: ## Pagination - Requests that return multiple items will be paginated to 20 items by default. - You can specify further pages with the `page` paramete...",
43
- "total_endpoints": 11,
44
- "base_url": "https://api.topcoder.com/v5",
45
- "key_endpoints": [
46
- {"path": "/challenges", "method": "GET", "full_url": "https://api.topcoder.com/v5/challenges", "description": "Available parameters: id, selfService, selfServiceCopilot, confidentialityType, directProjectId, typeIds, trackIds, types, tracks, typeId, trackId, type, track, name, search, description, timelineTemplateId, reviewType, tag, tags, includeAllTags, projectId, forumId, legacyId, status, group, startDateStart, startDateEnd, endDateStart, endDateEnd, currentPhaseName, createdDateStart, createdDateEnd, updatedDateStart, updatedDateEnd, registrationStartDateStart, registrationStartDateEnd, registrationEndDateStart, registrationEndDateEnd, submissionStartDateStart, submissionStartDateEnd, submissionEndDateStart, submissionEndDateEnd, createdBy, updatedBy, isLightweight, memberId, sortBy, sortOrder, isTask, taskIsAssigned, taskMemberId, events, tco, includeAllEvents, useSchedulingAPI, totalPrizesFrom, totalPrizesTo", "parameters": {"id": "Filter by id, exact match. Type: string. Location: query. Optional. Format: UUI", "selfService": "Filter by selfService flag. Type: boolean. Location: query. Optional", "selfServiceCopilot": "Filter by selfServiceCopilot. Type: string. Location: query. Optional", "confidentialityType": "Filter by confidentialityType. Type: string. Location: query. Optional. Valid values: \\\"public\\\", \\\"private\\\"", "directProjectId": "Filter by directProjectId. Type: integer. Location: query. Optional", "typeIds": "Filter by multiple type IDs, exact match.. Type: array. Location: query. Optional", "trackIds": "Filter by multiple track IDs, exact match.. Type: array. Location: query. Optional", "types": "Filter by multiple type abbreviation, exact match. If types is provided, typeIds will be ignored. Type: array. Location: query. Optional", "tracks": "Filter by multiple track abbreviation, exact match. If tracks is provided, trackIds will be ignored. Type: array. Location: query. Optional", "typeId": "Filter by type id, exact match. 
If type is provided, typeId will be ignored. Type: string. Location: query. Optional. Format: UUID", "trackId": "Filter by track id, exact match. If track is provided, trackId will be ignored. Type: string. Location: query. Optional. Format: UUID", "type": "Filter by type abbreviation, exact match. If provided, the typeId will be ignored. Type: string. Location: query. Optional", "track": "Filter by track, case-insensitive, partial matches are allowed.. Type: string. Location: query. Optional", "name": "Filter by name, case-insensitive, partial matches are allowed. If search is provided, name will be ignored. Type: string. Location: query. Optional", "search": "Filter by name, description and tags fields, case-insensitive, partial matches are allowed. If search is provided, name and description will be ignored.. Type: string. Location: query. Optional", "description": "Filter by description, case-insensitive, partial matches are allowed. If search is provided, description will be ignored.. Type: string. Location: query. Optional", "timelineTemplateId": "Filter by timeline template id, exact match. Type: string. Location: query. Optional", "reviewType": "Filter by review type, case-insensitive, partial matches are allowed.. Type: string. Location: query. Optional", "tag": "Filter by tag name, case-insensitive, partial matches are allowed.. Type: string. Location: query. Optional", "tags": "Filter by multiple tag names, case-insensitive, partial matches are allowed.. Type: array. Location: query. Optional", "includeAllTags": "Require all provided tags to be present on a challenge for a match.. Type: boolean. Location: query. Optional. Default: True", "projectId": "Filter by v5 project id, exact match.. Type: integer. Location: query. Optional", "forumId": "Filter by forum id, exact match.. Type: integer. Location: query. Optional", "legacyId": "Filter by legacy id, exact match.. Type: integer. Location: query. 
Optional", "status": "Filter by status, case-insensitive, exact match.. Type: string. Location: query. Optional. Valid values: \\\"New\\\", \\\"Draft\\\", \\\"Cancelled\\\", \\\"Active\\\", \\\"Completed\\\", \\\"Deleted\\\"... (and 7 more)", "group": "Filter by group name, case-insensitive, partial matches are allowed.. Type: string. Location: query. Optional", "startDateStart": "Filter by start date (lower bound of date range). Type: string. Location: query. Optional. Format: date-time", "startDateEnd": "Filter by start date (upper bound of date range). Type: string. Location: query. Optional. Format: date-time", "endDateStart": "Filter by end date (lower bound of date range). Type: string. Location: query. Optional. Format: date-time", "endDateEnd": "Filter by end date (upper bound of date range). Type: string. Location: query. Optional. Format: date-time", "currentPhaseName": "Filter by name of the current phase. Type: string. Location: query. Optional", "createdDateStart": "Filter by created date (lower bound of date range). Type: string. Location: query. Optional. Format: date-time", "createdDateEnd": "Filter by created date (upper bound of date range). Type: string. Location: query. Optional. Format: date-time", "updatedDateStart": "Filter by updated date (lower bound of date range). Type: string. Location: query. Optional. Format: date-time", "updatedDateEnd": "Filter by updated date (upper bound of date range). Type: string. Location: query. Optional. Format: date-time", "registrationStartDateStart": "Filter by registration start date (lower bound of date range). Type: string. Location: query. Optional. Format: date-time", "registrationStartDateEnd": "Filter by registration start date (upper bound of date range). Type: string. Location: query. Optional. Format: date-time", "registrationEndDateStart": "Filter by registration end date (lower bound of date range). Type: string. Location: query. Optional. 
Format: date-time", "registrationEndDateEnd": "Filter by registration end date (upper bound of date range). Type: string. Location: query. Optional. Format: date-time", "submissionStartDateStart": "Filter by submission start date (lower bound of date range). Type: string. Location: query. Optional. Format: date-time", "submissionStartDateEnd": "Filter by submission start date (upper bound of date range). Type: string. Location: query. Optional. Format: date-time", "submissionEndDateStart": "Filter by submission end date (lower bound of date range). Type: string. Location: query. Optional. Format: date-time", "submissionEndDateEnd": "Filter by submission end date (upper bound of date range). Type: string. Location: query. Optional. Format: date-time", "createdBy": "Filter by 'createdBy' field, case-insensitive, partial matches are allowed.. Type: string. Location: query. Optional", "updatedBy": "Filter by 'updatedBy' field, case-insensitive, partial matches are allowed.. Type: string. Location: query. Optional", "isLightweight": "Turn off description and privateDescription for a smaller response payload. Type: boolean. Location: query. Optional. Default: False", "memberId": "Filter challenges memberId has access to. Type: string. Location: query. Optional", "sortBy": "Sort the results by the field.. Type: string. Location: query. Optional. Valid values: \\\"updatedBy\\\", \\\"updated\\\", \\\"createdBy\\\", \\\"created\\\", \\\"endDate\\\", \\\"startDate\\\"... (and 7 more)", "sortOrder": "Order the results by the asc/desc.. Type: string. Location: query. Optional. Valid values: \\\"asc\\\", \\\"desc\\\"", "isTask": "Filter based on the task.isTask property. Only available for admins/m2m. Will be ignored for regular users or not authenticated users. Type: boolean. Location: query. Optional", "taskIsAssigned": "Filter based on the task.isAssigned property. Only available for admins/m2m. Will be ignored for regular users or not authenticated users. Type: boolean. 
Location: query. Optional", "taskMemberId": "Filter based on the task.memberId property. Only available for admins/m2m. Will be ignored for regular users or not authenticated users. Type: string. Location: query. Optional", "events": "Filter by multiple event keys (tco21). Type: array. Location: query. Optional", "tco": "Filter by tco eligible events. Type: boolean. Location: query. Optional", "includeAllEvents": "Require all provided events to be present on a challenge for a match. Type: boolean. Location: query. Optional. Default: True", "useSchedulingAPI": "Search based on 'legacy.useSchedulingAPI'. Type: boolean. Location: query. Optional", "totalPrizesFrom": "Filter by the lowest amount of total prizes on the challenge. Type: number. Location: query. Optional", "totalPrizesTo": "Filter by the highest amount of total prizes on the challenge. Type: number. Location: query. Optional"}},
47
- ]
48
- },
49
- "Member_V6_API_Swagger": {
50
- "description": "Topcoder Member API: Services that provide access and interaction with all sorts of member profile details. # Pagination - Requests that return multiple items will be pa...",
51
- "total_endpoints": 10,
52
- "base_url": "https://api.topcoder.com/v5",
53
- "key_endpoints": [
54
- {"path": "/members", "method": "GET", "full_url": "https://api.topcoder.com/v5/members", "description": "Available parameters: userId, userIds, handle, handles, handleLower, handlesLower, sort, fields", "parameters": {"userId": "filter by userId. Type: string. Location: query. Optional", "userIds": "filter by userIds. Example - [userId1,userId2,userId3,...,userIdN]. Type: string. Location: query. Optional", "handle": "filter by handle. This will return like search.. Type: string. Location: query. Optional", "handles": "filter by handles. This will return like search. Example - [\\\"handle1\\\",\\\"handle2\\\",\\\"handle3\\\",...,\\\"handleN\\\"]. Type: string. Location: query. Optional", "handleLower": "filter by handle. This will return an exact search.. Type: string. Location: query. Optional", "handlesLower": "filter by handles. This will return an exact search. Example - [\\\"handle1\\\",\\\"handle2\\\",\\\"handle3\\\",...,\\\"handleN\\\"]. Type: string. Location: query. Optional", "sort": "sort by asc or desc. Type: string. Location: query. Optional", "fields": "fields=fieldName1,fieldName2,...,fieldN parameter for choosing which fields of members profile that will be included in response.. Type: string. Location: query. Optional"}},
55
- ]
56
- },
57
- "Identity_V6_API_Swagger": {
58
- "description": "TC Service Identity",
59
- "total_endpoints": 47,
60
- "base_url": "https://api.topcoder.com/v5",
61
- "key_endpoints": [
62
- {"path": "/groups/{groupId}/singleMember/{memberId}", "method": "GET", "full_url": "https://api.topcoder.com/v5/groups/{groupId}/singleMember/{memberId}"},
63
- ]
64
- },
65
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/config/settings.py DELETED
@@ -1,27 +0,0 @@
1
- from dotenv import load_dotenv
2
- import os
3
-
4
- load_dotenv()
5
-
6
- # LLM Configuration
7
- HF_TOKEN = os.getenv("HF_TOKEN")
8
- HF_MODEL = os.getenv("HF_MODEL", "Qwen/Qwen2.5-7B-Instruct:together")
9
- LLM_API_URL = os.getenv("LLM_API_URL", "https://router.huggingface.co/v1/chat/completions")
10
- LLM_TEMPERATURE = float(os.getenv("LLM_TEMPERATURE", "0.8"))
11
-
12
- # MCP Configuration
13
- # Default to provided dev MCP endpoint; can be overridden via env
14
- MCP_BASE_URL = os.getenv("MCP_BASE_URL", "https://api.topcoder-dev.com/v6/mcp/mcp")
15
- MCP_SERVER_URL = os.getenv("MCP_SERVER_URL", "https://api.topcoder-dev.com/v6/mcp/mcp")
16
- MCP_TOKEN = os.getenv("MCP_TOKEN")
17
-
18
- # API Authentication Configuration
19
- # Set to True to exclude endpoints that require authentication
20
- EXCLUDE_AUTH_REQUIRED_ENDPOINTS = os.getenv("EXCLUDE_AUTH_REQUIRED_ENDPOINTS", "true").lower() == "true"
21
-
22
- # Additional authentication filtering options
23
- # Set to True to also exclude endpoints that have 401 responses in their documentation
24
- EXCLUDE_401_RESPONSES = os.getenv("EXCLUDE_401_RESPONSES", "true").lower() == "true"
25
-
26
- # Set to True to be more strict and exclude endpoints that mention authentication in descriptions
27
- STRICT_AUTH_FILTERING = os.getenv("STRICT_AUTH_FILTERING", "false").lower() == "true"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/config/static_responses.py DELETED
@@ -1,6 +0,0 @@
1
- # config/static_responses.py
2
-
3
- REJECT_RESPONSE = (
4
- "I'm here to help with Topcoder-related queries only. "
5
- "Please ask something related to Topcoder challenges, members, or platform features."
6
- )
 
 
 
 
 
 
 
backup_v1/main.py DELETED
@@ -1,133 +0,0 @@
1
- # main.py
2
-
3
- import asyncio
4
-
5
- from app.agent.model_client import LLMClient
6
- from app.ui.interface import launch_ui
7
-
8
-
9
-
10
- # Optional: Manual test for LLM
11
- async def test_llm():
12
- client = LLMClient()
13
- result = await client.chat([
14
- {"role": "user", "content": "What is Topcoder?"}
15
- ])
16
- print("LLM Response:\n", result)
17
-
18
-
19
- # Optional: Manual test for MCP API
20
- async def test_mcp():
21
- client = MCPClient()
22
- challenges = await client.get_challenges()
23
- print("Sample Challenges:\n", challenges[:2])
24
-
25
- member = await client.get_member("tourist")
26
- print("Member Info:\n", member)
27
-
28
-
29
- async def initialize_mcp_registry():
30
- """Connect to MCP server, list tools/resources, and store them in registry."""
31
- from app.mcp.stream_client import load_server_config, StreamableHttpMCPClient
32
- from app.mcp.catalog import MCPCatalog
33
- from app.mcp.analyzers import OpenAPIAnalyzer
34
- from app.mcp.specs_generator import SpecsGenerator
35
-
36
- try:
37
- server_config = load_server_config()
38
- catalog = MCPCatalog()
39
- analyzer = OpenAPIAnalyzer()
40
-
41
- async with StreamableHttpMCPClient(server_config) as client:
42
- # Initialize the session
43
- init_data = await client.initialize()
44
- print("✅ MCP session initialized")
45
-
46
- # Send initialized notification
47
- try:
48
- status, body = await client.send_initialized_notification()
49
- print(f"✅ Initialized notification sent (HTTP {status})")
50
- except Exception as e:
51
- print(f"⚠️ Failed to send initialized notification: {e}")
52
-
53
- # List and catalog tools
54
- print("\n🔧 Cataloging tools...")
55
- tools_data = await client.call("tools/list", {})
56
- tools = tools_data.get("result", {}).get("tools", [])
57
-
58
- for tool in tools:
59
- tool_name = tool.get("name", "unknown")
60
- print(f" 📝 Cataloging tool: {tool_name}")
61
- try:
62
- catalog.save_tool_schema(tool_name, tool)
63
- print(f" ✅ Saved schema for {tool_name}")
64
- except Exception as e:
65
- print(f" ❌ Failed to save schema for {tool_name}: {e}")
66
-
67
- # List and catalog resources
68
- print("\n📚 Cataloging resources...")
69
- resources_data = await client.call("resources/list", {})
70
- resources = resources_data.get("result", {}).get("resources", [])
71
-
72
- for resource in resources:
73
- resource_name = resource.get("name", "unknown")
74
- resource_uri = resource.get("uri", "")
75
- print(f" 📝 Cataloging resource: {resource_name}")
76
-
77
- try:
78
- # Analyze API resources (Swagger/OpenAPI docs)
79
- if "swagger" in resource_uri.lower() or "api-docs" in resource_uri.lower():
80
- print(f" 🔍 Analyzing API documentation: {resource_uri}")
81
- analysis = await analyzer.fetch_and_analyze_api(resource_uri)
82
- catalog.save_resource_analysis(resource_name, analysis)
83
- print(f" ✅ Analyzed and saved {resource_name}")
84
- else:
85
- # For non-API resources, just save the basic info
86
- basic_analysis = {
87
- "name": resource_name,
88
- "uri": resource_uri,
89
- "description": resource.get("description", ""),
90
- "summary": f"Resource: {resource_name}",
91
- "url": resource_uri
92
- }
93
- catalog.save_resource_analysis(resource_name, basic_analysis)
94
- print(f" ✅ Saved basic info for {resource_name}")
95
-
96
- except Exception as e:
97
- print(f" ❌ Failed to catalog {resource_name}: {e}")
98
-
99
- print(f"\n✅ Loaded {len(resources)} resources and {len(tools)} tools from MCP server")
100
-
101
- # Print catalog summary
102
- summary = catalog.get_catalog_summary()
103
- print(f"📁 Catalog stored in: {summary['catalog_dir']}")
104
- print(f"📋 Cataloged tools: {len(summary['tools'])}")
105
- print(f"📋 Cataloged resources: {len(summary['resources'])}")
106
-
107
- # Generate compact specs
108
- print("\n🔧 Generating compact API specs...")
109
- specs_generator = SpecsGenerator()
110
- compact_specs = await specs_generator.generate_compact_specs()
111
- specs_generator.save_compact_specs(compact_specs)
112
- specs_generator.print_summary(compact_specs)
113
-
114
- except Exception as e:
115
- print(f"⚠️ Failed to initialize MCP registry: {e}")
116
- import traceback
117
- traceback.print_exc()
118
- finally:
119
- await analyzer.close()
120
-
121
-
122
- # Note: Cleanup is handled automatically by StreamableHttpMCPClient context managers
123
-
124
-
125
- if __name__ == "__main__":
126
- # Uncomment the tests below if you want to run them manually
127
-
128
- # asyncio.run(test_llm())
129
- # asyncio.run(test_mcp())
130
-
131
- # Initialize MCP tool/resource registry, then launch UI
132
- asyncio.run(initialize_mcp_registry())
133
- launch_ui()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/mcp-config.json DELETED
@@ -1,8 +0,0 @@
1
- {
2
- "mcpServers": {
3
- "topcoder": {
4
- "url": "https://api.topcoder-dev.com/v6/mcp/mcp",
5
- "description": "Topcoder MCP Server for challenges and skills"
6
- }
7
- }
8
- }
 
 
 
 
 
 
 
 
 
backup_v1/mcp/__init__.py DELETED
File without changes
backup_v1/mcp/analyzers.py DELETED
@@ -1,185 +0,0 @@
1
- import json
2
- import yaml
3
- import aiohttp
4
- from typing import Dict, List, Any, Optional
5
- from urllib.parse import urlparse
6
-
7
-
8
- class OpenAPIAnalyzer:
9
- """Analyzes OpenAPI/Swagger documents to extract relevant API structures."""
10
-
11
- def __init__(self):
12
- self.session: Optional[aiohttp.ClientSession] = None
13
-
14
- async def _get_session(self) -> aiohttp.ClientSession:
15
- if not self.session:
16
- self.session = aiohttp.ClientSession()
17
- return self.session
18
-
19
- async def close(self):
20
- if self.session:
21
- await self.session.close()
22
- self.session = None
23
-
24
- async def fetch_and_analyze_api(self, url: str) -> Dict[str, Any]:
25
- """Fetch and analyze an OpenAPI/Swagger document from URL."""
26
- session = await self._get_session()
27
-
28
- try:
29
- async with session.get(url) as resp:
30
- if resp.status != 200:
31
- raise Exception(f"Failed to fetch API doc: HTTP {resp.status}")
32
-
33
- content_type = resp.headers.get("content-type", "")
34
- text = await resp.text()
35
-
36
- # Parse based on content type
37
- if "yaml" in content_type or "yml" in content_type or url.endswith(('.yaml', '.yml')):
38
- doc = yaml.safe_load(text)
39
- else:
40
- doc = json.loads(text)
41
-
42
- return self._analyze_openapi_doc(doc, url)
43
-
44
- except Exception as e:
45
- return {
46
- "error": f"Failed to analyze API at {url}: {str(e)}",
47
- "url": url,
48
- "summary": "API documentation could not be analyzed"
49
- }
50
-
51
- def _analyze_openapi_doc(self, doc: Dict[str, Any], url: str) -> Dict[str, Any]:
52
- """Analyze OpenAPI document and extract relevant information."""
53
- info = doc.get("info", {})
54
- paths = doc.get("paths", {})
55
- servers = doc.get("servers", [])
56
-
57
- # Extract base URL from servers or use known Topcoder API URLs
58
- base_url = None
59
- if servers:
60
- # Use the first server URL as base
61
- base_url = servers[0].get("url", "")
62
- # If it's a relative URL, combine with the original URL
63
- if base_url.startswith("/"):
64
- parsed_url = urlparse(url)
65
- base_url = f"{parsed_url.scheme}://{parsed_url.netloc}{base_url}"
66
- else:
67
- # Use known Topcoder API URLs based on the document URL
68
- if "member-api" in url:
69
- base_url = "https://api.topcoder.com/v5"
70
- elif "challenge-api" in url:
71
- base_url = "https://api.topcoder.com/v5"
72
- elif "identity-api" in url:
73
- base_url = "https://api.topcoder.com/v5"
74
- else:
75
- # Try to extract from the URL itself
76
- parsed_url = urlparse(url)
77
- if "github.com" in parsed_url.netloc:
78
- # For GitHub raw URLs, use the standard Topcoder API
79
- base_url = "https://api.topcoder.com/v5"
80
-
81
- # Extract version from info (but don't add to base URL for Topcoder APIs)
82
- version = info.get("version", "v5")
83
-
84
- # For Topcoder APIs, use the standard format without additional version suffixes
85
- full_base_url = base_url
86
- # Don't add version to base URL for Topcoder APIs since they use /v5/ format
87
-
88
- # Extract basic info
89
- analysis = {
90
- "url": url,
91
- "base_url": full_base_url,
92
- "version": version,
93
- "title": info.get("title", "Unknown API"),
94
- "description": info.get("description", ""),
95
- "summary": "",
96
- "endpoints": [],
97
- "schemas": {}
98
- }
99
-
100
- # Analyze endpoints
101
- for path, path_item in paths.items():
102
- for method, operation in path_item.items():
103
- if method.lower() in ["get", "post", "put", "delete", "patch"]:
104
- # Construct full URL with base URL and version
105
- full_url = None
106
- if full_base_url:
107
- # Ensure path starts with /
108
- clean_path = path if path.startswith("/") else f"/{path}"
109
- full_url = f"{full_base_url}{clean_path}"
110
-
111
- # Check if endpoint requires authentication (has 401 response)
112
- requires_auth = "401" in operation.get("responses", {})
113
-
114
- # Additional checks for authentication requirements
115
- description = operation.get("description", "").lower()
116
- summary = operation.get("summary", "").lower()
117
-
118
- # Check for authentication mentions in description/summary
119
- auth_keywords = ["authentication", "authorization", "bearer", "token", "auth", "login", "credential"]
120
- has_auth_mention = any(keyword in description or keyword in summary for keyword in auth_keywords)
121
-
122
- # Check for secured field mentions
123
- secured_mentions = ["secured", "private", "admin", "authenticated", "authorized"]
124
- has_secured_mention = any(keyword in description or keyword in summary for keyword in secured_mentions)
125
-
126
- # Final authentication requirement
127
- requires_auth = requires_auth or has_auth_mention or has_secured_mention
128
-
129
- endpoint = {
130
- "path": path,
131
- "method": method.upper(),
132
- "summary": operation.get("summary", ""),
133
- "description": operation.get("description", ""),
134
- "parameters": operation.get("parameters", []),
135
- "requestBody": operation.get("requestBody", {}),
136
- "responses": operation.get("responses", {}),
137
- "full_url": full_url,
138
- "requires_auth": requires_auth
139
- }
140
- analysis["endpoints"].append(endpoint)
141
-
142
- # Extract schemas if available
143
- if "components" in doc and "schemas" in doc["components"]:
144
- schemas = doc["components"]["schemas"]
145
- for schema_name, schema in schemas.items():
146
- if isinstance(schema, dict):
147
- analysis["schemas"][schema_name] = {
148
- "type": schema.get("type", "object"),
149
- "properties": schema.get("properties", {}),
150
- "required": schema.get("required", []),
151
- "description": schema.get("description", "")
152
- }
153
-
154
- # Generate summary
155
- analysis["summary"] = f"API with {len(analysis['endpoints'])} endpoints and {len(analysis['schemas'])} schemas"
156
-
157
- return analysis
158
-
159
- def _extract_relevant_schemas(self, analysis: Dict[str, Any]) -> List[Dict[str, Any]]:
160
- """Extract the most relevant schemas from the analysis."""
161
- schemas = []
162
-
163
- for schema_name, schema in analysis.get("schemas", {}).items():
164
- # Focus on schemas that are likely to be important
165
- if schema.get("type") == "object" and schema.get("properties"):
166
- relevant_schema = {
167
- "name": schema_name,
168
- "type": schema.get("type"),
169
- "description": schema.get("description", ""),
170
- "properties": {}
171
- }
172
-
173
- # Extract key properties (limit to first 10 to avoid overwhelming)
174
- properties = schema.get("properties", {})
175
- for prop_name, prop_info in list(properties.items())[:10]:
176
- if isinstance(prop_info, dict):
177
- relevant_schema["properties"][prop_name] = {
178
- "type": prop_info.get("type", "unknown"),
179
- "description": prop_info.get("description", ""),
180
- "required": prop_name in schema.get("required", [])
181
- }
182
-
183
- schemas.append(relevant_schema)
184
-
185
- return schemas
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/mcp/catalog.py DELETED
@@ -1,114 +0,0 @@
1
- import json
2
- import os
3
- from typing import Dict, List, Any, Optional
4
- from pathlib import Path
5
-
6
-
7
- class MCPCatalog:
8
- """Manages structured storage of MCP tools and resources."""
9
-
10
- def __init__(self, catalog_dir: str = "mcp_catalog"):
11
- self.catalog_dir = Path(catalog_dir)
12
- self.catalog_dir.mkdir(exist_ok=True)
13
-
14
- # Ensure subdirectories exist
15
- (self.catalog_dir / "tools").mkdir(exist_ok=True)
16
- (self.catalog_dir / "resources").mkdir(exist_ok=True)
17
-
18
- def save_tool_schema(self, tool_name: str, tool_data: Dict[str, Any]) -> str:
19
- """Save tool schema to disk."""
20
- file_path = self.catalog_dir / "tools" / f"{tool_name}.json"
21
-
22
- # Extract relevant tool information
23
- schema = {
24
- "name": tool_name,
25
- "description": tool_data.get("description", ""),
26
- "inputSchema": tool_data.get("inputSchema", {}),
27
- "outputSchema": tool_data.get("outputSchema", {}),
28
- "parameters": tool_data.get("parameters", []),
29
- "raw_data": tool_data # Keep original data for reference
30
- }
31
-
32
- with open(file_path, 'w', encoding='utf-8') as f:
33
- json.dump(schema, f, indent=2, ensure_ascii=False)
34
-
35
- return str(file_path)
36
-
37
- def save_resource_analysis(self, resource_name: str, analysis: Dict[str, Any]) -> str:
38
- """Save resource analysis to disk."""
39
- # Sanitize filename
40
- safe_name = "".join(c for c in resource_name if c.isalnum() or c in (' ', '-', '_')).rstrip()
41
- safe_name = safe_name.replace(' ', '_')
42
-
43
- file_path = self.catalog_dir / "resources" / f"{safe_name}.json"
44
-
45
- # Structure the analysis data
46
- structured_analysis = {
47
- "name": resource_name,
48
- "url": analysis.get("url", ""),
49
- "base_url": analysis.get("base_url", ""),
50
- "version": analysis.get("version", ""),
51
- "title": analysis.get("title", ""),
52
- "description": analysis.get("description", ""),
53
- "summary": analysis.get("summary", ""),
54
- "endpoints": analysis.get("endpoints", []),
55
- "schemas": analysis.get("schemas", {}),
56
- "error": analysis.get("error", None),
57
- "raw_analysis": analysis # Keep original analysis for reference
58
- }
59
-
60
- with open(file_path, 'w', encoding='utf-8') as f:
61
- json.dump(structured_analysis, f, indent=2, ensure_ascii=False)
62
-
63
- return str(file_path)
64
-
65
- def load_tool_schema(self, tool_name: str) -> Optional[Dict[str, Any]]:
66
- """Load tool schema from disk."""
67
- file_path = self.catalog_dir / "tools" / f"{tool_name}.json"
68
-
69
- if file_path.exists():
70
- with open(file_path, 'r', encoding='utf-8') as f:
71
- return json.load(f)
72
- return None
73
-
74
- def load_resource_analysis(self, resource_name: str) -> Optional[Dict[str, Any]]:
75
- """Load resource analysis from disk."""
76
- safe_name = "".join(c for c in resource_name if c.isalnum() or c in (' ', '-', '_')).rstrip()
77
- safe_name = safe_name.replace(' ', '_')
78
-
79
- file_path = self.catalog_dir / "resources" / f"{safe_name}.json"
80
-
81
- if file_path.exists():
82
- with open(file_path, 'r', encoding='utf-8') as f:
83
- return json.load(f)
84
- return None
85
-
86
- def list_cataloged_tools(self) -> List[str]:
87
- """List all cataloged tools."""
88
- tools_dir = self.catalog_dir / "tools"
89
- if not tools_dir.exists():
90
- return []
91
-
92
- tools = []
93
- for file_path in tools_dir.glob("*.json"):
94
- tools.append(file_path.stem)
95
- return tools
96
-
97
- def list_cataloged_resources(self) -> List[str]:
98
- """List all cataloged resources."""
99
- resources_dir = self.catalog_dir / "resources"
100
- if not resources_dir.exists():
101
- return []
102
-
103
- resources = []
104
- for file_path in resources_dir.glob("*.json"):
105
- resources.append(file_path.stem)
106
- return resources
107
-
108
- def get_catalog_summary(self) -> Dict[str, Any]:
109
- """Get a summary of the catalog contents."""
110
- return {
111
- "tools": self.list_cataloged_tools(),
112
- "resources": self.list_cataloged_resources(),
113
- "catalog_dir": str(self.catalog_dir)
114
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/mcp/catalog_utils.py DELETED
@@ -1,147 +0,0 @@
1
- import json
2
- from typing import Dict, List, Any, Optional
3
- from .catalog import MCPCatalog
4
-
5
-
6
- class CatalogUtils:
7
- """Utility functions for working with cataloged MCP data."""
8
-
9
- def __init__(self, catalog_dir: str = "mcp_catalog"):
10
- self.catalog = MCPCatalog(catalog_dir)
11
-
12
- def get_tool_schema(self, tool_name: str) -> Optional[Dict[str, Any]]:
13
- """Get the schema for a specific tool."""
14
- return self.catalog.load_tool_schema(tool_name)
15
-
16
- def get_resource_analysis(self, resource_name: str) -> Optional[Dict[str, Any]]:
17
- """Get the analysis for a specific resource."""
18
- return self.catalog.load_resource_analysis(resource_name)
19
-
20
- def list_available_tools(self) -> List[str]:
21
- """List all available tools in the catalog."""
22
- return self.catalog.list_cataloged_tools()
23
-
24
- def list_available_resources(self) -> List[str]:
25
- """List all available resources in the catalog."""
26
- return self.catalog.list_cataloged_resources()
27
-
28
- def get_tool_input_schema(self, tool_name: str) -> Optional[Dict[str, Any]]:
29
- """Get the input schema for a tool."""
30
- schema = self.get_tool_schema(tool_name)
31
- if schema:
32
- return schema.get("inputSchema", {})
33
- return None
34
-
35
- def get_tool_output_schema(self, tool_name: str) -> Optional[Dict[str, Any]]:
36
- """Get the output schema for a tool."""
37
- schema = self.get_tool_schema(tool_name)
38
- if schema:
39
- return schema.get("outputSchema", {})
40
- return None
41
-
42
- def get_tool_description(self, tool_name: str) -> Optional[str]:
43
- """Get the description for a tool."""
44
- schema = self.get_tool_schema(tool_name)
45
- if schema:
46
- return schema.get("description", "")
47
- return None
48
-
49
- def format_tool_request(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
50
- """Format a request for a tool based on its schema."""
51
- schema = self.get_tool_schema(tool_name)
52
- if not schema:
53
- raise ValueError(f"Tool '{tool_name}' not found in catalog")
54
-
55
- # Validate required parameters
56
- input_schema = schema.get("inputSchema", {})
57
- required_props = input_schema.get("required", [])
58
-
59
- missing_params = []
60
- for prop in required_props:
61
- if prop not in params:
62
- missing_params.append(prop)
63
-
64
- if missing_params:
65
- raise ValueError(f"Missing required parameters for {tool_name}: {missing_params}")
66
-
67
- return {
68
- "method": "tools/call",
69
- "params": {
70
- "name": tool_name,
71
- "arguments": params
72
- }
73
- }
74
-
75
- def get_api_endpoints(self, resource_name: str) -> List[Dict[str, Any]]:
76
- """Get API endpoints from a resource analysis."""
77
- analysis = self.get_resource_analysis(resource_name)
78
- if analysis:
79
- return analysis.get("endpoints", [])
80
- return []
81
-
82
- def get_api_schemas(self, resource_name: str) -> Dict[str, Any]:
83
- """Get API schemas from a resource analysis."""
84
- analysis = self.get_resource_analysis(resource_name)
85
- if analysis:
86
- return analysis.get("schemas", {})
87
- return {}
88
-
89
- def find_api_endpoint(self, resource_name: str, path: str, method: str = "GET") -> Optional[Dict[str, Any]]:
90
- """Find a specific API endpoint in a resource."""
91
- endpoints = self.get_api_endpoints(resource_name)
92
- for endpoint in endpoints:
93
- if endpoint.get("path") == path and endpoint.get("method") == method.upper():
94
- return endpoint
95
- return None
96
-
97
- def get_catalog_summary(self) -> Dict[str, Any]:
98
- """Get a summary of the catalog contents."""
99
- return self.catalog.get_catalog_summary()
100
-
101
- def print_tool_info(self, tool_name: str):
102
- """Print detailed information about a tool."""
103
- schema = self.get_tool_schema(tool_name)
104
- if not schema:
105
- print(f"❌ Tool '{tool_name}' not found in catalog")
106
- return
107
-
108
- print(f"\n🔧 Tool: {tool_name}")
109
- print(f"📝 Description: {schema.get('description', 'No description')}")
110
-
111
- input_schema = schema.get("inputSchema", {})
112
- if input_schema:
113
- print(f"📥 Input Schema:")
114
- properties = input_schema.get("properties", {})
115
- required = input_schema.get("required", [])
116
-
117
- for prop_name, prop_info in properties.items():
118
- required_mark = " (required)" if prop_name in required else ""
119
- print(f" - {prop_name}: {prop_info.get('type', 'unknown')}{required_mark}")
120
- if prop_info.get("description"):
121
- print(f" Description: {prop_info['description']}")
122
-
123
- def print_resource_info(self, resource_name: str):
124
- """Print detailed information about a resource."""
125
- analysis = self.get_resource_analysis(resource_name)
126
- if not analysis:
127
- print(f"❌ Resource '{resource_name}' not found in catalog")
128
- return
129
-
130
- print(f"\n📚 Resource: {resource_name}")
131
- print(f"📝 Title: {analysis.get('title', 'No title')}")
132
- print(f"🔗 URL: {analysis.get('url', 'No URL')}")
133
- print(f"📋 Summary: {analysis.get('summary', 'No summary')}")
134
-
135
- endpoints = analysis.get("endpoints", [])
136
- if endpoints:
137
- print(f"🔌 Endpoints ({len(endpoints)}):")
138
- for endpoint in endpoints[:5]: # Show first 5
139
- print(f" - {endpoint.get('method', 'UNKNOWN')} {endpoint.get('path', 'unknown')}")
140
- if endpoint.get("summary"):
141
- print(f" Summary: {endpoint['summary']}")
142
-
143
- schemas = analysis.get("schemas", {})
144
- if schemas:
145
- print(f"📋 Schemas ({len(schemas)}):")
146
- for schema_name in list(schemas.keys())[:5]: # Show first 5
147
- print(f" - {schema_name}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/mcp/compact_utils.py DELETED
@@ -1,216 +0,0 @@
1
- import json
2
- import asyncio
3
- import aiohttp
4
- import re
5
- from typing import Dict, List, Any, Optional
6
- from pathlib import Path
7
- from app.config.api_endpoints import get_base_url
8
- from .catalog_utils import CatalogUtils
9
-
10
-
11
- class CompactSpecsUtils:
12
- """Utility functions for working with compact API specs."""
13
-
14
- def __init__(self, catalog_dir: str = "mcp_catalog"):
15
- self.catalog_utils = CatalogUtils(catalog_dir)
16
- # Import the generated compact specs
17
- try:
18
- from app.config.compact_api_specs import COMPACT_API_SPECS
19
- self.compact_specs = COMPACT_API_SPECS
20
- except ImportError:
21
- self.compact_specs = {}
22
- print("⚠️ Compact API specs not found. Run initialization first.")
23
-
24
- def get_working_tools(self) -> List[str]:
25
- """Get list of working tools from compact specs."""
26
- return [name for name, spec in self.compact_specs.items()
27
- if "parameters" in spec] # Tools have parameters
28
-
29
- def get_working_resources(self) -> List[str]:
30
- """Get list of working resources from compact specs."""
31
- return [name for name, spec in self.compact_specs.items()
32
- if "total_endpoints" in spec] # Resources have endpoints
33
-
34
- def get_tool_spec(self, tool_name: str) -> Optional[Dict[str, Any]]:
35
- """Get compact spec for a tool."""
36
- if tool_name in self.compact_specs and "parameters" in self.compact_specs[tool_name]:
37
- return self.compact_specs[tool_name]
38
- return None
39
-
40
- def get_resource_spec(self, resource_name: str) -> Optional[Dict[str, Any]]:
41
- """Get compact spec for a resource."""
42
- if resource_name in self.compact_specs and "total_endpoints" in self.compact_specs[resource_name]:
43
- return self.compact_specs[resource_name]
44
- return None
45
-
46
- def format_tool_request(self, tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
47
- """Format a tool request using compact specs."""
48
- spec = self.get_tool_spec(tool_name)
49
- if not spec:
50
- raise ValueError(f"Tool '{tool_name}' not found in compact specs")
51
-
52
- # Validate parameters against compact spec
53
- available_params = spec.get("parameters", {})
54
- unknown_params = [p for p in params.keys() if p not in available_params]
55
-
56
- if unknown_params:
57
- print(f"⚠️ Warning: Unknown parameters for {tool_name}: {unknown_params}")
58
-
59
- return {
60
- "method": "tools/call",
61
- "params": {
62
- "name": tool_name,
63
- "arguments": params
64
- }
65
- }
66
-
67
- def format_resource_request(self, resource_name: str, endpoint_path: str, method: str = "GET", params: Dict[str, Any] = None) -> Dict[str, Any]:
68
- """Format a resource (API) request using compact specs."""
69
- spec = self.get_resource_spec(resource_name)
70
- if not spec:
71
- raise ValueError(f"Resource '{resource_name}' not found in compact specs")
72
-
73
- # Validate endpoint exists
74
- key_endpoints = spec.get("key_endpoints", [])
75
- endpoint_found = any(
76
- ep.get("path") == endpoint_path and ep.get("method") == method
77
- for ep in key_endpoints
78
- )
79
-
80
- if not endpoint_found:
81
- print(f"⚠️ Warning: Endpoint {method} {endpoint_path} not found in key endpoints for {resource_name}")
82
-
83
- return {
84
- "method": "resources/read",
85
- "params": {
86
- "name": resource_name,
87
- "uri": endpoint_path,
88
- "arguments": params or {}
89
- }
90
- }
91
-
92
- async def make_direct_api_call(self, resource_name: str, endpoint_path: str, method: str = "GET", params: Dict[str, Any] = None) -> Dict[str, Any]:
93
- """Make a direct API call using hardcoded base URLs."""
94
- # Use hardcoded base URL
95
- base_url = get_base_url(resource_name)
96
- if not base_url:
97
- raise ValueError(f"No base URL found for resource '{resource_name}'")
98
-
99
- # Construct the full URL
100
- full_url = f"{base_url.rstrip('/')}{endpoint_path}"
101
-
102
- # Make the API call
103
- async with aiohttp.ClientSession() as session:
104
- if method.upper() == "GET":
105
- # For GET requests, add params to query string
106
- async with session.get(full_url, params=params or {}) as resp:
107
- return {
108
- "status": resp.status,
109
- "data": await resp.json() if resp.status == 200 else None,
110
- "error": await resp.text() if resp.status != 200 else None
111
- }
112
- else:
113
- # For other methods, send params in body
114
- async with session.request(method, full_url, json=params or {}) as resp:
115
- return {
116
- "status": resp.status,
117
- "data": await resp.json() if resp.status == 200 else None,
118
- "error": await resp.text() if resp.status != 200 else None
119
- }
120
-
121
- def get_resource_endpoint_info(self, resource_name: str, endpoint_path: str, method: str = "GET") -> Optional[Dict[str, Any]]:
122
- """Get information about a specific endpoint in a resource."""
123
- spec = self.get_resource_spec(resource_name)
124
- if not spec:
125
- return None
126
-
127
- key_endpoints = spec.get("key_endpoints", [])
128
- for endpoint in key_endpoints:
129
- if endpoint.get("path") == endpoint_path and endpoint.get("method") == method:
130
- return endpoint
131
- return None
132
-
133
- def get_tool_parameters(self, tool_name: str) -> Dict[str, str]:
134
- """Get available parameters for a tool."""
135
- spec = self.get_tool_spec(tool_name)
136
- if spec:
137
- return spec.get("parameters", {})
138
- return {}
139
-
140
- def get_resource_endpoints(self, resource_name: str) -> List[Dict[str, str]]:
141
- """Get key endpoints for a resource."""
142
- spec = self.get_resource_spec(resource_name)
143
- if spec:
144
- return spec.get("key_endpoints", [])
145
- return []
146
-
147
- def print_compact_summary(self):
148
- """Print a summary of compact specs."""
149
- tools = self.get_working_tools()
150
- resources = self.get_working_resources()
151
-
152
- print(f"📊 Compact Specs Summary:")
153
- print(f" 🔧 Working Tools: {len(tools)}")
154
- for tool_name in tools:
155
- spec = self.get_tool_spec(tool_name)
156
- param_count = len(spec.get("parameters", {}))
157
- print(f" - {tool_name} ({param_count} parameters)")
158
-
159
- print(f" 📚 Working Resources: {len(resources)}")
160
- for resource_name in resources:
161
- spec = self.get_resource_spec(resource_name)
162
- total_endpoints = spec.get("total_endpoints", 0)
163
- print(f" - {resource_name} ({total_endpoints} endpoints)")
164
-
165
- def print_tool_info(self, tool_name: str):
166
- """Print detailed information about a tool from compact specs."""
167
- spec = self.get_tool_spec(tool_name)
168
- if not spec:
169
- print(f"❌ Tool '{tool_name}' not found in compact specs")
170
- return
171
-
172
- print(f"\n🔧 Tool: {tool_name}")
173
- print(f"📝 Description: {spec.get('description', 'No description')}")
174
-
175
- parameters = spec.get("parameters", {})
176
- if parameters:
177
- print(f"📥 Parameters:")
178
- for param_name, param_desc in parameters.items():
179
- print(f" - {param_name}: {param_desc}")
180
-
181
- def print_resource_info(self, resource_name: str):
182
- """Print detailed information about a resource from compact specs."""
183
- spec = self.get_resource_spec(resource_name)
184
- if not spec:
185
- print(f"❌ Resource '{resource_name}' not found in compact specs")
186
- return
187
-
188
- print(f"\n📚 Resource: {resource_name}")
189
- print(f"📝 Description: {spec.get('description', 'No description')}")
190
- print(f"🔌 Total Endpoints: {spec.get('total_endpoints', 0)}")
191
-
192
- key_endpoints = spec.get("key_endpoints", [])
193
- if key_endpoints:
194
- print(f"🔌 Key Endpoints:")
195
- for endpoint in key_endpoints:
196
- print(f" - {endpoint.get('method', 'UNKNOWN')} {endpoint.get('path', 'unknown')}")
197
-
198
- def find_tool_by_description(self, keyword: str) -> List[str]:
199
- """Find tools by keyword in description."""
200
- matching_tools = []
201
- for tool_name, spec in self.compact_specs.items():
202
- if "parameters" in spec: # It's a tool
203
- description = spec.get("description", "").lower()
204
- if keyword.lower() in description:
205
- matching_tools.append(tool_name)
206
- return matching_tools
207
-
208
- def find_resource_by_description(self, keyword: str) -> List[str]:
209
- """Find resources by keyword in description."""
210
- matching_resources = []
211
- for resource_name, spec in self.compact_specs.items():
212
- if "total_endpoints" in spec: # It's a resource
213
- description = spec.get("description", "").lower()
214
- if keyword.lower() in description:
215
- matching_resources.append(resource_name)
216
- return matching_resources
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/mcp/specs_generator.py DELETED
@@ -1,487 +0,0 @@
1
- import json
2
- import asyncio
3
- from typing import Dict, List, Any, Optional
4
- from .catalog_utils import CatalogUtils
5
- from .stream_client import load_server_config, StreamableHttpMCPClient
6
- from app.config.settings import EXCLUDE_AUTH_REQUIRED_ENDPOINTS, EXCLUDE_401_RESPONSES, STRICT_AUTH_FILTERING
7
-
8
-
9
class SpecsGenerator:
    """Generates compact API specs from cataloged data and tests functionality."""

    def __init__(self, catalog_dir: str = "mcp_catalog"):
        # CatalogUtils wraps access to the on-disk MCP catalog (tool schemas
        # and resource analyses) rooted at catalog_dir.
        self.catalog_utils = CatalogUtils(catalog_dir)
        # Cache of generated compact specs, keyed by tool/resource name.
        self.compact_specs = {}
15
-
16
    async def test_tool_functionality(self, tool_name: str) -> bool:
        """Test if a tool is working by making a minimal call.

        Connects to the MCP server, picks one simple optional parameter from
        the tool's cataloged schema, issues a tools/call with it, and treats
        any non-error response as success.

        Args:
            tool_name: Catalog name of the tool to probe.

        Returns:
            True if the test call succeeded; False on missing schema, an
            error response, or any exception (logged, not raised).
        """
        try:
            server_config = load_server_config()
            async with StreamableHttpMCPClient(server_config) as client:
                await client.initialize()

                # Get tool schema to find a valid test parameter
                schema = self.catalog_utils.get_tool_schema(tool_name)
                if not schema:
                    return False

                input_schema = schema.get("inputSchema", {})
                properties = input_schema.get("properties", {})

                # Find a simple parameter to test with; at most one parameter
                # is filled in (the first optional string or number found).
                test_params = {}
                for param_name, param_info in properties.items():
                    if param_info.get("type") == "string" and not param_info.get("required", False):
                        test_params[param_name] = "test"
                        break
                    elif param_info.get("type") == "number" and not param_info.get("required", False):
                        test_params[param_name] = 1
                        break

                # Make a test call
                request = self.catalog_utils.format_tool_request(tool_name, test_params)
                result = await client.call("tools/call", request["params"])

                # Check if we got a valid response (not an error)
                return "error" not in result

        except Exception as e:
            # Best-effort health check: report the failure and move on.
            print(f"❌ Tool {tool_name} test failed: {e}")
            return False
51
-
52
- async def test_resource_functionality(self, resource_name: str) -> bool:
53
- """Test if a resource (API) is working by checking its endpoints."""
54
- try:
55
- analysis = self.catalog_utils.get_resource_analysis(resource_name)
56
- if not analysis:
57
- return False
58
-
59
- # Check if the analysis has errors
60
- if analysis.get("error"):
61
- return False
62
-
63
- # Check if we have endpoints
64
- endpoints = analysis.get("endpoints", [])
65
- if not endpoints:
66
- return False
67
-
68
- # Check if the API has meaningful content
69
- title = analysis.get("title", "")
70
- description = analysis.get("description", "")
71
-
72
- # If it's a basic resource without API content, consider it working
73
- if not title and not description and len(endpoints) == 0:
74
- return True
75
-
76
- # For API resources, check if we have valid endpoints
77
- return len(endpoints) > 0
78
-
79
- except Exception as e:
80
- print(f"❌ Resource {resource_name} test failed: {e}")
81
- return False
82
-
83
    def generate_compact_tool_spec(self, tool_name: str) -> Optional[Dict[str, Any]]:
        """Generate compact spec for a tool.

        Condenses the tool's cataloged JSON schema into a short description
        plus one human-readable summary line per parameter, pre-escaped so
        save_compact_specs can write it verbatim into a generated .py file.

        Args:
            tool_name: Catalog name of the tool.

        Returns:
            Dict with "description" and "parameters" keys, or None if the
            tool has no cataloged schema.
        """
        schema = self.catalog_utils.get_tool_schema(tool_name)
        if not schema:
            return None

        description = schema.get("description", "")
        input_schema = schema.get("inputSchema", {})
        properties = input_schema.get("properties", {})
        # JSON Schema lists required parameter names at the schema level.
        required = input_schema.get("required", [])

        # Extract ALL parameters with comprehensive details
        parameters = {}
        for param_name, param_info in properties.items():
            param_type = param_info.get("type", "string")
            param_desc = param_info.get("description", "")
            param_enum = param_info.get("enum", [])
            param_required = param_name in required
            param_items = param_info.get("items", {})
            param_format = param_info.get("format", "")
            param_default = param_info.get("default")

            # Build comprehensive parameter description
            desc_parts = []

            # Add base description (first sentence only, to keep it compact)
            if param_desc:
                base_desc = param_desc.split('.')[0] if '.' in param_desc else param_desc
                desc_parts.append(base_desc)

            # Add type information
            if param_type == "array" and param_items:
                item_type = param_items.get("type", "string")
                desc_parts.append(f"Array of {item_type}")
            else:
                desc_parts.append(f"Type: {param_type}")

            # Add format if specified
            if param_format:
                desc_parts.append(f"Format: {param_format}")

            # Add required/optional status
            desc_parts.append("Required" if param_required else "Optional")

            # Add enum values if available
            if param_enum:
                enum_str = ", ".join([f'"{val}"' for val in param_enum[:8]])  # Show up to 8 values
                if len(param_enum) > 8:
                    enum_str += f"... (and {len(param_enum) - 8} more)"
                desc_parts.append(f"Valid values: {enum_str}")

            # Add default value if specified
            if param_default is not None:
                desc_parts.append(f"Default: {param_default}")

            # Combine all parts
            desc = ". ".join(desc_parts)

            # Clean up description for Python string - escape all special characters
            # (the text must survive being emitted between double quotes later)
            desc = (desc.replace('\\', '\\\\')  # Escape backslashes first
                        .replace('"', '\\"')    # Escape quotes
                        .replace('\n', ' ')     # Replace newlines
                        .replace('\r', ' ')     # Replace carriage returns
                        .replace('`', "'"))     # Replace backticks with single quotes
            parameters[param_name] = desc

        # Clean up description for Python string
        clean_description = description[:200] + "..." if len(description) > 200 else description
        clean_description = clean_description.replace('"', '\\"').replace('\n', ' ').replace('\r', ' ')

        return {
            "description": clean_description,
            "parameters": parameters
        }
157
-
158
    def _extract_endpoint_parameters(self, endpoint: Optional[Dict[str, Any]]) -> Dict[str, str]:
        """Extract comprehensive parameter information from an OpenAPI endpoint.

        Path and body parameters are skipped: key endpoints are exposed as
        bare base URLs, so only query/header-style parameters are useful.

        Args:
            endpoint: OpenAPI operation object for one endpoint, or None.

        Returns:
            Mapping of parameter name to a one-line, pre-escaped description;
            empty when endpoint is falsy or has no usable parameters.
        """
        if not endpoint:
            return {}

        parameters_info = {}
        endpoint_params = endpoint.get("parameters", [])

        for param in endpoint_params:
            param_name = param.get("name", "")
            param_in = param.get("in", "")
            param_type = param.get("type", "string")
            param_desc = param.get("description", "")
            param_required = param.get("required", False)
            param_enum = param.get("enum", [])
            param_format = param.get("format", "")
            param_default = param.get("default")

            # Skip path parameters for base URL approach
            if param_in == "path":
                continue

            # Skip body parameters as they're not query params
            if param_in == "body":
                continue

            # Build comprehensive parameter description
            desc_parts = []

            # Add base description
            if param_desc:
                # Clean up description - remove excessive details but keep key info
                clean_desc = param_desc.replace('\n', ' ').replace('\r', ' ')
                if len(clean_desc) > 200:
                    # Take first 200 chars but try to end at sentence
                    truncated = clean_desc[:200]
                    last_period = truncated.rfind('.')
                    if last_period > 100:  # If there's a period in reasonable range
                        clean_desc = truncated[:last_period + 1]
                    else:
                        clean_desc = truncated + "..."
                desc_parts.append(clean_desc)

            # Add type and location information
            desc_parts.append(f"Type: {param_type}")
            if param_in:
                desc_parts.append(f"Location: {param_in}")

            # Add required/optional status
            desc_parts.append("Required" if param_required else "Optional")

            # Add format if specified
            if param_format:
                desc_parts.append(f"Format: {param_format}")

            # Add enum values if available
            if param_enum:
                enum_str = ", ".join([f'"{val}"' for val in param_enum[:6]])  # Show up to 6 values
                if len(param_enum) > 6:
                    enum_str += f"... (and {len(param_enum) - 6} more)"
                desc_parts.append(f"Valid values: {enum_str}")

            # Add default value if specified
            if param_default is not None:
                desc_parts.append(f"Default: {param_default}")

            # Combine all parts
            desc = ". ".join(desc_parts)

            # Clean up description for Python string - escape all special characters
            # (these strings are later written verbatim into a generated .py file)
            desc = (desc.replace('\\', '\\\\')  # Escape backslashes first
                        .replace('"', '\\"')    # Escape quotes
                        .replace('\n', ' ')     # Replace newlines
                        .replace('\r', ' ')     # Replace carriage returns
                        .replace('`', "'"))     # Replace backticks with single quotes

            if param_name:
                parameters_info[param_name] = desc

        return parameters_info
238
-
239
- def generate_compact_resource_spec(self, resource_name: str) -> Optional[Dict[str, Any]]:
240
- """Generate compact spec for a resource."""
241
- analysis = self.catalog_utils.get_resource_analysis(resource_name)
242
- if not analysis:
243
- return None
244
-
245
- title = analysis.get("title", "")
246
- description = analysis.get("description", "")
247
- endpoints = analysis.get("endpoints", [])
248
- base_url = analysis.get("base_url", "")
249
-
250
- # Filter out authentication-required endpoints if configured
251
- if EXCLUDE_AUTH_REQUIRED_ENDPOINTS:
252
- original_count = len(endpoints)
253
- filtered_endpoints = []
254
-
255
- for e in endpoints:
256
- requires_auth = e.get("requires_auth", False)
257
-
258
- # Apply different filtering strategies based on configuration
259
- should_exclude = False
260
-
261
- if EXCLUDE_401_RESPONSES and requires_auth:
262
- should_exclude = True
263
- elif STRICT_AUTH_FILTERING:
264
- # More strict filtering - exclude if any auth indicators found
265
- description = e.get("description", "").lower()
266
- summary = e.get("summary", "").lower()
267
- auth_keywords = ["authentication", "authorization", "bearer", "token", "auth", "login", "credential", "secured", "private", "admin"]
268
- should_exclude = any(keyword in description or keyword in summary for keyword in auth_keywords)
269
-
270
- if not should_exclude:
271
- filtered_endpoints.append(e)
272
-
273
- filtered_count = len(filtered_endpoints)
274
- excluded_count = original_count - filtered_count
275
- print(f" 📊 Filtered {excluded_count} auth-required endpoints from {resource_name} ({original_count} → {filtered_count})")
276
- else:
277
- filtered_endpoints = endpoints
278
-
279
- # Extract key endpoints based on user requirements (base URL approach)
280
- key_endpoints = []
281
-
282
- # For Member API - use base /members endpoint with comprehensive parameters
283
- if "member" in resource_name.lower():
284
- # Find the main /members endpoint - check both filtered and unfiltered endpoints
285
- # since /members is known to work without auth despite having 401 responses
286
- members_endpoint = None
287
-
288
- # First check filtered endpoints
289
- for endpoint in filtered_endpoints:
290
- if endpoint.get("path") == "/members" and endpoint.get("method") == "GET":
291
- members_endpoint = endpoint
292
- break
293
-
294
- # If not found in filtered, check original endpoints (known working case)
295
- if not members_endpoint:
296
- for endpoint in endpoints:
297
- if endpoint.get("path") == "/members" and endpoint.get("method") == "GET":
298
- # Special case: /members works without auth despite 401 responses
299
- if "authentication credential is optional" in endpoint.get("description", "").lower():
300
- members_endpoint = endpoint
301
- break
302
-
303
- # Extract comprehensive parameter information
304
- params_info = self._extract_endpoint_parameters(members_endpoint) if members_endpoint else {}
305
- params_desc = f"Available parameters: {', '.join(params_info.keys())}" if params_info else "Get members list with optional filters like handle, page, perPage"
306
-
307
- key_endpoints.append({
308
- "path": "/members",
309
- "method": "GET",
310
- "full_url": f"{base_url}/members",
311
- "description": params_desc,
312
- "parameters": params_info
313
- })
314
-
315
- # For Challenges API - use base /challenges endpoint with comprehensive parameters
316
- elif "challenge" in resource_name.lower():
317
- # Find the main /challenges endpoint to extract real parameters
318
- challenges_endpoint = None
319
- for endpoint in filtered_endpoints:
320
- if endpoint.get("path") == "/challenges" and endpoint.get("method") == "GET":
321
- challenges_endpoint = endpoint
322
- break
323
-
324
- # Extract comprehensive parameter information
325
- params_info = self._extract_endpoint_parameters(challenges_endpoint) if challenges_endpoint else {}
326
- params_desc = f"Available parameters: {', '.join(params_info.keys())}" if params_info else "Get challenges list with optional filters like status, track, page, perPage"
327
-
328
- key_endpoints.append({
329
- "path": "/challenges",
330
- "method": "GET",
331
- "full_url": f"{base_url}/challenges",
332
- "description": params_desc,
333
- "parameters": params_info
334
- })
335
-
336
- # For other APIs - extract important base endpoints
337
- else:
338
- seen_base_paths = set()
339
- for endpoint in filtered_endpoints[:5]: # Limit to first 5
340
- path = endpoint.get("path", "")
341
- method = endpoint.get("method", "GET")
342
- summary = endpoint.get("summary", "")
343
- full_url = endpoint.get("full_url", "")
344
-
345
- # Extract base path (remove path parameters)
346
- base_path = path.split('/')[1] if '/' in path and len(path.split('/')) > 1 else path
347
- base_path = f"/{base_path}" if not base_path.startswith('/') else base_path
348
-
349
- if base_path not in seen_base_paths and path and method:
350
- seen_base_paths.add(base_path)
351
- key_endpoints.append({
352
- "path": path,
353
- "method": method,
354
- "summary": summary[:100] + "..." if len(summary) > 100 else summary,
355
- "full_url": full_url
356
- })
357
-
358
- # Generate compact description
359
- if title and description:
360
- compact_desc = f"{title}: {description[:150]}..."
361
- elif title:
362
- compact_desc = title
363
- elif description:
364
- compact_desc = description[:200] + "..."
365
- else:
366
- compact_desc = f"API with {len(filtered_endpoints)} endpoints"
367
-
368
- # Clean up description for Python string
369
- compact_desc = compact_desc.replace('"', '\\"').replace('\n', ' ').replace('\r', ' ')
370
-
371
- return {
372
- "description": compact_desc,
373
- "key_endpoints": key_endpoints,
374
- "total_endpoints": len(filtered_endpoints),
375
- "base_url": base_url
376
- }
377
-
378
    async def generate_compact_specs(self) -> Dict[str, Any]:
        """Generate compact specs for all working tools and resources.

        Every cataloged tool/resource is first health-checked (see
        test_tool_functionality / test_resource_functionality); only entries
        that pass AND yield a non-empty compact spec are included.

        Returns:
            {"tools": {name: spec}, "resources": {name: spec}}
        """
        print("🔍 Testing functionality and generating compact specs...")

        # Test and generate tool specs
        tools = self.catalog_utils.list_available_tools()
        working_tools = {}

        for tool_name in tools:
            print(f" 🧪 Testing tool: {tool_name}")
            if await self.test_tool_functionality(tool_name):
                spec = self.generate_compact_tool_spec(tool_name)
                if spec:
                    working_tools[tool_name] = spec
                    print(f" ✅ {tool_name} - Working")
            else:
                print(f" ❌ {tool_name} - Not working")

        # Test and generate resource specs
        resources = self.catalog_utils.list_available_resources()
        working_resources = {}

        for resource_name in resources:
            print(f" 🧪 Testing resource: {resource_name}")
            if await self.test_resource_functionality(resource_name):
                spec = self.generate_compact_resource_spec(resource_name)
                if spec:
                    working_resources[resource_name] = spec
                    print(f" ✅ {resource_name} - Working")
            else:
                print(f" ❌ {resource_name} - Not working")

        return {
            "tools": working_tools,
            "resources": working_resources
        }
414
-
415
    def save_compact_specs(self, specs: Dict[str, Any], output_file: str = "app/config/compact_api_specs.py"):
        """Save compact specs to a Python file.

        Serializes the specs dict as a COMPACT_API_SPECS literal in a
        generated .py module. Tool/parameter descriptions were pre-escaped by
        the generate_* methods, so they are written verbatim between quotes;
        endpoint descriptions/parameters are escaped again here.

        Args:
            specs: Output of generate_compact_specs().
            output_file: Path of the generated module (overwritten).
        """
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write("# Generated compact API specs from MCP catalog\n")
            f.write("# Only includes working tools and resources\n\n")

            f.write("COMPACT_API_SPECS = {\n")

            # Write tools
            f.write(" # Working Tools\n")
            for tool_name, spec in specs.get("tools", {}).items():
                f.write(f' "{tool_name}": {{\n')
                f.write(f' "description": "{spec["description"]}",\n')
                f.write(" \"parameters\": {\n")
                for param_name, param_desc in spec["parameters"].items():
                    f.write(f' "{param_name}": "{param_desc}",\n')
                f.write(" }\n")
                f.write(" },\n")

            # Write resources
            f.write("\n # Working Resources\n")
            for resource_name, spec in specs.get("resources", {}).items():
                f.write(f' "{resource_name}": {{\n')
                f.write(f' "description": "{spec["description"]}",\n')
                f.write(f' "total_endpoints": {spec["total_endpoints"]},\n')
                if spec.get("base_url"):
                    f.write(f' "base_url": "{spec["base_url"]}",\n')
                f.write(" \"key_endpoints\": [\n")
                for endpoint in spec["key_endpoints"][:5]:  # Limit to 5 key endpoints
                    endpoint_line = f' {{"path": "{endpoint["path"]}", "method": "{endpoint["method"]}"'
                    if endpoint.get("full_url"):
                        endpoint_line += f', "full_url": "{endpoint["full_url"]}"'
                    if endpoint.get("description"):
                        # Escape description for Python string
                        escaped_desc = endpoint["description"].replace('"', '\\"')
                        endpoint_line += f', "description": "{escaped_desc}"'
                    if endpoint.get("parameters"):
                        # Write parameters as a nested dictionary
                        endpoint_line += ', "parameters": {'
                        param_entries = []
                        for param_name, param_desc in endpoint["parameters"].items():
                            # Escape all special characters for Python string
                            escaped_param_desc = (param_desc.replace('\\', '\\\\')  # Escape backslashes first
                                                  .replace('"', '\\"')  # Escape quotes
                                                  .replace('\n', ' ')  # Replace newlines
                                                  .replace('\r', ' ')  # Replace carriage returns
                                                  .replace('`', "'"))  # Replace backticks with single quotes
                            param_entries.append(f'"{param_name}": "{escaped_param_desc}"')
                        endpoint_line += ', '.join(param_entries)
                        endpoint_line += '}'
                    endpoint_line += "},\n"
                    f.write(endpoint_line)
                f.write(" ]\n")
                f.write(" },\n")

            f.write("}\n")

        print(f"✅ Compact specs saved to {output_file}")
473
-
474
- def print_summary(self, specs: Dict[str, Any]):
475
- """Print a summary of the generated specs."""
476
- tools = specs.get("tools", {})
477
- resources = specs.get("resources", {})
478
-
479
- print(f"\n📊 Compact Specs Summary:")
480
- print(f" 🔧 Working Tools: {len(tools)}")
481
- for tool_name in tools:
482
- print(f" - {tool_name}")
483
-
484
- print(f" 📚 Working Resources: {len(resources)}")
485
- for resource_name in resources:
486
- total_endpoints = resources[resource_name].get("total_endpoints", 0)
487
- print(f" - {resource_name} ({total_endpoints} endpoints)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/mcp/stream_client.py DELETED
@@ -1,308 +0,0 @@
1
- import os
2
- import asyncio
3
- import json
4
- import uuid
5
- from dataclasses import dataclass
6
- from typing import Any, Dict, List, Optional, Tuple
7
- import aiohttp
8
- from app.config import settings
9
-
10
# Default to local config file instead of ~/.cursor/mcp.json
DEFAULT_CONFIG_PATH = "mcp-config.json"
# Key looked up under "mcpServers" in the config file.
DEFAULT_SERVER_NAME = "topcoder"
# MCP protocol revision advertised during the initialize handshake.
DEFAULT_PROTOCOL_VERSION = "2024-11-05"
14
-
15
-
16
@dataclass
class ServerConfig:
    """Connection details for a single MCP server."""

    # Logical server name (key under "mcpServers" in the config file).
    name: str
    # Full URL of the server's streamable-HTTP endpoint.
    url: str
    # Extra HTTP headers (e.g. Authorization) sent with every request.
    headers: Dict[str, str]
21
-
22
-
23
def load_server_config(
    server_name: str = DEFAULT_SERVER_NAME,
    config_path: str = DEFAULT_CONFIG_PATH,
) -> ServerConfig:
    """Resolve a ServerConfig from environment variables and the config file.

    The URL is resolved env > config file > built-in default; headers from
    the environment override headers from the config file. A missing config
    file only triggers a warning; an unreadable one raises RuntimeError.
    """
    # Environment overrides, handy for quick testing.
    env_url = os.environ.get("MCP_SERVER_URL")
    env_headers: Dict[str, str] = {}
    if token := os.environ.get("MCP_TOKEN"):
        env_headers["Authorization"] = f"Bearer {token}"

    url_from_config: Optional[str] = None
    headers_from_config: Dict[str, str] = {}

    if not os.path.isfile(config_path):
        print(f"Warning: Config file {config_path} not found. Using environment variables or defaults.")
    else:
        try:
            with open(config_path, "r", encoding="utf-8") as f:
                cfg = json.load(f)
            server_entry = cfg.get("mcpServers", {}).get(server_name, {})
            url_from_config = server_entry.get("url")
            # A conventional "apiKey" entry becomes a bearer Authorization header.
            if api_key := server_entry.get("apiKey"):
                headers_from_config["Authorization"] = f"Bearer {api_key}"
            # A raw "headers" block, if present, is copied through verbatim.
            headers_from_config.update(server_entry.get("headers", {}))
        except Exception as exc:
            raise RuntimeError(f"Failed to read config {config_path}: {exc}")

    # Final resolution order: env > config > default.
    return ServerConfig(
        name=server_name,
        url=env_url or url_from_config or "https://api.topcoder-dev.com/v6/mcp/mcp",
        headers={**headers_from_config, **env_headers},
    )
61
-
62
-
63
def build_json_rpc_request(method: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
    """Construct a JSON-RPC 2.0 request envelope with a fresh UUID id."""
    return {
        "jsonrpc": "2.0",
        "id": str(uuid.uuid4()),
        "method": method,
        # Absent/empty params collapse to an empty object.
        "params": params or {},
    }
71
-
72
-
73
- def _parse_streamable_http_body(text_body: str, content_type: str) -> Dict[str, Any]:
74
- """Parse a Streamable HTTP response body that may be JSON or SSE-framed.
75
-
76
- Returns a single JSON-RPC message object (not an array). If the server
77
- wraps responses in an array, the first element will be returned.
78
- """
79
- ct_lower = (content_type or "").lower()
80
-
81
- # Try JSON first when content-type hints JSON
82
- if "application/json" in ct_lower:
83
- data = json.loads(text_body)
84
- if isinstance(data, list) and data:
85
- return data[0]
86
- return data
87
-
88
- # Fallback: detect SSE-style content or bodies that include 'event:' lines
89
- if "text/event-stream" in ct_lower or text_body.lstrip().startswith("event:"):
90
- # Collect the first 'data: ' block
91
- json_payload_lines: list[str] = []
92
- for raw_line in text_body.splitlines():
93
- line = raw_line.strip("\r\n")
94
- if line.startswith("data:"):
95
- # After 'data:' there may be a space; strip and collect
96
- # Everything after 'data:' is part of the JSON payload line
97
- json_payload_lines.append(line[len("data:"):].lstrip())
98
- elif line == "":
99
- # End of event
100
- if json_payload_lines:
101
- break
102
- if not json_payload_lines:
103
- raise json.JSONDecodeError("No data payload found in SSE body", text_body, 0)
104
- joined = "\n".join(json_payload_lines)
105
- data = json.loads(joined)
106
- if isinstance(data, list) and data:
107
- return data[0]
108
- return data
109
-
110
- # Unknown content-type; try best-effort JSON
111
- data = json.loads(text_body)
112
- if isinstance(data, list) and data:
113
- return data[0]
114
- return data
115
-
116
-
117
class StreamableHttpMCPClient:
    """MCP client speaking the Streamable HTTP transport.

    Lifecycle: use as an async context manager, call initialize() once to
    obtain an MCP session id, then issue JSON-RPC calls via call().
    """

    def __init__(self, server_config: ServerConfig, timeout_seconds: int = 60) -> None:
        # Target URL plus any auth headers, as resolved by load_server_config().
        self.server_config = server_config
        self.timeout_seconds = timeout_seconds
        # aiohttp session; created in __aenter__, closed in __aexit__.
        self.session: Optional[aiohttp.ClientSession] = None
        # Session id captured from the initialize response headers.
        self.mcp_session_id: Optional[str] = None

    async def __aenter__(self) -> "StreamableHttpMCPClient":
        headers = {"Content-Type": "application/json"}
        # Per spec, client should accept both json and event-stream
        headers["Accept"] = "application/json, text/event-stream"
        # Add configured headers (e.g., Authorization)
        headers.update(self.server_config.headers)
        self.session = aiohttp.ClientSession(headers=headers)
        return self

    async def __aexit__(self, exc_type, exc, tb) -> None:
        if self.session is not None:
            await self.session.close()
            self.session = None

    async def initialize(self) -> Dict[str, Any]:
        """Perform the MCP initialize handshake and capture the session id.

        Raises:
            RuntimeError: if called outside 'async with', if the response
                body cannot be parsed, or if no session id header is present.
        """
        if self.session is None:
            raise RuntimeError("Client session not started. Use 'async with'.")

        payload = build_json_rpc_request(
            "initialize",
            params={
                "protocolVersion": DEFAULT_PROTOCOL_VERSION,
                "clientInfo": {"name": "python-mcp-client", "version": "0.1.0"},
                # It's helpful to advertise capabilities, though not required for list calls
                "capabilities": {"resources": {}, "prompts": {}, "tools": {}},
            },
        )
        async with self.session.post(self.server_config.url, json=payload, timeout=self.timeout_seconds) as resp:
            text = await resp.text()
            try:
                data = _parse_streamable_http_body(text, resp.headers.get("Content-Type", ""))
            except json.JSONDecodeError as exc:
                raise RuntimeError(f"Failed to parse initialize response: {exc}\nRaw: {text}") from exc

            # Capture session ID from headers (case-insensitive)
            session_id = None
            for key, value in resp.headers.items():
                if key.lower() == "mcp-session-id":
                    session_id = value
                    break
            if not session_id:
                # Fallback to common casings
                session_id = resp.headers.get("Mcp-Session-Id") or resp.headers.get("MCP-Session-Id")

            if not session_id:
                raise RuntimeError(
                    "Missing MCP-Session-Id in initialize response headers. "
                    f"HTTP {resp.status}, body: {text}"
                )

            self.mcp_session_id = session_id
            return data

    async def send_initialized_notification(self) -> Tuple[int, str]:
        """Send notifications/initialized; returns (HTTP status, body text)."""
        if self.session is None:
            raise RuntimeError("Client session not started. Use 'async with'.")
        if not self.mcp_session_id:
            raise RuntimeError("No MCP session established. Call initialize() first.")

        # notifications/initialized is a notification (no id)
        payload = {"jsonrpc": "2.0", "method": "notifications/initialized", "params": {}}
        # Include session header
        headers = {"mcp-session-id": self.mcp_session_id}
        async with self.session.post(self.server_config.url, json=payload, timeout=self.timeout_seconds, headers=headers) as resp:
            return resp.status, await resp.text()

    async def call(self, method: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Issue a JSON-RPC request within the established MCP session.

        Raises:
            RuntimeError: if the session/handshake is missing, on non-200
                HTTP status, or when the body cannot be parsed.
        """
        if self.session is None:
            raise RuntimeError("Client session not started. Use 'async with'.")
        if not self.mcp_session_id:
            raise RuntimeError("No MCP session established. Call initialize() first.")

        payload = build_json_rpc_request(method, params)
        headers = {"mcp-session-id": self.mcp_session_id}
        async with self.session.post(self.server_config.url, json=payload, timeout=self.timeout_seconds, headers=headers) as resp:
            text = await resp.text()
            if resp.status != 200:
                raise RuntimeError(f"HTTP error {resp.status}: {text}")
            try:
                data = _parse_streamable_http_body(text, resp.headers.get("Content-Type", ""))
            except json.JSONDecodeError as exc:
                raise RuntimeError(f"Failed to parse JSON response: {exc}\nRaw: {text}") from exc
            return data
207
-
208
-
209
class MCPStreamClient:
    """
    MCP client for connecting to Topcoder MCP server using HTTP.
    """

    def __init__(self, base_url: Optional[str] = None):
        # Server endpoint; falls back to the configured MCP_BASE_URL.
        self.base_url = base_url or settings.MCP_BASE_URL
        # Lazily-created aiohttp session, shared across calls until close().
        self._session: Optional[aiohttp.ClientSession] = None
        self._session_id: Optional[str] = None

    async def _get_session(self) -> aiohttp.ClientSession:
        """Return the shared aiohttp session, creating it on first use."""
        if not self._session:
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json, text/event-stream",
            }
            self._session = aiohttp.ClientSession(headers=headers)
        return self._session

    async def close(self):
        """Close the underlying HTTP session (safe to call repeatedly)."""
        if self._session:
            await self._session.close()
            self._session = None

    async def _rpc(self, method: str, params: Dict[str, Any]) -> Any:
        """POST a JSON-RPC request and return its "result" (or raw payload).

        Handles both plain JSON responses and SSE-framed bodies; raises on
        HTTP errors or unparseable responses.
        """
        session = await self._get_session()
        payload = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": method,
            "params": params
        }

        async with session.post(self.base_url, json=payload) as resp:
            # Handle Server-Sent Events (SSE) format
            if resp.headers.get("content-type", "").startswith("text/event-stream"):
                # Stream line-by-line and return the first parseable data frame.
                async for line in resp.content:
                    line = line.decode('utf-8').strip()
                    if line.startswith('data: '):
                        try:
                            data = json.loads(line[6:])  # Remove 'data: ' prefix
                            if isinstance(data, dict) and "result" in data:
                                return data["result"]
                            return data
                        except json.JSONDecodeError:
                            continue
                raise Exception("No valid JSON data found in SSE stream")
            else:
                # Handle regular JSON response
                try:
                    data = await resp.json()
                except Exception:
                    text = await resp.text()
                    raise Exception(f"Failed to parse MCP response ({resp.status}): {text}")

                if resp.status >= 400:
                    raise Exception(f"MCP error {resp.status}: {data}")

                # Support JSON-RPC style responses or direct result bodies
                if isinstance(data, dict) and "result" in data:
                    return data["result"]
                return data

    async def initialize_session(self) -> str:
        """Initialize a session with the MCP server."""
        result = await self._rpc("initialize", {
            "protocolVersion": "2024-11-05",
            "capabilities": {
                "tools": {},
                "resources": {}
            },
            "clientInfo": {
                "name": "topcoder-mcp-agent",
                "version": "1.0.0"
            }
        })
        print("DEBUG: Initialize result:", result)
        return "initialized"

    async def list_resources(self) -> List[Dict[str, Any]]:
        """List available resources from the MCP server."""
        result = await self._rpc("resources/list", {})
        # Normalize result to a list
        if isinstance(result, dict) and "resources" in result:
            return result["resources"] or []
        if isinstance(result, list):
            return result
        return []

    async def list_tools(self) -> List[Dict[str, Any]]:
        """List available tools from the MCP server."""
        result = await self._rpc("tools/list", {})
        # Normalize result to a list, mirroring list_resources.
        if isinstance(result, dict) and "tools" in result:
            return result["tools"] or []
        if isinstance(result, list):
            return result
        return []
306
-
307
-
308
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/prompts/default_responses.py DELETED
@@ -1,16 +0,0 @@
1
- # app/prompts/default_responses.py
2
-
3
- default_help_response = (
4
- "🤖 I’m a Topcoder MCP Agent! I can:\n"
5
- "- Show active challenges\n"
6
- "- Fetch member profiles (with handle)\n"
7
- "- Answer questions about Topcoder processes\n\n"
8
- "I won’t be able to help with general knowledge or personal tasks."
9
- )
10
-
11
- help_triggers = [
12
- "what can you do?",
13
- "what can you do for me",
14
- "help",
15
- "how can you help me?",
16
- ]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/prompts/system_prompts.py DELETED
@@ -1,10 +0,0 @@
1
- # app/prompts/system_prompts.py
2
-
3
-
4
- topcoder_system_prompt = (
5
- "You are a specialized conversational assistant for the Topcoder Member Communication Platform (MCP). "
6
- "Your ONLY purpose is to help users with Topcoder-related topics — such as challenges, member profiles, platform usage, rules, or community resources.\n\n"
7
- "You MUST NOT answer questions about general knowledge (e.g., history, science, geography), personal advice, current events, or any topic outside Topcoder.\n\n"
8
- "If the user asks something unrelated to Topcoder, politely respond that you are only trained to assist with Topcoder-specific information."
9
- )
10
-
 
 
 
 
 
 
 
 
 
 
 
backup_v1/ui/__init__.py DELETED
File without changes
backup_v1/ui/interface.py DELETED
@@ -1,151 +0,0 @@
1
- import asyncio
2
- import gradio as gr
3
-
4
- from app.agent.model_client import LLMClient
5
- from app.agent.schema import ToolRequest
6
- from app.agent.tool_executor import ToolExecutor
7
- from app.agent.prompt_engine import PromptBuilder
8
- from app.prompts.system_prompts import topcoder_system_prompt
9
- from app.prompts.default_responses import default_help_response, help_triggers
10
- from app.config.static_responses import REJECT_RESPONSE
11
- from app.utils.json_filter import filter_tool_response
12
-
13
- llm_client = LLMClient()
14
- tool_executor = ToolExecutor()
15
- prompt_builder = PromptBuilder()
16
-
17
- async def agent_response(user_message, history):
18
- # STEP 1: Dynamic tool/resource decision using compact specs
19
- decision_prompt = prompt_builder.build_tool_decision_prompt(user_message)
20
- decision_json = await llm_client.decide_tool(decision_prompt)
21
-
22
-
23
- # Handle both tool and resource decisions
24
- tool = decision_json.get("tool")
25
- resource = decision_json.get("resource")
26
- params = decision_json.get("params", {})
27
-
28
- # For resources, also extract endpoint and method from top level
29
- if resource:
30
- endpoint = decision_json.get("endpoint")
31
- method = decision_json.get("method")
32
- if endpoint:
33
- params["endpoint"] = endpoint
34
- if method:
35
- params["method"] = method
36
-
37
- # STEP 2: Clean up parameters (remove empty values)
38
- if params:
39
- params = {k: v for k, v in params.items() if v and v != ""}
40
-
41
- # STEP 3: General chat
42
- if tool == "chat" or resource == "chat":
43
- return await llm_client.chat([
44
- {"role": "system", "content": topcoder_system_prompt},
45
- {"role": "user", "content": user_message}
46
- ])
47
-
48
- # STEP 4: Rejection
49
- if tool == "reject" or resource == "reject":
50
- return REJECT_RESPONSE
51
-
52
- # STEP 5: Determine what to execute
53
- available_tools = tool_executor.get_available_tools()
54
- available_resources = tool_executor.get_available_resources()
55
-
56
- if tool and tool in available_tools:
57
- # Execute tool
58
- target_name = tool
59
- target_type = "tool"
60
- elif resource and resource in available_resources:
61
- # Execute resource
62
- target_name = resource
63
- target_type = "resource"
64
- else:
65
- # Invalid selection
66
- invalid_name = tool or resource
67
- return f"❌ '{invalid_name}' not available. Available tools: {', '.join(available_tools)}, Available resources: {', '.join(available_resources)}"
68
-
69
- # STEP 6: Parameter extraction if needed
70
- if not params:
71
- if target_type == "tool":
72
- param_extraction_prompt = f"""
73
- Extract parameters for the tool '{target_name}' from this user query: "{user_message}"
74
-
75
- Available parameters for {target_name}:
76
- {tool_executor.get_tool_parameters(target_name)}
77
-
78
- Only include parameters that have meaningful values from the user query.
79
- Respond ONLY with a JSON object containing the extracted parameters:
80
- {{"params": {{"param1": "value1", "param2": "value2"}} }}
81
- """
82
- param_response = await llm_client.complete_json(param_extraction_prompt)
83
- params = param_response.get("params", {})
84
- # Clean up extracted parameters too
85
- params = {k: v for k, v in params.items() if v and v != ""}
86
- else: # resource
87
- # For resources, we need to extract both endpoint parameters AND API parameters
88
- param_extraction_prompt = f"""
89
- Extract parameters for the resource '{target_name}' from this user query: "{user_message}"
90
-
91
- The user wants to call a resource API. You need to extract:
92
- 1. API parameters that should be passed to the endpoint (like handle, challengeId, etc.)
93
- 2. Any path parameters that need to be substituted in the URL
94
-
95
- For example, if the user asks for "member abhishekrn", extract:
96
- - handle: "abhishekrn" (for the API call)
97
-
98
- Common parameters to look for:
99
- - handle: member handle/username
100
- - challengeId: challenge ID
101
- - groupId: group ID
102
- - memberId: member ID
103
- - Any other IDs or values mentioned in the query
104
-
105
- Respond ONLY with a JSON object containing the extracted parameters:
106
- {{"params": {{"param1": "value1", "param2": "value2"}} }}
107
- """
108
- param_response = await llm_client.complete_json(param_extraction_prompt)
109
- extracted_params = param_response.get("params", {})
110
- # Clean up extracted parameters
111
- extracted_params = {k: v for k, v in extracted_params.items() if v and v != ""}
112
-
113
- # Merge with existing params (endpoint and method from LLM decision)
114
- params.update(extracted_params)
115
-
116
- # STEP 7: Execute tool or resource using compact specs
117
- request = ToolRequest(tool=target_name, params=params)
118
- tool_result = await tool_executor.execute(request)
119
-
120
- if tool_result.status != "success":
121
- return f"❌ Error executing `{target_name}`: {tool_result.message}"
122
-
123
- # STEP 8: Filter response before summarizing
124
- filtered_data = filter_tool_response(target_name, tool_result.data)
125
-
126
- summarization_prompt = f"""
127
- You are a Topcoder assistant.
128
-
129
- The user asked: "{user_message}"
130
- {target_type.capitalize()} used: {target_name}
131
- Response (filtered):
132
- {filtered_data}
133
-
134
- Write a clear, concise response that helps the user based on this data.
135
- """
136
-
137
- final_response = await llm_client.chat([
138
- {"role": "system", "content": "You are a helpful Topcoder assistant summarizing tool/resource outputs."},
139
- {"role": "user", "content": summarization_prompt.strip()}
140
- ])
141
-
142
- return final_response
143
-
144
- def launch_ui():
145
- demo = gr.ChatInterface(
146
- fn=agent_response,
147
- title="🧠 Topcoder MCP Agent",
148
- theme="soft",
149
- examples=["Active AI Challenges", "Member details", "Tell me a joke"],
150
- )
151
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
backup_v1/utils/json_filter.py DELETED
@@ -1,72 +0,0 @@
1
- def filter_tool_response(tool: str, data: dict | list) -> dict | list:
2
- """
3
- Filters the API response to first 5 items with only relevant fields.
4
- """
5
- # Handle challenge-related tools and resources
6
- if "challenge" in tool.lower() or tool == "get_challenges":
7
- if isinstance(data, list):
8
- # Return only first 5 challenges with relevant fields
9
- return [
10
- {
11
- "id": challenge.get("id"),
12
- "name": challenge.get("name"),
13
- "track": challenge.get("track"),
14
- "totalPrizes": challenge.get("totalPrizes", 0),
15
- "status": challenge.get("status"),
16
- "registrationEndDate": challenge.get("registrationEndDate")
17
- }
18
- for challenge in data[:5] # First 5 challenges only
19
- ]
20
- else:
21
- # Single challenge - return relevant fields only
22
- return {
23
- "id": data.get("id"),
24
- "name": data.get("name"),
25
- "track": data.get("track"),
26
- "totalPrizes": data.get("totalPrizes", 0),
27
- "status": data.get("status")
28
- }
29
-
30
- # Handle member-related tools and resources
31
- elif "member" in tool.lower() or tool == "get_member":
32
- if isinstance(data, list):
33
- # Return only first 5 members with relevant fields
34
- return [
35
- {
36
- "handle": member.get("handle"),
37
- "firstName": member.get("firstName"),
38
- "lastName": member.get("lastName"),
39
- "status": member.get("status"),
40
- "country": member.get("homeCountryCode")
41
- }
42
- for member in data[:5] # First 5 members only
43
- ]
44
- else:
45
- # Single member - return relevant fields only
46
- return {
47
- "handle": data.get("handle"),
48
- "firstName": data.get("firstName"),
49
- "lastName": data.get("lastName"),
50
- "status": data.get("status"),
51
- "country": data.get("homeCountryCode")
52
- }
53
-
54
- # Handle skills-related tools
55
- elif "skill" in tool.lower():
56
- if isinstance(data, list):
57
- # Return only first 5 skills with relevant fields
58
- return [
59
- {
60
- "id": skill.get("id"),
61
- "name": skill.get("name"),
62
- "description": skill.get("description", "")
63
- }
64
- for skill in data[:5] # First 5 skills only
65
- ]
66
- return data
67
-
68
- # Default fallback - limit to first 5 items for any list
69
- if isinstance(data, list):
70
- return data[:5]
71
-
72
- return data
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
config/api_endpoints.py CHANGED
@@ -9,8 +9,4 @@ API_BASE_URLS = {
9
 
10
  def get_base_url(resource_name: str) -> str:
11
  """Get base URL for a resource."""
12
- return API_BASE_URLS.get(resource_name, "")
13
-
14
- def get_all_base_urls() -> dict:
15
- """Get all available base URLs."""
16
- return API_BASE_URLS.copy()
 
9
 
10
  def get_base_url(resource_name: str) -> str:
11
  """Get base URL for a resource."""
12
+ return API_BASE_URLS.get(resource_name, "")
 
 
 
 
config/conversation_config.py CHANGED
@@ -32,15 +32,12 @@ class EntityExtractionConfig:
32
 
33
  # Extraction limits (configurable)
34
  max_entities_per_tool: int = 5
35
- max_member_entities: int = 3
36
 
37
  # Entity confidence
38
  default_entity_confidence: float = 0.8
39
- high_confidence: float = 0.9
40
 
41
  # Dynamic extraction settings
42
  use_dynamic_patterns: bool = True
43
- extract_from_response_data: bool = True
44
 
45
  @dataclass
46
  class ConversationMemoryConfig:
@@ -57,9 +54,6 @@ class ConversationMemoryConfig:
57
  # Response truncation
58
  max_response_storage_chars: int = 1000
59
  max_context_summary_chars: int = 200
60
-
61
- # Entity tracking
62
- max_tracked_entities: int = 50
63
 
64
  @dataclass
65
  class HistoryHandlerConfig:
@@ -68,7 +62,6 @@ class HistoryHandlerConfig:
68
  # Response limits
69
  max_response_chars_for_display: int = 200
70
  max_turns_in_prompt: int = 5
71
- max_entity_contexts: int = 10
72
 
73
  # Fallback messages
74
  no_context_message: str = "I don't see any relevant information from our previous conversation to answer that question. Could you provide more context or ask me to search for new information?"
@@ -81,12 +74,6 @@ class ToolMappingConfig:
81
 
82
  # Dynamic mapping settings
83
  use_runtime_discovery: bool = True
84
- cache_mappings: bool = True
85
- refresh_cache_on_spec_change: bool = True
86
-
87
- # Fallback behavior when mapping fails
88
- default_entity_type: str = "unknown"
89
- log_unmapped_tools: bool = True
90
 
91
  # Global configuration instance
92
  class ConversationConfig:
 
32
 
33
  # Extraction limits (configurable)
34
  max_entities_per_tool: int = 5
 
35
 
36
  # Entity confidence
37
  default_entity_confidence: float = 0.8
 
38
 
39
  # Dynamic extraction settings
40
  use_dynamic_patterns: bool = True
 
41
 
42
  @dataclass
43
  class ConversationMemoryConfig:
 
54
  # Response truncation
55
  max_response_storage_chars: int = 1000
56
  max_context_summary_chars: int = 200
 
 
 
57
 
58
  @dataclass
59
  class HistoryHandlerConfig:
 
62
  # Response limits
63
  max_response_chars_for_display: int = 200
64
  max_turns_in_prompt: int = 5
 
65
 
66
  # Fallback messages
67
  no_context_message: str = "I don't see any relevant information from our previous conversation to answer that question. Could you provide more context or ask me to search for new information?"
 
74
 
75
  # Dynamic mapping settings
76
  use_runtime_discovery: bool = True
 
 
 
 
 
 
77
 
78
  # Global configuration instance
79
  class ConversationConfig:
config/prompt_templates.py CHANGED
@@ -9,53 +9,6 @@ from typing import Dict, Any
9
  class PromptTemplates:
10
  """Centralized prompt templates for the Topcoder MCP Agent."""
11
 
12
- # System prompts
13
- TOOL_DECISION_SYSTEM = (
14
- "You are a tool decision assistant. Analyze the user's request and available "
15
- "tools/resources, then respond ONLY with a JSON object containing the tool "
16
- "name and parameters."
17
- )
18
-
19
- JSON_ASSISTANT_SYSTEM = (
20
- "You are a strict API assistant. Only return valid JSON."
21
- )
22
-
23
- SUMMARIZATION_SYSTEM = (
24
- "You are a helpful Topcoder assistant summarizing tool/resource outputs."
25
- )
26
-
27
- # Enhanced tool parameter extraction templates with context awareness
28
- TOOL_PARAM_EXTRACTION = """Extract parameters for the tool '{tool_name}' from this user query: "{user_message}"
29
-
30
- Available parameters for {tool_name}:
31
- {tool_parameters}
32
-
33
- CONTEXT-AWARE PARAMETER EXTRACTION:
34
- - Consider the conversation context when extracting parameters
35
- - If the user is asking follow-up questions, use context from previous responses
36
- - If the conversation mentioned specific filters (AI, Machine Learning, etc.), include them
37
- - If the conversation mentioned specific statuses (Active, Completed, etc.), include them
38
- - If the user is asking for "more" or "similar" items, use context from previous responses
39
- - If the user is refining a previous query, combine new parameters with context
40
-
41
- EXTRACTION GUIDELINES:
42
- - Only include parameters that have meaningful values from the user query OR conversation context
43
- - For follow-up questions, consider what was discussed before
44
- - If the user mentions "AI challenges" after discussing challenges, include track="AI"
45
- - If the user asks for "more skills" after discussing programming, include appropriate filters
46
- - If the user asks for "similar challenges", use the same parameters as before
47
-
48
- Respond ONLY with a JSON object containing the extracted parameters:
49
- {{"params": {{"param1": "value1", "param2": "value2"}} }}"""
50
-
51
- # Enhanced resource parameter extraction templates with context awareness
52
- RESOURCE_PARAM_EXTRACTION = """Extract parameters for '{resource_name}' from: "{user_message}"
53
-
54
- Extract only parameters with meaningful values from the user query.
55
- Follow the parameter descriptions exactly as specified in the API specs.
56
-
57
- Respond ONLY with: {{"params": {{"param1": "value1"}} }}"""
58
-
59
  # Gradio interface specific prompts with context awareness
60
  GRADIO_TOOL_PARAM_EXTRACTION = """Extract parameters for the tool '{tool_name}' from this user query: "{user_message}"
61
 
@@ -112,110 +65,6 @@ INSTRUCTIONS:
112
  - Keep it simple and helpful - don't expose technical details
113
  - Remember: "user" is a search term, not a field name"""
114
 
115
- # Tool decision prompt template with conversation context support
116
- TOOL_DECISION_PROMPT = """You are an assistant that ONLY works with Topcoder's Member Communication Platform (MCP).
117
-
118
- Available MCP Tools:
119
- {tools_list}
120
-
121
- Available MCP Resources (APIs):
122
- {resources_list}
123
-
124
- User query: "{user_message}"
125
-
126
- {conversation_context}
127
-
128
- Analyze the query and determine:
129
- 1. If it's asking for Topcoder data (challenges, members, skills, etc.) → select appropriate tool/resource
130
- 2. If it's a brief greeting, thanks, or simple conversational response → use "chat"
131
- 3. If it's asking for general knowledge, non-Topcoder information, or complex non-Topcoder topics → use "reject"
132
-
133
- CONTEXT-AWARE DECISION MAKING:
134
- - Consider the conversation context when making decisions
135
- - If the user is asking follow-up questions, understand they may be referring to previous entities
136
- - If the conversation has established context (e.g., discussing AI challenges), use that context
137
- - For pronouns like "that", "it", "those", refer to the conversation context
138
- - For requests like "more", "similar", "other", use context from previous interactions
139
-
140
- IMPORTANT:
141
- - For tools: Only include parameters that have meaningful values from the user query or conversation context
142
- - For resources: Use the resource name and specify the endpoint path and method
143
- - For member queries: ALWAYS use "Member_V6_API_Swagger" as a RESOURCE (not tool) with endpoint "/members"
144
- - For challenge queries: Use "query-tc-challenges" as a TOOL with parameters like status, track, page, perPage
145
- - For skills queries: Use "query-tc-skills" as a TOOL with parameters like name, skillId, page, perPage
146
- - Always include relevant parameters in the params object for API calls
147
-
148
- Respond ONLY with a JSON object in this format:
149
- {{"tool": "<tool_name>", "params": {{"param1": "value1", "param2": "value2"}} }}
150
- OR
151
- {{"resource": "<resource_name>", "endpoint": "/path", "method": "GET", "params": {{"param1": "value1", "param2": "value2"}} }}
152
-
153
- Choose either "tool" OR "resource", not both.
154
-
155
- """
156
-
157
- # Response summarization template
158
- RESPONSE_SUMMARIZATION = """You are a Topcoder assistant.
159
-
160
- The user asked: "{user_message}"
161
- {target_type} used: {target_name}
162
- Response (filtered):
163
- {filtered_data}
164
-
165
- Write a clear, concise response that helps the user based on this data."""
166
-
167
- @classmethod
168
- def get_tool_param_extraction_prompt(cls, tool_name: str, user_message: str, tool_parameters: str, conversation_context: str = None) -> str:
169
- """Get formatted tool parameter extraction prompt with optional conversation context."""
170
- base_prompt = cls.TOOL_PARAM_EXTRACTION.format(
171
- tool_name=tool_name,
172
- user_message=user_message,
173
- tool_parameters=tool_parameters
174
- )
175
-
176
- if conversation_context:
177
- base_prompt += f"\n\nCONVERSATION CONTEXT:\n{conversation_context}\n\nUse this context to better understand the user's intent and extract appropriate parameters."
178
-
179
- return base_prompt
180
-
181
- @classmethod
182
- def get_resource_param_extraction_prompt(cls, resource_name: str, user_message: str, conversation_context: str = None) -> str:
183
- """Get formatted resource parameter extraction prompt with optional conversation context."""
184
- base_prompt = cls.RESOURCE_PARAM_EXTRACTION.format(
185
- resource_name=resource_name,
186
- user_message=user_message
187
- )
188
-
189
- if conversation_context:
190
- base_prompt += f"\n\nCONVERSATION CONTEXT:\n{conversation_context}\n\nUse this context to better understand the user's intent and extract appropriate parameters."
191
-
192
- return base_prompt
193
-
194
- @classmethod
195
- def get_tool_decision_prompt(cls, user_message: str, tools_list: str, resources_list: str, conversation_context: str = None) -> str:
196
- """Get formatted tool decision prompt with optional conversation context."""
197
- context_section = ""
198
- if conversation_context:
199
- context_section = f"CONVERSATION CONTEXT:\n{conversation_context}\n"
200
-
201
- return cls.TOOL_DECISION_PROMPT.format(
202
- user_message=user_message,
203
- tools_list=tools_list,
204
- resources_list=resources_list,
205
- conversation_context=context_section
206
- )
207
-
208
- @classmethod
209
- def get_response_summarization_prompt(cls, user_message: str, target_type: str,
210
- target_name: str, filtered_data: Any) -> str:
211
- """Get formatted response summarization prompt."""
212
- return cls.RESPONSE_SUMMARIZATION.format(
213
- user_message=user_message,
214
- target_type=target_type,
215
- target_name=target_name,
216
- filtered_data=filtered_data
217
- )
218
-
219
  # Gradio interface specific methods
220
  @classmethod
221
  def get_gradio_tool_param_extraction_prompt(cls, tool_name: str, user_message: str, tool_parameters: str, conversation_context: str = None) -> str:
 
9
  class PromptTemplates:
10
  """Centralized prompt templates for the Topcoder MCP Agent."""
11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
  # Gradio interface specific prompts with context awareness
13
  GRADIO_TOOL_PARAM_EXTRACTION = """Extract parameters for the tool '{tool_name}' from this user query: "{user_message}"
14
 
 
65
  - Keep it simple and helpful - don't expose technical details
66
  - Remember: "user" is a search term, not a field name"""
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  # Gradio interface specific methods
69
  @classmethod
70
  def get_gradio_tool_param_extraction_prompt(cls, tool_name: str, user_message: str, tool_parameters: str, conversation_context: str = None) -> str:
config/static_responses.py CHANGED
@@ -4,32 +4,3 @@ REJECT_RESPONSE = (
4
  "I focus on Topcoder-related topics like challenges, members, and skills. "
5
  "What would you like to know about Topcoder?"
6
  )
7
-
8
- # Human-readable error messages for users
9
- INVALID_TOOL_RESPONSE = (
10
- "I'm sorry, but I couldn't find the specific tool you're looking for. "
11
- "I can help you with Topcoder challenges, member information, skills, and other platform features. "
12
- "Could you please rephrase your request or ask about something else related to Topcoder?"
13
- )
14
-
15
- INVALID_RESOURCE_RESPONSE = (
16
- "I'm sorry, but I couldn't access the specific resource you requested. "
17
- "I can help you with Topcoder challenges, member profiles, skills, and other platform information. "
18
- "Could you please try asking about something else related to Topcoder?"
19
- )
20
-
21
- GENERAL_ERROR_RESPONSE = (
22
- "I encountered an issue while processing your request. "
23
- "Please try again or ask about something else related to Topcoder."
24
- )
25
-
26
- TOOL_EXECUTION_ERROR_RESPONSE = (
27
- "I'm sorry, but I encountered an error while trying to get that information for you. "
28
- "Please try again or ask about something else related to Topcoder."
29
- )
30
-
31
- # Console logging messages for developers
32
- CONSOLE_INVALID_TOOL_MSG = "❌ Invalid tool selection: '{tool_name}' not available. Available tools: {available_tools}"
33
- CONSOLE_INVALID_RESOURCE_MSG = "❌ Invalid resource selection: '{resource_name}' not available. Available resources: {available_resources}"
34
- CONSOLE_TOOL_EXECUTION_ERROR_MSG = "❌ Error executing tool '{tool_name}': {error_message}"
35
- CONSOLE_RESOURCE_EXECUTION_ERROR_MSG = "❌ Error executing resource '{resource_name}': {error_message}"
 
4
  "I focus on Topcoder-related topics like challenges, members, and skills. "
5
  "What would you like to know about Topcoder?"
6
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
src/application/services/prompt_service.py CHANGED
@@ -1,5 +1,4 @@
1
  from src.mcp.compact_utils import CompactSpecsUtils
2
- from config.prompt_templates import PromptTemplates
3
  from typing import List, Dict, Any, Optional
4
 
5
  class PromptBuilder:
 
1
  from src.mcp.compact_utils import CompactSpecsUtils
 
2
  from typing import List, Dict, Any, Optional
3
 
4
  class PromptBuilder:
src/infrastructure/providers/llm_provider.py CHANGED
@@ -3,7 +3,6 @@ import json
3
  from typing import List, Dict, Any, Optional
4
  from config import settings
5
  from config.system_prompts import topcoder_system_prompt
6
- from config.prompt_templates import PromptTemplates
7
 
8
  class LLMClient:
9
  def __init__(self):
 
3
  from typing import List, Dict, Any, Optional
4
  from config import settings
5
  from config.system_prompts import topcoder_system_prompt
 
6
 
7
  class LLMClient:
8
  def __init__(self):