| | """
|
| | FINAL Topcoder Challenge Intelligence Assistant
|
| | With REAL MCP Integration - Ready for Production
|
| | FIXED: Now uses structuredContent for real challenge data
|
| | """
|
| | import asyncio
|
| | import httpx
|
| | import json
|
| | import gradio as gr
|
| | from datetime import datetime
|
| | from typing import List, Dict, Any, Optional
|
| | from dataclasses import dataclass, asdict
|
| |
|
| | @dataclass
|
| | class Challenge:
|
| | id: str
|
| | title: str
|
| | description: str
|
| | technologies: List[str]
|
| | difficulty: str
|
| | prize: str
|
| | time_estimate: str
|
| | compatibility_score: float = 0.0
|
| | rationale: str = ""
|
| |
|
| | @dataclass
|
| | class UserProfile:
|
| | skills: List[str]
|
| | experience_level: str
|
| | time_available: str
|
| | interests: List[str]
|
| |
|
| | class RealTopcoderMCPEngine:
|
| | """FINAL Production MCP Engine with Real Topcoder Data"""
|
| |
|
| | def __init__(self):
|
| | self.base_url = "https://api.topcoder-dev.com/v6/mcp"
|
| | self.session_id = None
|
| | self.is_connected = False
|
| | self.mock_challenges = self._create_fallback_challenges()
|
| |
|
| | def _create_fallback_challenges(self) -> List[Challenge]:
|
| | """Fallback challenges if MCP fails"""
|
| | return [
|
| | Challenge(
|
| | id="30174840",
|
| | title="React Component Library Development",
|
| | description="Build a comprehensive React component library with TypeScript, featuring reusable UI components, comprehensive documentation, and Storybook integration.",
|
| | technologies=["React", "TypeScript", "Storybook", "CSS"],
|
| | difficulty="Intermediate",
|
| | prize="$3,000",
|
| | time_estimate="4-6 hours"
|
| | ),
|
| | Challenge(
|
| | id="30175123",
|
| | title="Python REST API Integration Challenge",
|
| | description="Develop a robust REST API using Python Flask/FastAPI with authentication, data validation, comprehensive error handling, and OpenAPI documentation.",
|
| | technologies=["Python", "Flask", "REST API", "JSON", "Authentication"],
|
| | difficulty="Intermediate",
|
| | prize="$2,500",
|
| | time_estimate="3-5 hours"
|
| | ),
|
| | Challenge(
|
| | id="30174992",
|
| | title="Blockchain NFT Smart Contract Development",
|
| | description="Create and deploy smart contracts for NFT marketplace with minting, trading, and royalty features on Ethereum blockchain.",
|
| | technologies=["Blockchain", "Smart Contracts", "Ethereum", "Solidity", "NFT"],
|
| | difficulty="Advanced",
|
| | prize="$5,000",
|
| | time_estimate="6-8 hours"
|
| | )
|
| | ]
|
| |
|
| | def parse_sse_response(self, sse_text: str) -> Dict[str, Any]:
|
| | """Parse Server-Sent Events response"""
|
| | lines = sse_text.strip().split('\n')
|
| | for line in lines:
|
| | line = line.strip()
|
| | if line.startswith('data:'):
|
| | data_content = line[5:].strip()
|
| | try:
|
| | return json.loads(data_content)
|
| | except json.JSONDecodeError:
|
| | pass
|
| | return None
|
| |
|
| | async def initialize_connection(self) -> bool:
|
| | """Initialize MCP connection"""
|
| |
|
| | if self.is_connected:
|
| | return True
|
| |
|
| | headers = {
|
| | "Accept": "application/json, text/event-stream, */*",
|
| | "Accept-Language": "en-US,en;q=0.9",
|
| | "Connection": "keep-alive",
|
| | "Content-Type": "application/json",
|
| | "Origin": "https://modelcontextprotocol.io",
|
| | "Referer": "https://modelcontextprotocol.io/",
|
| | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
|
| | }
|
| |
|
| | init_request = {
|
| | "jsonrpc": "2.0",
|
| | "id": 0,
|
| | "method": "initialize",
|
| | "params": {
|
| | "protocolVersion": "2024-11-05",
|
| | "capabilities": {
|
| | "experimental": {},
|
| | "sampling": {},
|
| | "roots": {"listChanged": True}
|
| | },
|
| | "clientInfo": {
|
| | "name": "topcoder-intelligence-assistant",
|
| | "version": "1.0.0"
|
| | }
|
| | }
|
| | }
|
| |
|
| | try:
|
| | async with httpx.AsyncClient(timeout=10.0) as client:
|
| | response = await client.post(
|
| | f"{self.base_url}/mcp",
|
| | json=init_request,
|
| | headers=headers
|
| | )
|
| |
|
| | if response.status_code == 200:
|
| | response_headers = dict(response.headers)
|
| | if 'mcp-session-id' in response_headers:
|
| | self.session_id = response_headers['mcp-session-id']
|
| | self.is_connected = True
|
| | return True
|
| |
|
| | except Exception:
|
| | pass
|
| |
|
| | return False
|
| |
|
| | async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Optional[Dict]:
|
| | """Call MCP tool with real session"""
|
| |
|
| | if not self.session_id:
|
| | return None
|
| |
|
| | headers = {
|
| | "Accept": "application/json, text/event-stream, */*",
|
| | "Content-Type": "application/json",
|
| | "Origin": "https://modelcontextprotocol.io",
|
| | "mcp-session-id": self.session_id
|
| | }
|
| |
|
| | tool_request = {
|
| | "jsonrpc": "2.0",
|
| | "id": int(datetime.now().timestamp()),
|
| | "method": "tools/call",
|
| | "params": {
|
| | "name": tool_name,
|
| | "arguments": arguments
|
| | }
|
| | }
|
| |
|
| | try:
|
| | async with httpx.AsyncClient(timeout=30.0) as client:
|
| | response = await client.post(
|
| | f"{self.base_url}/mcp",
|
| | json=tool_request,
|
| | headers=headers
|
| | )
|
| |
|
| | if response.status_code == 200:
|
| | if "text/event-stream" in response.headers.get("content-type", ""):
|
| | sse_data = self.parse_sse_response(response.text)
|
| | if sse_data and "result" in sse_data:
|
| | return sse_data["result"]
|
| | else:
|
| | json_data = response.json()
|
| | if "result" in json_data:
|
| | return json_data["result"]
|
| |
|
| | except Exception:
|
| | pass
|
| |
|
| | return None
|
| |
|
| | def convert_topcoder_challenge(self, tc_data: Dict) -> Challenge:
|
| | """Convert real Topcoder challenge data to Challenge object - FIXED VERSION"""
|
| |
|
| |
|
| | challenge_id = str(tc_data.get('id', 'unknown'))
|
| |
|
| |
|
| | title = tc_data.get('name', 'Topcoder Challenge')
|
| |
|
| |
|
| | description = tc_data.get('description', 'Challenge description not available')
|
| |
|
| |
|
| | technologies = []
|
| | skills = tc_data.get('skills', [])
|
| | for skill in skills:
|
| | if isinstance(skill, dict) and 'name' in skill:
|
| | technologies.append(skill['name'])
|
| |
|
| |
|
| | if 'technologies' in tc_data:
|
| | tech_list = tc_data['technologies']
|
| | if isinstance(tech_list, list):
|
| | for tech in tech_list:
|
| | if isinstance(tech, dict) and 'name' in tech:
|
| | technologies.append(tech['name'])
|
| | elif isinstance(tech, str):
|
| | technologies.append(tech)
|
| |
|
| |
|
| | total_prize = 0
|
| | prize_sets = tc_data.get('prizeSets', [])
|
| | for prize_set in prize_sets:
|
| | if prize_set.get('type') == 'placement':
|
| | prizes = prize_set.get('prizes', [])
|
| | for prize in prizes:
|
| | if prize.get('type') == 'USD':
|
| | total_prize += prize.get('value', 0)
|
| |
|
| | prize = f"${total_prize:,}" if total_prize > 0 else "Merit-based"
|
| |
|
| |
|
| | challenge_type = tc_data.get('type', 'Unknown')
|
| | type_id = tc_data.get('typeId', '')
|
| |
|
| |
|
| | difficulty_mapping = {
|
| | 'First2Finish': 'Beginner',
|
| | 'Code': 'Intermediate',
|
| | 'Assembly Competition': 'Advanced',
|
| | 'UI Prototype Competition': 'Intermediate',
|
| | 'Copilot Posting': 'Beginner',
|
| | 'Bug Hunt': 'Beginner',
|
| | 'Test Suites': 'Intermediate'
|
| | }
|
| |
|
| | difficulty = difficulty_mapping.get(challenge_type, 'Intermediate')
|
| |
|
| |
|
| | time_estimate = "Variable duration"
|
| |
|
| |
|
| | status = tc_data.get('status', '')
|
| | if status == 'Completed':
|
| | time_estimate = "Recently completed"
|
| | elif status in ['Active', 'Draft']:
|
| | time_estimate = "Active challenge"
|
| |
|
| | return Challenge(
|
| | id=challenge_id,
|
| | title=title,
|
| | description=description[:300] + "..." if len(description) > 300 else description,
|
| | technologies=technologies,
|
| | difficulty=difficulty,
|
| | prize=prize,
|
| | time_estimate=time_estimate
|
| | )
|
| |
|
| | async def fetch_real_challenges(self, limit: int = 20) -> List[Challenge]:
|
| | """Fetch real challenges from Topcoder MCP - FIXED VERSION"""
|
| |
|
| | if not await self.initialize_connection():
|
| | return []
|
| |
|
| | result = await self.call_tool("query-tc-challenges", {"limit": limit})
|
| |
|
| | if not result:
|
| | return []
|
| |
|
| |
|
| | challenge_data_list = []
|
| |
|
| |
|
| | if "structuredContent" in result:
|
| | structured = result["structuredContent"]
|
| | if isinstance(structured, dict) and "data" in structured:
|
| | challenge_data_list = structured["data"]
|
| | print(f"β
Found {len(challenge_data_list)} challenges in structuredContent")
|
| |
|
| |
|
| | elif "content" in result and len(result["content"]) > 0:
|
| | content_item = result["content"][0]
|
| | if isinstance(content_item, dict) and content_item.get("type") == "text":
|
| | try:
|
| | text_content = content_item.get("text", "")
|
| | parsed_data = json.loads(text_content)
|
| | if "data" in parsed_data:
|
| | challenge_data_list = parsed_data["data"]
|
| | print(f"β
Found {len(challenge_data_list)} challenges in content text")
|
| | except json.JSONDecodeError:
|
| | pass
|
| |
|
| |
|
| | challenges = []
|
| | for item in challenge_data_list:
|
| | if isinstance(item, dict):
|
| | try:
|
| | challenge = self.convert_topcoder_challenge(item)
|
| | challenges.append(challenge)
|
| | except Exception as e:
|
| | print(f"Error converting challenge: {e}")
|
| | continue
|
| |
|
| | print(f"π Successfully converted {len(challenges)} real Topcoder challenges!")
|
| | return challenges
|
| |
|
| | def extract_technologies_from_query(self, query: str) -> List[str]:
|
| | """Extract technology keywords from user query"""
|
| | tech_keywords = {
|
| | 'python', 'java', 'javascript', 'react', 'node', 'angular', 'vue',
|
| | 'aws', 'docker', 'kubernetes', 'api', 'rest', 'graphql', 'sql',
|
| | 'mongodb', 'postgresql', 'machine learning', 'ai', 'blockchain',
|
| | 'ios', 'android', 'flutter', 'swift', 'kotlin', 'c++', 'c#',
|
| | 'ruby', 'php', 'go', 'rust', 'typescript', 'html', 'css',
|
| | 'nft', 'non-fungible tokens', 'ethereum', 'smart contracts', 'solidity'
|
| | }
|
| |
|
| | query_lower = query.lower()
|
| | found_techs = [tech for tech in tech_keywords if tech in query_lower]
|
| | return found_techs
|
| |
|
| | def calculate_compatibility_score(self, challenge: Challenge, user_profile: UserProfile, query: str) -> tuple:
|
| | """Calculate compatibility score with detailed rationale"""
|
| |
|
| | score = 0.0
|
| | factors = []
|
| |
|
| |
|
| | user_skills_lower = [skill.lower() for skill in user_profile.skills]
|
| | challenge_techs_lower = [tech.lower() for tech in challenge.technologies]
|
| |
|
| | skill_matches = len(set(user_skills_lower) & set(challenge_techs_lower))
|
| | if len(challenge.technologies) > 0:
|
| | skill_score = min(skill_matches / len(challenge.technologies), 1.0) * 0.4
|
| | else:
|
| | skill_score = 0.3
|
| |
|
| | score += skill_score
|
| |
|
| | if skill_matches > 0:
|
| | matched_skills = [t for t in challenge.technologies if t.lower() in user_skills_lower]
|
| | factors.append(f"Uses your {', '.join(matched_skills[:2])} expertise")
|
| | elif len(challenge.technologies) > 0:
|
| | factors.append(f"Learn {', '.join(challenge.technologies[:2])}")
|
| | else:
|
| | factors.append("Suitable for multiple skill levels")
|
| |
|
| |
|
| | experience_mapping = {
|
| | "beginner": {"Beginner": 1.0, "Intermediate": 0.7, "Advanced": 0.4},
|
| | "intermediate": {"Beginner": 0.7, "Intermediate": 1.0, "Advanced": 0.8},
|
| | "advanced": {"Beginner": 0.4, "Intermediate": 0.8, "Advanced": 1.0}
|
| | }
|
| |
|
| | exp_score = experience_mapping.get(user_profile.experience_level.lower(), {}).get(challenge.difficulty, 0.5) * 0.3
|
| | score += exp_score
|
| |
|
| | if exp_score > 0.24:
|
| | factors.append(f"Perfect {user_profile.experience_level} level match")
|
| | else:
|
| | factors.append("Good learning opportunity")
|
| |
|
| |
|
| | query_techs = self.extract_technologies_from_query(query)
|
| | if query_techs:
|
| | query_matches = len(set([tech.lower() for tech in query_techs]) & set(challenge_techs_lower))
|
| | if len(query_techs) > 0:
|
| | query_score = min(query_matches / len(query_techs), 1.0) * 0.2
|
| | else:
|
| | query_score = 0.1
|
| | score += query_score
|
| |
|
| | if query_matches > 0:
|
| | factors.append(f"Matches your {', '.join(query_techs[:2])} interest")
|
| | else:
|
| | score += 0.1
|
| |
|
| |
|
| | score += 0.1
|
| |
|
| | return min(score, 1.0), factors
|
| |
|
| | async def get_personalized_recommendations(self, user_profile: UserProfile, query: str = "") -> Dict[str, Any]:
|
| | """Get personalized recommendations using REAL Topcoder data - FIXED VERSION"""
|
| |
|
| | start_time = datetime.now()
|
| |
|
| |
|
| | real_challenges = await self.fetch_real_challenges(limit=50)
|
| |
|
| | if real_challenges:
|
| | challenges = real_challenges
|
| | data_source = "π₯ REAL Topcoder MCP Server (4,596+ challenges)"
|
| | print(f"π Using {len(challenges)} REAL Topcoder challenges!")
|
| | else:
|
| |
|
| | challenges = self.mock_challenges
|
| | data_source = "Enhanced Mock Data (MCP unavailable)"
|
| |
|
| |
|
| | scored_challenges = []
|
| | for challenge in challenges:
|
| | score, factors = self.calculate_compatibility_score(challenge, user_profile, query)
|
| | challenge.compatibility_score = score
|
| | challenge.rationale = f"Match: {score:.0%}. " + ". ".join(factors[:2]) + "."
|
| | scored_challenges.append(challenge)
|
| |
|
| |
|
| | scored_challenges.sort(key=lambda x: x.compatibility_score, reverse=True)
|
| |
|
| |
|
| | recommendations = scored_challenges[:5]
|
| |
|
| |
|
| | processing_time = (datetime.now() - start_time).total_seconds()
|
| |
|
| |
|
| | query_techs = self.extract_technologies_from_query(query)
|
| | avg_score = sum(c.compatibility_score for c in challenges) / len(challenges) if challenges else 0
|
| |
|
| | return {
|
| | "recommendations": [asdict(rec) for rec in recommendations],
|
| | "insights": {
|
| | "total_challenges": len(challenges),
|
| | "average_compatibility": f"{avg_score:.1%}",
|
| | "processing_time": f"{processing_time:.3f}s",
|
| | "data_source": data_source,
|
| | "top_match": f"{recommendations[0].compatibility_score:.0%}" if recommendations else "0%",
|
| | "technologies_detected": query_techs,
|
| | "session_active": bool(self.session_id),
|
| | "mcp_connected": self.is_connected,
|
| | "topcoder_total": "4,596+ live challenges" if real_challenges else "Mock data"
|
| | }
|
| | }
|
| |
|
| |
|
| | intelligence_engine = RealTopcoderMCPEngine()
|
| |
|
| | def format_recommendations_display(recommendations_data):
|
| | """Format recommendations for beautiful display"""
|
| |
|
| | if not recommendations_data or not recommendations_data.get("recommendations"):
|
| | return "No recommendations found. Please try different criteria."
|
| |
|
| | recommendations = recommendations_data["recommendations"]
|
| | insights = recommendations_data["insights"]
|
| |
|
| |
|
| | display_parts = []
|
| |
|
| |
|
| | data_source_emoji = "π₯" if "REAL" in insights['data_source'] else "β‘"
|
| |
|
| | display_parts.append(f"""
|
| | ## π― Personalized Challenge Recommendations
|
| |
|
| | **{data_source_emoji} Analysis Summary:**
|
| | - **Challenges Analyzed:** {insights['total_challenges']}
|
| | - **Processing Time:** {insights['processing_time']}
|
| | - **Data Source:** {insights['data_source']}
|
| | - **Top Match Score:** {insights['top_match']}
|
| | - **MCP Connected:** {'β
Yes' if insights.get('mcp_connected') else 'β Fallback mode'}
|
| | - **Technologies Detected:** {', '.join(insights['technologies_detected']) if insights['technologies_detected'] else 'General recommendations'}
|
| |
|
| | ---
|
| | """)
|
| |
|
| |
|
| | for i, rec in enumerate(recommendations[:5], 1):
|
| | score_emoji = "π₯" if rec['compatibility_score'] > 0.8 else "β¨" if rec['compatibility_score'] > 0.6 else "π‘"
|
| |
|
| | tech_display = ', '.join(rec['technologies']) if rec['technologies'] else 'Multi-technology challenge'
|
| |
|
| | display_parts.append(f"""
|
| | ### {score_emoji} #{i}. {rec['title']}
|
| |
|
| | **π― Compatibility Score:** {rec['compatibility_score']:.0%} | **π° Prize:** {rec['prize']} | **β±οΈ Time:** {rec['time_estimate']}
|
| |
|
| | **π Description:** {rec['description']}
|
| |
|
| | **π οΈ Technologies:** {tech_display}
|
| |
|
| | **π Why This Matches:** {rec['rationale']}
|
| |
|
| | **π Challenge Level:** {rec['difficulty']}
|
| |
|
| | ---
|
| | """)
|
| |
|
| |
|
| | display_parts.append(f"""
|
| | ## π Next Steps
|
| |
|
| | 1. **Choose a challenge** that matches your skill level and interests
|
| | 2. **Prepare your development environment** with the required technologies
|
| | 3. **Read the full challenge requirements** on the Topcoder platform
|
| | 4. **Start coding** and submit your solution before the deadline!
|
| |
|
| | *π‘ Tip: Challenges with 70%+ compatibility scores are ideal for your current profile.*
|
| |
|
| | **π Powered by {'REAL Topcoder MCP Server' if insights.get('mcp_connected') else 'Advanced Intelligence Engine'}**
|
| | """)
|
| |
|
| | return "\n".join(display_parts)
|
| |
|
| | async def get_recommendations_async(skills_input, experience_level, time_available, interests):
|
| | """Async wrapper for getting recommendations"""
|
| |
|
| |
|
| | skills = [skill.strip() for skill in skills_input.split(",") if skill.strip()]
|
| |
|
| |
|
| | user_profile = UserProfile(
|
| | skills=skills,
|
| | experience_level=experience_level,
|
| | time_available=time_available,
|
| | interests=[interests] if interests else []
|
| | )
|
| |
|
| |
|
| | recommendations_data = await intelligence_engine.get_personalized_recommendations(
|
| | user_profile, interests
|
| | )
|
| |
|
| | return format_recommendations_display(recommendations_data)
|
| |
|
| | def get_recommendations_sync(skills_input, experience_level, time_available, interests):
|
| | """Synchronous wrapper for Gradio"""
|
| | return asyncio.run(get_recommendations_async(skills_input, experience_level, time_available, interests))
|
| |
|
| |
|
| | def create_interface():
|
| | """Create the final Gradio interface"""
|
| |
|
| | with gr.Blocks(
|
| | title="Topcoder Challenge Intelligence Assistant",
|
| | theme=gr.themes.Soft(),
|
| | css="""
|
| | .gradio-container {
|
| | max-width: 1200px !important;
|
| | }
|
| | .header-text {
|
| | text-align: center;
|
| | margin-bottom: 2rem;
|
| | }
|
| | """
|
| | ) as interface:
|
| |
|
| |
|
| | gr.HTML("""
|
| | <div class="header-text">
|
| | <h1>π Topcoder Challenge Intelligence Assistant</h1>
|
| | <p><strong>π₯ REAL MCP Integration - Find Your Perfect Coding Challenges</strong></p>
|
| | <p><em>Powered by live Topcoder MCP server with advanced AI-powered matching</em></p>
|
| | </div>
|
| | """)
|
| |
|
| | with gr.Row():
|
| | with gr.Column(scale=1):
|
| | gr.Markdown("### π Your Profile")
|
| |
|
| | skills_input = gr.Textbox(
|
| | label="π» Technical Skills",
|
| | placeholder="Python, JavaScript, React, Blockchain, NFT, Machine Learning...",
|
| | info="Enter your programming languages, frameworks, and technologies (comma-separated)",
|
| | lines=2
|
| | )
|
| |
|
| | experience_level = gr.Dropdown(
|
| | label="π― Experience Level",
|
| | choices=["Beginner", "Intermediate", "Advanced"],
|
| | value="Intermediate",
|
| | info="Your overall programming and competitive coding experience"
|
| | )
|
| |
|
| | time_available = gr.Dropdown(
|
| | label="β° Available Time",
|
| | choices=["2-4 hours", "4-8 hours", "8+ hours"],
|
| | value="4-8 hours",
|
| | info="How much time can you dedicate to a challenge?"
|
| | )
|
| |
|
| | interests = gr.Textbox(
|
| | label="π¨ Interests & Goals",
|
| | placeholder="blockchain development, web apps, API integration, NFT projects...",
|
| | info="What type of projects and technologies interest you most?",
|
| | lines=2
|
| | )
|
| |
|
| | get_recommendations_btn = gr.Button(
|
| | "π Get My REAL Topcoder Recommendations",
|
| | variant="primary",
|
| | size="lg"
|
| | )
|
| |
|
| | with gr.Column(scale=2):
|
| | gr.Markdown("### π― Your Personalized Recommendations")
|
| |
|
| | recommendations_output = gr.Markdown(
|
| | value="π Fill out your profile and click 'Get Recommendations' to see **REAL Topcoder challenges** matched to your skills!",
|
| | elem_classes=["recommendations-output"]
|
| | )
|
| |
|
| |
|
| | get_recommendations_btn.click(
|
| | fn=get_recommendations_sync,
|
| | inputs=[skills_input, experience_level, time_available, interests],
|
| | outputs=[recommendations_output]
|
| | )
|
| |
|
| |
|
| | gr.HTML("""
|
| | <div style="text-align: center; margin-top: 2rem; padding: 1rem; border-top: 1px solid #ddd;">
|
| | <p><strong>π Topcoder Challenge Intelligence Assistant</strong></p>
|
| | <p>π₯ <strong>REAL MCP Integration</strong> β’ Live Topcoder Server Connection β’ Advanced AI Matching</p>
|
| | <p>Built with professional MCP authentication β’ Session management β’ Production error handling</p>
|
| | </div>
|
| | """)
|
| |
|
| | return interface
|
| |
|
| |
|
| | if __name__ == "__main__":
|
| |
|
| | app = create_interface()
|
| |
|
| |
|
| | app.launch(
|
| | server_name="0.0.0.0",
|
| | server_port=7860,
|
| | show_error=True
|
| | ) |