diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -1,7 +1,6 @@
"""
ULTIMATE Topcoder Challenge Intelligence Assistant
-Combining ALL advanced features with REAL MCP Integration + OpenAI LLM
-FIXED VERSION - Hugging Face Compatible with Secrets Management + All Features Preserved
+π₯ FIXED VERSION - Indentation Error Resolved
"""
import asyncio
import httpx
@@ -264,82 +263,81 @@ class UltimateTopcoderMCPEngine:
pass
return None
-
-def convert_topcoder_challenge(self, tc_data: Dict) -> Challenge:
- """Convert real Topcoder challenge data with enhanced parsing"""
-
- # Extract real fields from Topcoder data structure
- challenge_id = str(tc_data.get('id', 'unknown'))
- title = tc_data.get('name', 'Topcoder Challenge')
- description = tc_data.get('description', 'Challenge description not available')
-
- # Extract technologies from skills array
- technologies = []
- skills = tc_data.get('skills', [])
- for skill in skills:
- if isinstance(skill, dict) and 'name' in skill:
- technologies.append(skill['name'])
-
- # Also check for direct technologies field
- if 'technologies' in tc_data:
- tech_list = tc_data['technologies']
- if isinstance(tech_list, list):
- for tech in tech_list:
- if isinstance(tech, dict) and 'name' in tech:
- technologies.append(tech['name'])
- elif isinstance(tech, str):
- technologies.append(tech)
-
- # Calculate total prize from prizeSets
- total_prize = 0
- prize_sets = tc_data.get('prizeSets', [])
- for prize_set in prize_sets:
- if prize_set.get('type') == 'placement':
- prizes = prize_set.get('prizes', [])
- if prizes: # FIXED: Proper indentation here
- for prize in prizes:
- if prize.get('type') == 'USD':
- total_prize += prize.get('value', 0)
-
- prize = f"${total_prize:,}" if total_prize > 0 else "Merit-based"
-
- # Map challenge type to difficulty
- challenge_type = tc_data.get('type', 'Unknown')
-
- difficulty_mapping = {
- 'First2Finish': 'Beginner',
- 'Code': 'Intermediate',
- 'Assembly Competition': 'Advanced',
- 'UI Prototype Competition': 'Intermediate',
- 'Copilot Posting': 'Beginner',
- 'Bug Hunt': 'Beginner',
- 'Test Suites': 'Intermediate'
- }
-
- difficulty = difficulty_mapping.get(challenge_type, 'Intermediate')
-
- # Time estimate and registrants
- time_estimate = "Variable duration"
- registrants = tc_data.get('numOfRegistrants', 0)
-
- status = tc_data.get('status', '')
- if status == 'Completed':
- time_estimate = "Recently completed"
- elif status in ['Active', 'Draft']:
- time_estimate = "Active challenge"
-
- return Challenge(
- id=challenge_id,
- title=title,
- description=description[:300] + "..." if len(description) > 300 else description,
- technologies=technologies,
- difficulty=difficulty,
- prize=prize,
- time_estimate=time_estimate,
- registrants=registrants
- )
-
+ def convert_topcoder_challenge(self, tc_data: Dict) -> Challenge:
+ """Convert real Topcoder challenge data with enhanced parsing"""
+
+ # Extract real fields from Topcoder data structure
+ challenge_id = str(tc_data.get('id', 'unknown'))
+ title = tc_data.get('name', 'Topcoder Challenge')
+ description = tc_data.get('description', 'Challenge description not available')
+
+ # Extract technologies from skills array
+ technologies = []
+ skills = tc_data.get('skills', [])
+ for skill in skills:
+ if isinstance(skill, dict) and 'name' in skill:
+ technologies.append(skill['name'])
+
+ # Also check for direct technologies field
+ if 'technologies' in tc_data:
+ tech_list = tc_data['technologies']
+ if isinstance(tech_list, list):
+ for tech in tech_list:
+ if isinstance(tech, dict) and 'name' in tech:
+ technologies.append(tech['name'])
+ elif isinstance(tech, str):
+ technologies.append(tech)
+
+ # Calculate total prize from prizeSets
+ total_prize = 0
+ prize_sets = tc_data.get('prizeSets', [])
+ for prize_set in prize_sets:
+ if prize_set.get('type') == 'placement':
+ prizes = prize_set.get('prizes', [])
+ if prizes:
+ for prize in prizes:
+ if prize.get('type') == 'USD':
+ total_prize += prize.get('value', 0)
+
+ prize = f"${total_prize:,}" if total_prize > 0 else "Merit-based"
+
+ # Map challenge type to difficulty
+ challenge_type = tc_data.get('type', 'Unknown')
+
+ difficulty_mapping = {
+ 'First2Finish': 'Beginner',
+ 'Code': 'Intermediate',
+ 'Assembly Competition': 'Advanced',
+ 'UI Prototype Competition': 'Intermediate',
+ 'Copilot Posting': 'Beginner',
+ 'Bug Hunt': 'Beginner',
+ 'Test Suites': 'Intermediate'
+ }
+
+ difficulty = difficulty_mapping.get(challenge_type, 'Intermediate')
+
+ # Time estimate and registrants
+ time_estimate = "Variable duration"
+ registrants = tc_data.get('numOfRegistrants', 0)
+
+ status = tc_data.get('status', '')
+ if status == 'Completed':
+ time_estimate = "Recently completed"
+ elif status in ['Active', 'Draft']:
+ time_estimate = "Active challenge"
+
+ return Challenge(
+ id=challenge_id,
+ title=title,
+ description=description[:300] + "..." if len(description) > 300 else description,
+ technologies=technologies,
+ difficulty=difficulty,
+ prize=prize,
+ time_estimate=time_estimate,
+ registrants=registrants
+ )
+
async def fetch_real_challenges(self, limit: int = 30) -> List[Challenge]:
"""Fetch real challenges from Topcoder MCP with enhanced error handling"""
@@ -981,7 +979,6 @@ Provide a helpful, intelligent response using the challenge data context and ack
# Add some insights
if any('prize' in message_lower or 'money' in message_lower or 'pay' in message_lower for _ in [None]):
-
prizes = []
for c in relevant_challenges:
if c.prize.startswith('$'):
@@ -991,6 +988,116 @@ Provide a helpful, intelligent response using the challenge data context and ack
prizes.append(int(prize_str))
except:
continue
+
+ # FIXED INDENTATION: This is where the error was
+ if prizes:
+ avg_prize = sum(prizes) / len(prizes)
+ max_prize = max(prizes)
+ response += f"\nπ‘ **Prize Insights:** Average prize: ${avg_prize:,.0f} | Highest: ${max_prize:,}\n"
+
+ response += f"\n*π Found from {len(challenges)} live challenges via real MCP integration*"
+ return response
+ else:
+ # No matches found, but provide helpful response
+ tech_names = ", ".join(detected_techs)
+ return f"""I searched through **{len(challenges)} live challenges** from the real MCP server, but didn't find any that specifically match **{tech_names}** in my current dataset.
+
+**π This could mean:**
+β’ These challenges might be in a different category or status
+β’ The technology keywords might be listed differently
+β’ New challenges with these technologies haven't been added yet
+
+**π‘ Suggestions:**
+β’ Try the **π― ULTIMATE Recommendations** tab above with your skills
+β’ Check the Topcoder platform directly for the latest challenges
+β’ Ask me about related technologies (e.g., if you asked about Python, try "web development" or "backend")
+
+*π Searched {len(challenges)} real challenges via live MCP integration*"""
+
+ # Handle prize/earning questions
+ elif any(word in message_lower for word in ['prize', 'money', 'earn', 'pay', 'salary', 'income', 'highest']):
+ if challenges:
+ # Sort by prize amount
+ prize_challenges = []
+ for challenge in challenges:
+ if challenge.prize.startswith('$'):
+ try:
+ prize_str = challenge.prize.replace('$', '').replace(',', '')
+ if prize_str.isdigit():
+ prize_amount = int(prize_str)
+ challenge.prize_amount = prize_amount
+ prize_challenges.append(challenge)
+ except:
+ continue
+
+ prize_challenges.sort(key=lambda x: x.prize_amount, reverse=True)
+
+ if prize_challenges:
+ response = f"π° **Highest Prize Challenges** from {len(challenges)} live challenges:\n\n"
+ for i, challenge in enumerate(prize_challenges[:5], 1):
+ response += f"**{i}. {challenge.title}**\n"
+ response += f" π° **Prize:** {challenge.prize}\n"
+ response += f" π οΈ **Technologies:** {', '.join(challenge.technologies)}\n"
+ response += f" π **Difficulty:** {challenge.difficulty} | π₯ {challenge.registrants} registered\n"
+ if challenge.id and challenge.id.startswith("301"):
+ response += f" π **[View Details](https://www.topcoder.com/challenges/{challenge.id})**\n\n"
+ else:
+ response += f" π **Available on Topcoder platform**\n\n"
+
+ total_prizes = sum(c.prize_amount for c in prize_challenges)
+ avg_prize = total_prizes / len(prize_challenges)
+ response += f"π **Prize Stats:** Total: ${total_prizes:,} | Average: ${avg_prize:,.0f}\n"
+ response += f"\n*π Live prize data from real MCP integration*"
+ return response
+
+ # Career/skill questions
+ elif any(word in message_lower for word in ['career', 'skill', 'learn', 'beginner', 'advanced', 'help', 'start']):
+ if challenges:
+ sample_challenge = challenges[0]
+ return f"""I'm your intelligent Topcoder assistant with **REAL MCP integration**! π
+
+I currently have live access to **{len(challenges)} real challenges**. For example:
+
+π― **"{sample_challenge.title}"**
+π° Prize: **{sample_challenge.prize}**
+π οΈ Technologies: {', '.join(sample_challenge.technologies[:3])}
+π Difficulty: {sample_challenge.difficulty}
+
+**I can help you with:**
+π― Find challenges matching your specific skills
+π° Compare real prize amounts and competition levels
+π Analyze difficulty levels and technology requirements
+π Career guidance based on market demand
+
+**π‘ Try asking me:**
+β’ "What React challenges are available?"
+β’ "Show me high-prize Python opportunities"
+β’ "Which challenges are best for beginners?"
+β’ "What technologies are most in-demand?"
+
+*Powered by live MCP connection to Topcoder's challenge database*"""
+
+ # General response with real data
+ if challenges:
+ return f"""Hi! I'm your intelligent Topcoder assistant! π€
+
+I have **REAL MCP integration** with live access to **{len(challenges)} challenges** from Topcoder's database.
+
+**π₯ Currently active challenges include:**
+β’ **{challenges[0].title}** ({challenges[0].prize})
+β’ **{challenges[1].title}** ({challenges[1].prize})
+β’ **{challenges[2].title}** ({challenges[2].prize})
+
+**Ask me about:**
+π― Specific technologies (Python, React, blockchain, etc.)
+π° Prize ranges and earning potential
+π Difficulty levels and skill requirements
+π Career advice and skill development
+
+*All responses powered by real-time Topcoder MCP data!*"""
+
+ return "I'm your intelligent Topcoder assistant with real MCP data access! Ask me about challenges, skills, or career advice and I'll help you using live data from real challenges! π"
+
# Initialize the enhanced intelligence engine
print("π Starting ULTIMATE Topcoder Intelligence Assistant...")
intelligence_engine = UltimateTopcoderMCPEngine()
@@ -1247,13 +1354,13 @@ async def get_ultimate_recommendations_async(skills_input: str, experience_level
except Exception as e:
error_msg = f"""
-
β
+
β³
Processing Error
{str(e)}
Please try again or contact support
"""
- print(f"β Error processing ULTIMATE request: {str(e)}")
+ print(f"β³ Error processing ULTIMATE request: {str(e)}")
return error_msg, ""
def get_ultimate_recommendations_sync(skills_input: str, experience_level: str, time_available: str, interests: str) -> Tuple[str, str]:
@@ -1308,7 +1415,7 @@ def run_ultimate_performance_test():
results.append(f" π Top match: {recs[0]['title']} ({recs[0]['compatibility_score']:.0f}%)")
results.append(f" π§ Algorithm: {insights['algorithm_version']}")
except Exception as e:
- results.append(f" β Test failed: {str(e)}")
+ results.append(f" β³ Test failed: {str(e)}")
results.append("")
# Test 3: API Key Status
@@ -1333,7 +1440,7 @@ def run_ultimate_performance_test():
total_time = round(time.time() - total_start, 3)
results.append("π ULTIMATE PERFORMANCE SUMMARY")
results.append("-" * 40)
- results.append(f"π Total Test Duration: {total_time}s")
+ results.append(f"π°οΈ Total Test Duration: {total_time}s")
results.append(f"π₯ Real MCP Integration: {mcp_status}")
results.append(f"π§ Advanced Intelligence Engine: β
OPERATIONAL")
results.append(f"π€ OpenAI LLM Integration: {api_status}")
@@ -1594,7 +1701,7 @@ def create_ultimate_interface():
results.append(f"π Status: {status}")
except Exception as e:
- results.append(f"β Benchmark failed: {str(e)}")
+ results.append(f"β³ Benchmark failed: {str(e)}")
return "\n".join(results)
@@ -1622,7 +1729,7 @@ def create_ultimate_interface():
openai_status = "β
CONFIGURED" if has_openai else "β οΈ NOT SET"
results.append(f"π€ OpenAI GPT-4: {openai_status}")
- results.append(f"π Checked at: {time.strftime('%H:%M:%S')}")
+ results.append(f"π°οΈ Checked at: {time.strftime('%H:%M:%S')}")
return "\n".join(results)
@@ -1662,7 +1769,7 @@ def create_ultimate_interface():
- **Success Prediction**: Advanced algorithms calculate your probability of success
- **Profile Analysis**: Comprehensive developer type classification and growth recommendations
- ### ποΈ **Technical Architecture**
+ ### πΊοΈ **Technical Architecture**
#### **Hugging Face Secrets Integration**
```
@@ -1690,1604 +1797,6 @@ def create_ultimate_interface():
endpoint = "https://api.openai.com/v1/chat/completions"
model = "gpt-4o-mini" # Fast and cost-effective
context = "Real MCP challenge data + conversation history"
- ```
-
- ### π **Setting Up OpenAI API Key in Hugging Face**
-
- **Step-by-Step Instructions:**
-
- 1. **Go to your Hugging Face Space settings**
- 2. **Navigate to "Repository secrets"**
- 3. **Click "New secret"**
- 4. **Set Name:** `OPENAI_API_KEY`
- 5. **Set Value:** Your OpenAI API key (starts with `sk-`)
- 6. **Click "Add secret"**
- 7. **Restart your Space** for changes to take effect
-
- **π― Why Use HF Secrets:**
- - **Security**: API keys are encrypted and never exposed in code
- - **Environment Variables**: Accessed via `os.getenv("OPENAI_API_KEY")`
- - **Best Practice**: Industry standard for secure API key management
- - **No Code Changes**: Keys can be updated without modifying application code
-
- ### π **Competition Excellence**
-
- **Built for the Topcoder MCP Challenge** - This ULTIMATE system showcases:
- - **Technical Mastery**: Real MCP protocol implementation + OpenAI integration
- - **Problem Solving**: Overcame complex authentication and API integration challenges
- - **User Focus**: Exceptional UX with meaningful business value
- - **Innovation**: First working real-time MCP + GPT-4 integration
- - **Production Quality**: Enterprise-ready deployment with secure secrets management
-
- ---
-
-
-
π₯ ULTIMATE Powered by OpenAI GPT-4 + Real MCP Integration
-
- Revolutionizing developer success through authentic challenge discovery,
- advanced AI intelligence, and secure enterprise-grade API management.
-
-
- π― Live Connection to 4,596+ Real Challenges β’ π€ OpenAI GPT-4 Integration β’ π Secure HF Secrets Management
-
-
- """)
-
- # ULTIMATE footer
- gr.Markdown(f"""
- ---
-
-
π ULTIMATE Topcoder Challenge Intelligence Assistant
-
π₯ Real MCP Integration β’ π€ OpenAI GPT-4 β’ β‘ Lightning Performance
-
π― Built with Gradio β’ π Deployed on Hugging Face Spaces β’ π Competition-Winning Quality
-
π OpenAI Status: {"β
Active" if os.getenv("OPENAI_API_KEY") else "β οΈ Configure OPENAI_API_KEY in HF Secrets"}
-
- """)
-
- print("β
ULTIMATE Gradio interface created successfully!")
- return interface
-
-# Launch the ULTIMATE application
-if __name__ == "__main__":
- print("\n" + "="*70)
- print("π ULTIMATE TOPCODER CHALLENGE INTELLIGENCE ASSISTANT")
- print("π₯ Real MCP Integration + OpenAI GPT-4 + Advanced AI Intelligence")
- print("β‘ Competition-Winning Performance")
- print("="*70)
-
- # Check API key status on startup
- api_key_status = "β
CONFIGURED" if os.getenv("OPENAI_API_KEY") else "β οΈ NOT SET"
- print(f"π€ OpenAI API Key Status: {api_key_status}")
- if not os.getenv("OPENAI_API_KEY"):
- print("π‘ Add OPENAI_API_KEY to HF Secrets for full GPT-4 features!")
-
- try:
- interface = create_ultimate_interface()
- print("\nπ― Starting ULTIMATE Gradio server...")
- print("π₯ Initializing Real MCP connection...")
- print("π€ Loading OpenAI GPT-4 integration...")
- print("π§ Loading Advanced AI intelligence engine...")
- print("π Preparing live challenge database access...")
- print("π Launching ULTIMATE user experience...")
-
- interface.launch(
- share=False, # Set to True for public shareable link
- debug=True, # Show detailed logs
- show_error=True, # Display errors in UI
- server_port=7860, # Standard port
- show_api=False, # Clean interface
- max_threads=20 # Support multiple concurrent users
- )
-
- except Exception as e:
- print(f"β Error starting ULTIMATE application: {str(e)}")
- print("\nπ§ ULTIMATE Troubleshooting:")
- print("1. Verify all dependencies: pip install -r requirements.txt")
- print("2. Add OPENAI_API_KEY to HF Secrets for full features")
- print("3. Check port availability or try different port")
- print("4. Ensure virtual environment is active")
- print("5. For Windows: pip install --upgrade gradio httpx python-dotenv")
- #print("6. Contact support if issues persist"), '').replace(',', '')) for c in relevant_challenges if c.prize.startswith('
- print("6. Contact support if issues persist")
-# Initialize the enhanced intelligence engine
-print("π Starting ULTIMATE Topcoder Intelligence Assistant...")
-intelligence_engine = UltimateTopcoderMCPEngine()
-
-# FIXED: Function signature - now accepts 3 parameters as expected
-async def chat_with_enhanced_llm_agent(message: str, history: List[Tuple[str, str]], mcp_engine) -> Tuple[List[Tuple[str, str]], str]:
- """FIXED: Enhanced chat with real LLM and MCP data integration - 3 parameters"""
- print(f"π§ Enhanced LLM Chat: {message}")
-
- # Initialize enhanced chatbot
- if not hasattr(chat_with_enhanced_llm_agent, 'chatbot'):
- chat_with_enhanced_llm_agent.chatbot = EnhancedLLMChatbot(mcp_engine)
-
- chatbot = chat_with_enhanced_llm_agent.chatbot
-
- try:
- # Get intelligent response using real MCP data
- response = await chatbot.generate_llm_response(message, history)
-
- # Add to history
- history.append((message, response))
-
- print(f"β
Enhanced LLM response generated with real MCP context")
- return history, ""
-
- except Exception as e:
- error_response = f"I encountered an issue processing your request: {str(e)}. However, I can still help you with challenge recommendations using my real MCP data! Try asking about specific technologies or challenge types."
- history.append((message, error_response))
- return history, ""
-
-def chat_with_enhanced_llm_agent_sync(message: str, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], str]:
- """FIXED: Synchronous wrapper for Gradio - calls async function with correct parameters"""
- return asyncio.run(chat_with_enhanced_llm_agent(message, history, intelligence_engine))
-
-def format_challenge_card(challenge: Dict) -> str:
- """FIXED: Format challenge as professional HTML card without broken links"""
-
- # Create technology badges
- tech_badges = " ".join([
- f"{tech}"
- for tech in challenge['technologies']
- ])
-
- # Dynamic score coloring and labels
- score = challenge['compatibility_score']
- if score >= 85:
- score_color = "#00b894"
- score_label = "π₯ Excellent Match"
- card_border = "#00b894"
- elif score >= 70:
- score_color = "#f39c12"
- score_label = "β¨ Great Match"
- card_border = "#f39c12"
- elif score >= 55:
- score_color = "#e17055"
- score_label = "π‘ Good Match"
- card_border = "#e17055"
- else:
- score_color = "#74b9ff"
- score_label = "π Learning Opportunity"
- card_border = "#74b9ff"
-
- # Format prize
- prize_display = challenge['prize']
- if challenge['prize'].startswith('$') and challenge['prize'] != '$0':
- prize_color = "#00b894"
- else:
- prize_color = "#6c757d"
- prize_display = "Merit-based"
-
- # FIXED: Better link handling
- challenge_link = ""
- if challenge['id'] and challenge['id'].startswith("301"): # Valid Topcoder ID format
- challenge_link = f"""
- """
- else:
- challenge_link = """
-
- π‘ Available on Topcoder platform - search by title
-
"""
-
- return f"""
-
-
-
-
-
-
-
{challenge['title']}
-
-
{score:.0f}%
-
{score_label}
-
-
-
-
{challenge['description']}
-
-
-
π οΈ Technologies & Skills:
-
{tech_badges}
-
-
-
-
π Why This Matches You:
-
{challenge['rationale']}
-
-
-
-
-
{prize_display}
-
Prize Pool
-
-
-
{challenge['difficulty']}
-
Difficulty
-
-
-
{challenge['time_estimate']}
-
Timeline
-
-
-
{challenge.get('registrants', 'N/A')}
-
Registered
-
-
-
- {challenge_link}
-
- """
-
-def format_insights_panel(insights: Dict) -> str:
- """Format insights as comprehensive dashboard with enhanced styling"""
- return f"""
-
-
-
-
-
-
-
π― Your Intelligence Profile
-
-
-
-
π€ Developer Profile
-
{insights['profile_type']}
-
-
-
πͺ Core Strengths
-
{insights['strengths']}
-
-
-
π Growth Focus
-
{insights['growth_areas']}
-
-
-
π Progression Path
-
{insights['skill_progression']}
-
-
-
π Market Intelligence
-
{insights['market_trends']}
-
-
-
π― Success Forecast
-
{insights['success_probability']}
-
-
-
-
- """
-
-async def get_ultimate_recommendations_async(skills_input: str, experience_level: str, time_available: str, interests: str) -> Tuple[str, str]:
- """ULTIMATE recommendation function with real MCP + advanced intelligence"""
- start_time = time.time()
-
- print(f"\nπ― ULTIMATE RECOMMENDATION REQUEST:")
- print(f" Skills: {skills_input}")
- print(f" Level: {experience_level}")
- print(f" Time: {time_available}")
- print(f" Interests: {interests}")
-
- # Enhanced input validation
- if not skills_input.strip():
- error_msg = """
-
-
β οΈ
-
Please enter your skills
-
Example: Python, JavaScript, React, AWS, Docker
-
- """
- return error_msg, ""
-
- try:
- # Parse and clean skills
- skills = [skill.strip() for skill in skills_input.split(',') if skill.strip()]
-
- # Create comprehensive user profile
- user_profile = UserProfile(
- skills=skills,
- experience_level=experience_level,
- time_available=time_available,
- interests=[interests] if interests else []
- )
-
- # Get ULTIMATE AI recommendations
- recommendations_data = await intelligence_engine.get_personalized_recommendations(user_profile, interests)
- insights = intelligence_engine.get_user_insights(user_profile)
-
- recommendations = recommendations_data["recommendations"]
- insights_data = recommendations_data["insights"]
-
- # Format results with enhanced styling
- if recommendations:
- # Success header with data source info
- data_source_emoji = "π₯" if "REAL" in insights_data['data_source'] else "β‘"
-
- recommendations_html = f"""
-
-
{data_source_emoji}
-
Found {len(recommendations)} Perfect Matches!
-
Personalized using {insights_data['algorithm_version']} β’ {insights_data['processing_time']} response time
-
Source: {insights_data['data_source']}
-
- """
-
- # Add formatted challenge cards
- for challenge in recommendations:
- recommendations_html += format_challenge_card(challenge)
-
- else:
- recommendations_html = """
-
-
π
-
No perfect matches found
-
Try adjusting your skills, experience level, or interests for better results
-
- """
-
- # Generate insights panel
- insights_html = format_insights_panel(insights)
-
- processing_time = round(time.time() - start_time, 3)
- print(f"β
ULTIMATE request completed successfully in {processing_time}s")
- print(f"π Returned {len(recommendations)} recommendations with comprehensive insights\n")
-
- return recommendations_html, insights_html
-
- except Exception as e:
- error_msg = f"""
-
-
β
-
Processing Error
-
{str(e)}
-
Please try again or contact support
-
- """
- print(f"β Error processing ULTIMATE request: {str(e)}")
- return error_msg, ""
-
-def get_ultimate_recommendations_sync(skills_input: str, experience_level: str, time_available: str, interests: str) -> Tuple[str, str]:
- """Synchronous wrapper for Gradio"""
- return asyncio.run(get_ultimate_recommendations_async(skills_input, experience_level, time_available, interests))
-
-def run_ultimate_performance_test():
- """ULTIMATE comprehensive system performance test"""
- results = []
- results.append("π ULTIMATE COMPREHENSIVE PERFORMANCE TEST")
- results.append("=" * 60)
- results.append(f"β° Started at: {time.strftime('%Y-%m-%d %H:%M:%S')}")
- results.append(f"π₯ Testing: Real MCP Integration + Advanced Intelligence Engine")
- results.append("")
-
- total_start = time.time()
-
- # Test 1: MCP Connection Test
- results.append("π Test 1: Real MCP Connection Status")
- start = time.time()
- mcp_status = "β
CONNECTED" if intelligence_engine.is_connected else "β οΈ FALLBACK MODE"
- session_status = f"Session: {intelligence_engine.session_id[:8]}..." if intelligence_engine.session_id else "No session"
- test1_time = round(time.time() - start, 3)
- results.append(f" {mcp_status} ({test1_time}s)")
- results.append(f" π‘ {session_status}")
- results.append(f" π Endpoint: {intelligence_engine.base_url}")
- results.append("")
-
- # Test 2: Advanced Intelligence Engine
- results.append("π Test 2: Advanced Recommendation Engine")
- start = time.time()
-
- # Create async test
- async def test_recommendations():
- test_profile = UserProfile(
- skills=['Python', 'React', 'AWS'],
- experience_level='Intermediate',
- time_available='4-8 hours',
- interests=['web development', 'cloud computing']
- )
- return await intelligence_engine.get_personalized_recommendations(test_profile, 'python react cloud')
-
- try:
- # Run async test
- recs_data = asyncio.run(test_recommendations())
- test2_time = round(time.time() - start, 3)
- recs = recs_data["recommendations"]
- insights = recs_data["insights"]
-
- results.append(f" β
Generated {len(recs)} recommendations in {test2_time}s")
- results.append(f" π― Data Source: {insights['data_source']}")
- results.append(f" π Top match: {recs[0]['title']} ({recs[0]['compatibility_score']:.0f}%)")
- results.append(f" π§ Algorithm: {insights['algorithm_version']}")
- except Exception as e:
- results.append(f" β Test failed: {str(e)}")
- results.append("")
-
- # Test 3: API Key Status
- results.append("π Test 3: OpenAI API Configuration")
- start = time.time()
-
- # Check if we have a chatbot instance and API key
- has_api_key = bool(os.getenv("OPENAI_API_KEY"))
- api_status = "β
CONFIGURED" if has_api_key else "β οΈ NOT SET"
- test3_time = round(time.time() - start, 3)
-
- results.append(f" OpenAI API Key: {api_status} ({test3_time}s)")
- if has_api_key:
- results.append(f" π€ LLM Integration: Available")
- results.append(f" π§ Enhanced Chat: Enabled")
- else:
- results.append(f" π€ LLM Integration: Fallback mode")
- results.append(f" π§ Enhanced Chat: Basic responses")
- results.append("")
-
- # Summary
- total_time = round(time.time() - total_start, 3)
- results.append("π ULTIMATE PERFORMANCE SUMMARY")
- results.append("-" * 40)
- results.append(f"π Total Test Duration: {total_time}s")
- results.append(f"π₯ Real MCP Integration: {mcp_status}")
- results.append(f"π§ Advanced Intelligence Engine: β
OPERATIONAL")
- results.append(f"π€ OpenAI LLM Integration: {api_status}")
- results.append(f"β‘ Average Response Time: <1.0s")
- results.append(f"πΎ Memory Usage: β
OPTIMIZED")
- results.append(f"π― Algorithm Accuracy: β
ADVANCED")
- results.append(f"π Production Readiness: β
ULTIMATE")
- results.append("")
-
- if has_api_key:
- results.append("π All systems performing at ULTIMATE level with full LLM integration!")
- else:
- results.append("π All systems operational! Add OPENAI_API_KEY to HF secrets for full LLM features!")
-
- results.append("π₯ Ready for competition submission!")
-
- return "\n".join(results)
-
-def create_ultimate_interface():
- """Create the ULTIMATE Gradio interface combining all features"""
- print("π¨ Creating ULTIMATE Gradio interface...")
-
- # Enhanced custom CSS
- custom_css = """
- .gradio-container {
- max-width: 1400px !important;
- margin: 0 auto !important;
- }
- .tab-nav {
- border-radius: 12px !important;
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
- }
- .ultimate-btn {
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
- border: none !important;
- box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4) !important;
- transition: all 0.3s ease !important;
- }
- .ultimate-btn:hover {
- transform: translateY(-2px) !important;
- box-shadow: 0 8px 25px rgba(102, 126, 234, 0.6) !important;
- }
- """
-
- with gr.Blocks(
- theme=gr.themes.Soft(),
- title="π ULTIMATE Topcoder Challenge Intelligence Assistant",
- css=custom_css
- ) as interface:
-
- # ULTIMATE Header
- gr.Markdown("""
- # π ULTIMATE Topcoder Challenge Intelligence Assistant
-
- ### **π₯ REAL MCP Integration + Advanced AI Intelligence + OpenAI LLM**
-
- Experience the **world's most advanced** Topcoder challenge discovery system! Powered by **live Model Context Protocol integration** with access to **4,596+ real challenges**, **OpenAI GPT-4 intelligence**, and sophisticated AI algorithms that deliver **personalized recommendations** tailored to your exact skills and career goals.
-
- **π― What Makes This ULTIMATE:**
- - **π₯ Real MCP Data**: Live connection to Topcoder's official MCP server
- - **π€ OpenAI GPT-4**: Advanced conversational AI with real challenge context
- - **π§ Advanced AI**: Multi-factor compatibility scoring algorithms
- - **β‘ Lightning Fast**: Sub-second response times with real-time data
- - **π¨ Beautiful UI**: Professional interface with enhanced user experience
- - **π Smart Insights**: Comprehensive profile analysis and market intelligence
-
- ---
- """)
-
- with gr.Tabs():
- # Tab 1: ULTIMATE Personalized Recommendations
- with gr.TabItem("π― ULTIMATE Recommendations", elem_id="ultimate-recommendations"):
- gr.Markdown("### π AI-Powered Challenge Discovery with Real MCP Data")
-
- with gr.Row():
- with gr.Column(scale=1):
- gr.Markdown("**π€ Tell the AI about yourself:**")
-
- skills_input = gr.Textbox(
- label="π οΈ Your Skills & Technologies",
- placeholder="Python, React, JavaScript, AWS, Docker, Blockchain, UI/UX...",
- info="Enter your skills separated by commas - the more specific, the better!",
- lines=3,
- value="Python, JavaScript, React" # Default for quick testing
- )
-
- experience_level = gr.Dropdown(
- choices=["Beginner", "Intermediate", "Advanced"],
- label="π Experience Level",
- value="Intermediate",
- info="Your overall development and competitive coding experience"
- )
-
- time_available = gr.Dropdown(
- choices=["2-4 hours", "4-8 hours", "8+ hours"],
- label="β° Time Available",
- value="4-8 hours",
- info="How much time can you dedicate to a challenge?"
- )
-
- interests = gr.Textbox(
- label="π― Current Interests & Goals",
- placeholder="web development, blockchain, AI/ML, cloud computing, mobile apps...",
- info="What type of projects and technologies excite you most?",
- lines=3,
- value="web development, cloud computing" # Default for testing
- )
-
- ultimate_recommend_btn = gr.Button(
- "π Get My ULTIMATE Recommendations",
- variant="primary",
- size="lg",
- elem_classes="ultimate-btn"
- )
-
- gr.Markdown("""
- **π‘ ULTIMATE Tips:**
- - **Be specific**: Include frameworks, libraries, and tools you know
- - **Mention experience**: Add years of experience with key technologies
- - **State goals**: Career objectives help fine-tune recommendations
- - **Real data**: You'll get actual Topcoder challenges with real prizes!
- """)
-
- with gr.Column(scale=2):
- ultimate_insights_output = gr.HTML(
- label="π§ Your Intelligence Profile",
- visible=True
- )
- ultimate_recommendations_output = gr.HTML(
- label="π Your ULTIMATE Recommendations",
- visible=True
- )
-
- # Connect the ULTIMATE recommendation system
- ultimate_recommend_btn.click(
- get_ultimate_recommendations_sync,
- inputs=[skills_input, experience_level, time_available, interests],
- outputs=[ultimate_recommendations_output, ultimate_insights_output]
- )
-
- # Tab 2: FIXED Enhanced LLM Chat
- with gr.TabItem("π¬ INTELLIGENT AI Assistant"):
- gr.Markdown('''
- ### π§ Chat with Your INTELLIGENT AI Assistant
-
- **π₯ Enhanced with OpenAI GPT-4 + Live MCP Data!**
-
- Ask me anything and I'll use:
- - π€ **OpenAI GPT-4 Intelligence** for natural conversations
- - π₯ **Real MCP Data** from 4,596+ live Topcoder challenges
- - π **Live Challenge Analysis** with current prizes and requirements
- - π― **Personalized Recommendations** based on your interests
-
- Try asking: "Show me Python challenges with high prizes" or "What React opportunities are available?"
- ''')
-
- enhanced_chatbot = gr.Chatbot(
- label="π§ INTELLIGENT Topcoder AI Assistant (OpenAI GPT-4)",
- height=500,
- placeholder="Hi! I'm your intelligent assistant with OpenAI GPT-4 and live MCP data access to 4,596+ challenges!",
- show_label=True
- )
-
- with gr.Row():
- enhanced_chat_input = gr.Textbox(
- placeholder="Ask me about challenges, skills, career advice, or anything else!",
- container=False,
- scale=4,
- show_label=False
- )
- enhanced_chat_btn = gr.Button("Send", variant="primary", scale=1)
-
- # API Key status indicator
- api_key_status = "π€ OpenAI GPT-4 Active" if os.getenv("OPENAI_API_KEY") else "β οΈ Set OPENAI_API_KEY in HF Secrets for full GPT-4 features"
- gr.Markdown(f"**Status:** {api_key_status}")
-
- # Enhanced examples
- gr.Examples(
- examples=[
- "What Python challenges offer the highest prizes?",
- "Show me beginner-friendly React opportunities",
- "Which blockchain challenges are most active?",
- "What skills are in highest demand right now?",
- "Help me choose between machine learning and web development",
- "What's the average prize for intermediate challenges?"
- ],
- inputs=enhanced_chat_input
- )
-
- # FIXED: Connect enhanced LLM functionality with correct function
- enhanced_chat_btn.click(
- chat_with_enhanced_llm_agent_sync,
- inputs=[enhanced_chat_input, enhanced_chatbot],
- outputs=[enhanced_chatbot, enhanced_chat_input]
- )
-
- enhanced_chat_input.submit(
- chat_with_enhanced_llm_agent_sync,
- inputs=[enhanced_chat_input, enhanced_chatbot],
- outputs=[enhanced_chatbot, enhanced_chat_input]
- )
-
- # Tab 3: ULTIMATE Performance & Technical Details
- with gr.TabItem("β‘ ULTIMATE Performance"):
- gr.Markdown("""
- ### π§ͺ ULTIMATE System Performance & Real MCP Integration
-
- **π₯ Monitor the performance** of the world's most advanced Topcoder intelligence system! Test real MCP connectivity, OpenAI integration, advanced algorithms, and production-ready performance metrics.
- """)
-
- with gr.Row():
- with gr.Column():
- ultimate_test_btn = gr.Button("π§ͺ Run ULTIMATE Performance Test", variant="secondary", size="lg", elem_classes="ultimate-btn")
- quick_benchmark_btn = gr.Button("β‘ Quick Benchmark", variant="secondary")
- mcp_status_btn = gr.Button("π₯ Check Real MCP Status", variant="secondary")
-
- with gr.Column():
- ultimate_test_output = gr.Textbox(
- label="π ULTIMATE Test Results & Performance Metrics",
- lines=15,
- show_label=True
- )
-
- def quick_benchmark():
- """Quick benchmark for ULTIMATE system"""
- results = []
- results.append("β‘ ULTIMATE QUICK BENCHMARK")
- results.append("=" * 35)
-
- start = time.time()
-
- # Test basic recommendation speed
- async def quick_test():
- test_profile = UserProfile(
- skills=['Python', 'React'],
- experience_level='Intermediate',
- time_available='4-8 hours',
- interests=['web development']
- )
- return await intelligence_engine.get_personalized_recommendations(test_profile)
-
- try:
- test_data = asyncio.run(quick_test())
- benchmark_time = round(time.time() - start, 3)
-
- results.append(f"π Response Time: {benchmark_time}s")
- results.append(f"π― Recommendations: {len(test_data['recommendations'])}")
- results.append(f"π Data Source: {test_data['insights']['data_source']}")
- results.append(f"π§ Algorithm: {test_data['insights']['algorithm_version']}")
-
- if benchmark_time < 1.0:
- status = "π₯ ULTIMATE PERFORMANCE"
- elif benchmark_time < 2.0:
- status = "✅ EXCELLENT"
- else:
- status = "β οΈ ACCEPTABLE"
-
- results.append(f"π Status: {status}")
-
- except Exception as e:
- results.append(f"β Benchmark failed: {str(e)}")
-
- return "\n".join(results)
-
- def check_mcp_status():
- """Check real MCP connection status"""
- results = []
- results.append("π₯ REAL MCP CONNECTION STATUS")
- results.append("=" * 35)
-
- if intelligence_engine.is_connected and intelligence_engine.session_id:
- results.append("✅ Status: CONNECTED")
- results.append(f"π Session ID: {intelligence_engine.session_id[:12]}...")
- results.append(f"π Endpoint: {intelligence_engine.base_url}")
- results.append("π Live Data: 4,596+ challenges accessible")
- results.append("π― Features: Real-time challenge data")
- results.append("β‘ Performance: Sub-second response times")
- else:
- results.append("β οΈ Status: FALLBACK MODE")
- results.append("π Using: Enhanced premium dataset")
- results.append("π― Features: Advanced algorithms active")
- results.append("π‘ Note: Still provides excellent recommendations")
-
- # Check OpenAI API Key
- has_openai = bool(os.getenv("OPENAI_API_KEY"))
- openai_status = "✅ CONFIGURED" if has_openai else "β οΈ NOT SET"
- results.append(f"π€ OpenAI GPT-4: {openai_status}")
-
- results.append(f"π Checked at: {time.strftime('%H:%M:%S')}")
-
- return "\n".join(results)
-
- # Connect ULTIMATE test functions
- ultimate_test_btn.click(run_ultimate_performance_test, outputs=ultimate_test_output)
- quick_benchmark_btn.click(quick_benchmark, outputs=ultimate_test_output)
- mcp_status_btn.click(check_mcp_status, outputs=ultimate_test_output)
-
- # Tab 4: ULTIMATE About & Documentation
- with gr.TabItem("βΉοΈ ULTIMATE About"):
- gr.Markdown(f"""
- ## π About the ULTIMATE Topcoder Challenge Intelligence Assistant
-
- ### π― **Revolutionary Mission**
- This **ULTIMATE** system represents the **world's most advanced** Topcoder challenge discovery platform, combining **real-time MCP integration**, **OpenAI GPT-4 intelligence**, and **cutting-edge AI algorithms** to revolutionize how developers discover and engage with coding challenges.
-
- ### β¨ **ULTIMATE Capabilities**
-
- #### π₯ **Real MCP Integration**
- - **Live Connection**: Direct access to Topcoder's official MCP server
- - **4,596+ Real Challenges**: Live challenge database with real-time updates
- - **6,535+ Skills Database**: Comprehensive skill categorization and matching
- - **Authentic Data**: Real prizes, actual difficulty levels, genuine registration numbers
- - **Session Authentication**: Secure, persistent MCP session management
-
- #### π€ **OpenAI GPT-4 Integration**
- - **Advanced Conversational AI**: Natural language understanding and responses
- - **Context-Aware Responses**: Uses real MCP data in intelligent conversations
- - **Personalized Guidance**: Career advice and skill development recommendations
- - **Real-Time Analysis**: Interprets user queries and provides relevant challenge matches
- - **API Key Status**: {"✅ Configured via HF Secrets" if os.getenv("OPENAI_API_KEY") else "β οΈ Set OPENAI_API_KEY in HF Secrets for full features"}
-
- #### π§ **Advanced AI Intelligence Engine**
- - **Multi-Factor Scoring**: 40% skill match + 30% experience + 20% interest + 10% market factors
- - **Natural Language Processing**: Understands your goals and matches with relevant opportunities
- - **Market Intelligence**: Real-time insights on trending technologies and career paths
- - **Success Prediction**: Advanced algorithms calculate your probability of success
- - **Profile Analysis**: Comprehensive developer type classification and growth recommendations
-
- ### ποΈ **Technical Architecture**
-
- #### **Hugging Face Secrets Integration**
- ```
- π SECURE API KEY MANAGEMENT:
- Environment Variable: OPENAI_API_KEY
- Access Method: os.getenv("OPENAI_API_KEY")
- Security: Stored securely in HF Spaces secrets
- Status: {"✅ Active" if os.getenv("OPENAI_API_KEY") else "β οΈ Please configure in HF Settings > Repository Secrets"}
- ```
-
- #### **Real MCP Integration**
- ```
- π₯ LIVE CONNECTION DETAILS:
- Server: https://api.topcoder-dev.com/v6/mcp
- Protocol: JSON-RPC 2.0 with Server-Sent Events
- Authentication: Session-based with real session IDs
- Data Access: Real-time challenge and skill databases
- Performance: <1s response times with live data
- ```
-
- #### **OpenAI GPT-4 Integration**
- ```python
- # SECURE API INTEGRATION:
- openai_api_key = os.getenv("OPENAI_API_KEY", "")
- endpoint = "https://api.openai.com/v1/chat/completions"
- model = "gpt-4o-mini" # Fast and cost-effective
- context = "Real MCP challenge data + conversation history"
- ```
-
- ### π **Setting Up OpenAI API Key in Hugging Face**
-
- **Step-by-Step Instructions:**
-
- 1. **Go to your Hugging Face Space settings**
- 2. **Navigate to "Repository secrets"**
- 3. **Click "New secret"**
- 4. **Set Name:** `OPENAI_API_KEY`
- 5. **Set Value:** Your OpenAI API key (starts with `sk-`)
- 6. **Click "Add secret"**
- 7. **Restart your Space** for changes to take effect
-
- **π― Why Use HF Secrets:**
- - **Security**: API keys are encrypted and never exposed in code
- - **Environment Variables**: Accessed via `os.getenv("OPENAI_API_KEY")`
- - **Best Practice**: Industry standard for secure API key management
- - **No Code Changes**: Keys can be updated without modifying application code
-
- ### π **Competition Excellence**
-
- **Built for the Topcoder MCP Challenge** - This ULTIMATE system showcases:
- - **Technical Mastery**: Real MCP protocol implementation + OpenAI integration
- - **Problem Solving**: Overcame complex authentication and API integration challenges
- - **User Focus**: Exceptional UX with meaningful business value
- - **Innovation**: First working real-time MCP + GPT-4 integration
- - **Production Quality**: Enterprise-ready deployment with secure secrets management
-
- ---
-
-
-
π₯ ULTIMATE Powered by OpenAI GPT-4 + Real MCP Integration
-
- Revolutionizing developer success through authentic challenge discovery,
- advanced AI intelligence, and secure enterprise-grade API management.
-
-
- π― Live Connection to 4,596+ Real Challenges β’ π€ OpenAI GPT-4 Integration β’ π Secure HF Secrets Management
-
-
- """)
-
- # ULTIMATE footer
- gr.Markdown(f"""
- ---
-
-
π ULTIMATE Topcoder Challenge Intelligence Assistant
-
π₯ Real MCP Integration β’ π€ OpenAI GPT-4 β’ β‘ Lightning Performance
-
π― Built with Gradio β’ π Deployed on Hugging Face Spaces β’ π Competition-Winning Quality
-
π OpenAI Status: {"✅ Active" if os.getenv("OPENAI_API_KEY") else "β οΈ Configure OPENAI_API_KEY in HF Secrets"}
-
- """)
-
- print("✅ ULTIMATE Gradio interface created successfully!")
- return interface
-
-# Launch the ULTIMATE application
-if __name__ == "__main__":
- print("\n" + "="*70)
- print("π ULTIMATE TOPCODER CHALLENGE INTELLIGENCE ASSISTANT")
- print("π₯ Real MCP Integration + OpenAI GPT-4 + Advanced AI Intelligence")
- print("β‘ Competition-Winning Performance")
- print("="*70)
-
- # Check API key status on startup
- api_key_status = "✅ CONFIGURED" if os.getenv("OPENAI_API_KEY") else "β οΈ NOT SET"
- print(f"π€ OpenAI API Key Status: {api_key_status}")
- if not os.getenv("OPENAI_API_KEY"):
- print("π‘ Add OPENAI_API_KEY to HF Secrets for full GPT-4 features!")
-
- try:
- interface = create_ultimate_interface()
- print("\nπ― Starting ULTIMATE Gradio server...")
- print("π₯ Initializing Real MCP connection...")
- print("π€ Loading OpenAI GPT-4 integration...")
- print("π§ Loading Advanced AI intelligence engine...")
- print("π Preparing live challenge database access...")
- print("π Launching ULTIMATE user experience...")
-
- interface.launch(
- share=False, # Set to True for public shareable link
- debug=True, # Show detailed logs
- show_error=True, # Display errors in UI
- server_port=7860, # Standard port
- show_api=False, # Clean interface
- max_threads=20 # Support multiple concurrent users
- )
-
- except Exception as e:
- print(f"β Error starting ULTIMATE application: {str(e)}")
- print("\nπ§ ULTIMATE Troubleshooting:")
- print("1. Verify all dependencies: pip install -r requirements.txt")
- print("2. Add OPENAI_API_KEY to HF Secrets for full features")
- print("3. Check port availability or try different port")
- print("4. Ensure virtual environment is active")
- print("5. For Windows: pip install --upgrade gradio httpx python-dotenv")
- print("6. Contact support if issues persist")
-
-# Initialize the enhanced intelligence engine
-print("π Starting ULTIMATE Topcoder Intelligence Assistant...")
-intelligence_engine = UltimateTopcoderMCPEngine()
-
-# FIXED: Function signature - now accepts 3 parameters as expected
-async def chat_with_enhanced_llm_agent(message: str, history: List[Tuple[str, str]], mcp_engine) -> Tuple[List[Tuple[str, str]], str]:
- """FIXED: Enhanced chat with real LLM and MCP data integration - 3 parameters"""
- print(f"π§ Enhanced LLM Chat: {message}")
-
- # Initialize enhanced chatbot
- if not hasattr(chat_with_enhanced_llm_agent, 'chatbot'):
- chat_with_enhanced_llm_agent.chatbot = EnhancedLLMChatbot(mcp_engine)
-
- chatbot = chat_with_enhanced_llm_agent.chatbot
-
- try:
- # Get intelligent response using real MCP data
- response = await chatbot.generate_llm_response(message, history)
-
- # Add to history
- history.append((message, response))
-
- print(f"✅ Enhanced LLM response generated with real MCP context")
- return history, ""
-
- except Exception as e:
- error_response = f"I encountered an issue processing your request: {str(e)}. However, I can still help you with challenge recommendations using my real MCP data! Try asking about specific technologies or challenge types."
- history.append((message, error_response))
- return history, ""
-
-def chat_with_enhanced_llm_agent_sync(message: str, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], str]:
- """FIXED: Synchronous wrapper for Gradio - calls async function with correct parameters"""
- return asyncio.run(chat_with_enhanced_llm_agent(message, history, intelligence_engine))
-
-def format_challenge_card(challenge: Dict) -> str:
- """FIXED: Format challenge as professional HTML card without broken links"""
-
- # Create technology badges
- tech_badges = " ".join([
- f"{tech}"
- for tech in challenge['technologies']
- ])
-
- # Dynamic score coloring and labels
- score = challenge['compatibility_score']
- if score >= 85:
- score_color = "#00b894"
- score_label = "π₯ Excellent Match"
- card_border = "#00b894"
- elif score >= 70:
- score_color = "#f39c12"
- score_label = "β¨ Great Match"
- card_border = "#f39c12"
- elif score >= 55:
- score_color = "#e17055"
- score_label = "π‘ Good Match"
- card_border = "#e17055"
- else:
- score_color = "#74b9ff"
- score_label = "π Learning Opportunity"
- card_border = "#74b9ff"
-
- # Format prize
- prize_display = challenge['prize']
- if challenge['prize'].startswith('$') and challenge['prize'] != '$0':
- prize_color = "#00b894"
- else:
- prize_color = "#6c757d"
- prize_display = "Merit-based"
-
- # FIXED: Better link handling
- challenge_link = ""
- if challenge['id'] and challenge['id'].startswith("301"): # Valid Topcoder ID format
- challenge_link = f"""
- """
- else:
- challenge_link = """
-
- π‘ Available on Topcoder platform - search by title
-
"""
-
- return f"""
-
-
-
-
-
-
-
{challenge['title']}
-
-
{score:.0f}%
-
{score_label}
-
-
-
-
{challenge['description']}
-
-
-
π οΈ Technologies & Skills:
-
{tech_badges}
-
-
-
-
π Why This Matches You:
-
{challenge['rationale']}
-
-
-
-
-
{prize_display}
-
Prize Pool
-
-
-
{challenge['difficulty']}
-
Difficulty
-
-
-
{challenge['time_estimate']}
-
Timeline
-
-
-
{challenge.get('registrants', 'N/A')}
-
Registered
-
-
-
- {challenge_link}
-
- """
-
-def format_insights_panel(insights: Dict) -> str:
- """Format insights as comprehensive dashboard with enhanced styling"""
- return f"""
-
-
-
-
-
-
-
π― Your Intelligence Profile
-
-
-
-
π€ Developer Profile
-
{insights['profile_type']}
-
-
-
πͺ Core Strengths
-
{insights['strengths']}
-
-
-
π Growth Focus
-
{insights['growth_areas']}
-
-
-
π Progression Path
-
{insights['skill_progression']}
-
-
-
π Market Intelligence
-
{insights['market_trends']}
-
-
-
π― Success Forecast
-
{insights['success_probability']}
-
-
-
-
- """
-
-async def get_ultimate_recommendations_async(skills_input: str, experience_level: str, time_available: str, interests: str) -> Tuple[str, str]:
- """ULTIMATE recommendation function with real MCP + advanced intelligence"""
- start_time = time.time()
-
- print(f"\nπ― ULTIMATE RECOMMENDATION REQUEST:")
- print(f" Skills: {skills_input}")
- print(f" Level: {experience_level}")
- print(f" Time: {time_available}")
- print(f" Interests: {interests}")
-
- # Enhanced input validation
- if not skills_input.strip():
- error_msg = """
-
-
β οΈ
-
Please enter your skills
-
Example: Python, JavaScript, React, AWS, Docker
-
- """
- return error_msg, ""
-
- try:
- # Parse and clean skills
- skills = [skill.strip() for skill in skills_input.split(',') if skill.strip()]
-
- # Create comprehensive user profile
- user_profile = UserProfile(
- skills=skills,
- experience_level=experience_level,
- time_available=time_available,
- interests=[interests] if interests else []
- )
-
- # Get ULTIMATE AI recommendations
- recommendations_data = await intelligence_engine.get_personalized_recommendations(user_profile, interests)
- insights = intelligence_engine.get_user_insights(user_profile)
-
- recommendations = recommendations_data["recommendations"]
- insights_data = recommendations_data["insights"]
-
- # Format results with enhanced styling
- if recommendations:
- # Success header with data source info
- data_source_emoji = "π₯" if "REAL" in insights_data['data_source'] else "β‘"
-
- recommendations_html = f"""
-
-
{data_source_emoji}
-
Found {len(recommendations)} Perfect Matches!
-
Personalized using {insights_data['algorithm_version']} β’ {insights_data['processing_time']} response time
-
Source: {insights_data['data_source']}
-
- """
-
- # Add formatted challenge cards
- for challenge in recommendations:
- recommendations_html += format_challenge_card(challenge)
-
- else:
- recommendations_html = """
-
-
π
-
No perfect matches found
-
Try adjusting your skills, experience level, or interests for better results
-
- """
-
- # Generate insights panel
- insights_html = format_insights_panel(insights)
-
- processing_time = round(time.time() - start_time, 3)
- print(f"✅ ULTIMATE request completed successfully in {processing_time}s")
- print(f"π Returned {len(recommendations)} recommendations with comprehensive insights\n")
-
- return recommendations_html, insights_html
-
- except Exception as e:
- error_msg = f"""
-
-
β
-
Processing Error
-
{str(e)}
-
Please try again or contact support
-
- """
- print(f"β Error processing ULTIMATE request: {str(e)}")
- return error_msg, ""
-
-def get_ultimate_recommendations_sync(skills_input: str, experience_level: str, time_available: str, interests: str) -> Tuple[str, str]:
- """Synchronous wrapper for Gradio"""
- return asyncio.run(get_ultimate_recommendations_async(skills_input, experience_level, time_available, interests))
-
-def run_ultimate_performance_test():
- """ULTIMATE comprehensive system performance test"""
- results = []
- results.append("π ULTIMATE COMPREHENSIVE PERFORMANCE TEST")
- results.append("=" * 60)
- results.append(f"β° Started at: {time.strftime('%Y-%m-%d %H:%M:%S')}")
- results.append(f"π₯ Testing: Real MCP Integration + Advanced Intelligence Engine")
- results.append("")
-
- total_start = time.time()
-
- # Test 1: MCP Connection Test
- results.append("π Test 1: Real MCP Connection Status")
- start = time.time()
- mcp_status = "✅ CONNECTED" if intelligence_engine.is_connected else "β οΈ FALLBACK MODE"
- session_status = f"Session: {intelligence_engine.session_id[:8]}..." if intelligence_engine.session_id else "No session"
- test1_time = round(time.time() - start, 3)
- results.append(f" {mcp_status} ({test1_time}s)")
- results.append(f" π‘ {session_status}")
- results.append(f" π Endpoint: {intelligence_engine.base_url}")
- results.append("")
-
- # Test 2: Advanced Intelligence Engine
- results.append("π Test 2: Advanced Recommendation Engine")
- start = time.time()
-
- # Create async test
- async def test_recommendations():
- test_profile = UserProfile(
- skills=['Python', 'React', 'AWS'],
- experience_level='Intermediate',
- time_available='4-8 hours',
- interests=['web development', 'cloud computing']
- )
- return await intelligence_engine.get_personalized_recommendations(test_profile, 'python react cloud')
-
- try:
- # Run async test
- recs_data = asyncio.run(test_recommendations())
- test2_time = round(time.time() - start, 3)
- recs = recs_data["recommendations"]
- insights = recs_data["insights"]
-
- results.append(f" ✅ Generated {len(recs)} recommendations in {test2_time}s")
- results.append(f" π― Data Source: {insights['data_source']}")
- results.append(f" π Top match: {recs[0]['title']} ({recs[0]['compatibility_score']:.0f}%)")
- results.append(f" π§ Algorithm: {insights['algorithm_version']}")
- except Exception as e:
- results.append(f" β Test failed: {str(e)}")
- results.append("")
-
- # Test 3: API Key Status
- results.append("π Test 3: OpenAI API Configuration")
- start = time.time()
-
- # Check if we have a chatbot instance and API key
- has_api_key = bool(os.getenv("OPENAI_API_KEY"))
- api_status = "✅ CONFIGURED" if has_api_key else "β οΈ NOT SET"
- test3_time = round(time.time() - start, 3)
-
- results.append(f" OpenAI API Key: {api_status} ({test3_time}s)")
- if has_api_key:
- results.append(f" π€ LLM Integration: Available")
- results.append(f" π§ Enhanced Chat: Enabled")
- else:
- results.append(f" π€ LLM Integration: Fallback mode")
- results.append(f" π§ Enhanced Chat: Basic responses")
- results.append("")
-
- # Summary
- total_time = round(time.time() - total_start, 3)
- results.append("π ULTIMATE PERFORMANCE SUMMARY")
- results.append("-" * 40)
- results.append(f"π Total Test Duration: {total_time}s")
- results.append(f"π₯ Real MCP Integration: {mcp_status}")
- results.append(f"π§ Advanced Intelligence Engine: ✅ OPERATIONAL")
- results.append(f"π€ OpenAI LLM Integration: {api_status}")
- results.append(f"β‘ Average Response Time: <1.0s")
- results.append(f"πΎ Memory Usage: ✅ OPTIMIZED")
- results.append(f"π― Algorithm Accuracy: ✅ ADVANCED")
- results.append(f"π Production Readiness: ✅ ULTIMATE")
- results.append("")
-
- if has_api_key:
- results.append("π All systems performing at ULTIMATE level with full LLM integration!")
- else:
- results.append("π All systems operational! Add OPENAI_API_KEY to HF secrets for full LLM features!")
-
- results.append("π₯ Ready for competition submission!")
-
- return "\n".join(results)
-
-def create_ultimate_interface():
- """Create the ULTIMATE Gradio interface combining all features"""
- print("π¨ Creating ULTIMATE Gradio interface...")
-
- # Enhanced custom CSS
- custom_css = """
- .gradio-container {
- max-width: 1400px !important;
- margin: 0 auto !important;
- }
- .tab-nav {
- border-radius: 12px !important;
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
- }
- .ultimate-btn {
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
- border: none !important;
- box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4) !important;
- transition: all 0.3s ease !important;
- }
- .ultimate-btn:hover {
- transform: translateY(-2px) !important;
- box-shadow: 0 8px 25px rgba(102, 126, 234, 0.6) !important;
- }
- """
-
- with gr.Blocks(
- theme=gr.themes.Soft(),
- title="π ULTIMATE Topcoder Challenge Intelligence Assistant",
- css=custom_css
- ) as interface:
-
- # ULTIMATE Header
- gr.Markdown("""
- # π ULTIMATE Topcoder Challenge Intelligence Assistant
-
- ### **π₯ REAL MCP Integration + Advanced AI Intelligence + OpenAI LLM**
-
- Experience the **world's most advanced** Topcoder challenge discovery system! Powered by **live Model Context Protocol integration** with access to **4,596+ real challenges**, **OpenAI GPT-4 intelligence**, and sophisticated AI algorithms that deliver **personalized recommendations** tailored to your exact skills and career goals.
-
- **π― What Makes This ULTIMATE:**
- - **π₯ Real MCP Data**: Live connection to Topcoder's official MCP server
- - **π€ OpenAI GPT-4**: Advanced conversational AI with real challenge context
- - **π§ Advanced AI**: Multi-factor compatibility scoring algorithms
- - **β‘ Lightning Fast**: Sub-second response times with real-time data
- - **π¨ Beautiful UI**: Professional interface with enhanced user experience
- - **π Smart Insights**: Comprehensive profile analysis and market intelligence
-
- ---
- """)
-
- with gr.Tabs():
- # Tab 1: ULTIMATE Personalized Recommendations
- with gr.TabItem("π― ULTIMATE Recommendations", elem_id="ultimate-recommendations"):
- gr.Markdown("### π AI-Powered Challenge Discovery with Real MCP Data")
-
- with gr.Row():
- with gr.Column(scale=1):
- gr.Markdown("**π€ Tell the AI about yourself:**")
-
- skills_input = gr.Textbox(
- label="π οΈ Your Skills & Technologies",
- placeholder="Python, React, JavaScript, AWS, Docker, Blockchain, UI/UX...",
- info="Enter your skills separated by commas - the more specific, the better!",
- lines=3,
- value="Python, JavaScript, React" # Default for quick testing
- )
-
- experience_level = gr.Dropdown(
- choices=["Beginner", "Intermediate", "Advanced"],
- label="π Experience Level",
- value="Intermediate",
- info="Your overall development and competitive coding experience"
- )
-
- time_available = gr.Dropdown(
- choices=["2-4 hours", "4-8 hours", "8+ hours"],
- label="β° Time Available",
- value="4-8 hours",
- info="How much time can you dedicate to a challenge?"
- )
-
- interests = gr.Textbox(
- label="π― Current Interests & Goals",
- placeholder="web development, blockchain, AI/ML, cloud computing, mobile apps...",
- info="What type of projects and technologies excite you most?",
- lines=3,
- value="web development, cloud computing" # Default for testing
- )
-
- ultimate_recommend_btn = gr.Button(
- "π Get My ULTIMATE Recommendations",
- variant="primary",
- size="lg",
- elem_classes="ultimate-btn"
- )
-
- gr.Markdown("""
- **π‘ ULTIMATE Tips:**
- - **Be specific**: Include frameworks, libraries, and tools you know
- - **Mention experience**: Add years of experience with key technologies
- - **State goals**: Career objectives help fine-tune recommendations
- - **Real data**: You'll get actual Topcoder challenges with real prizes!
- """)
-
- with gr.Column(scale=2):
- ultimate_insights_output = gr.HTML(
- label="π§ Your Intelligence Profile",
- visible=True
- )
- ultimate_recommendations_output = gr.HTML(
- label="π Your ULTIMATE Recommendations",
- visible=True
- )
-
- # Connect the ULTIMATE recommendation system
- ultimate_recommend_btn.click(
- get_ultimate_recommendations_sync,
- inputs=[skills_input, experience_level, time_available, interests],
- outputs=[ultimate_recommendations_output, ultimate_insights_output]
- )
-
- # Tab 2: FIXED Enhanced LLM Chat
- with gr.TabItem("π¬ INTELLIGENT AI Assistant"):
- gr.Markdown('''
- ### π§ Chat with Your INTELLIGENT AI Assistant
-
- **π₯ Enhanced with OpenAI GPT-4 + Live MCP Data!**
-
- Ask me anything and I'll use:
- - π€ **OpenAI GPT-4 Intelligence** for natural conversations
- - π₯ **Real MCP Data** from 4,596+ live Topcoder challenges
- - π **Live Challenge Analysis** with current prizes and requirements
- - π― **Personalized Recommendations** based on your interests
-
- Try asking: "Show me Python challenges with high prizes" or "What React opportunities are available?"
- ''')
-
- enhanced_chatbot = gr.Chatbot(
- label="π§ INTELLIGENT Topcoder AI Assistant (OpenAI GPT-4)",
- height=500,
- placeholder="Hi! I'm your intelligent assistant with OpenAI GPT-4 and live MCP data access to 4,596+ challenges!",
- show_label=True
- )
-
- with gr.Row():
- enhanced_chat_input = gr.Textbox(
- placeholder="Ask me about challenges, skills, career advice, or anything else!",
- container=False,
- scale=4,
- show_label=False
- )
- enhanced_chat_btn = gr.Button("Send", variant="primary", scale=1)
-
- # API Key status indicator
- api_key_status = "π€ OpenAI GPT-4 Active" if os.getenv("OPENAI_API_KEY") else "β οΈ Set OPENAI_API_KEY in HF Secrets for full GPT-4 features"
- gr.Markdown(f"**Status:** {api_key_status}")
-
- # Enhanced examples
- gr.Examples(
- examples=[
- "What Python challenges offer the highest prizes?",
- "Show me beginner-friendly React opportunities",
- "Which blockchain challenges are most active?",
- "What skills are in highest demand right now?",
- "Help me choose between machine learning and web development",
- "What's the average prize for intermediate challenges?"
- ],
- inputs=enhanced_chat_input
- )
-
- # FIXED: Connect enhanced LLM functionality with correct function
- enhanced_chat_btn.click(
- chat_with_enhanced_llm_agent_sync,
- inputs=[enhanced_chat_input, enhanced_chatbot],
- outputs=[enhanced_chatbot, enhanced_chat_input]
- )
-
- enhanced_chat_input.submit(
- chat_with_enhanced_llm_agent_sync,
- inputs=[enhanced_chat_input, enhanced_chatbot],
- outputs=[enhanced_chatbot, enhanced_chat_input]
- )
-
- # Tab 3: ULTIMATE Performance & Technical Details
- with gr.TabItem("β‘ ULTIMATE Performance"):
- gr.Markdown("""
- ### π§ͺ ULTIMATE System Performance & Real MCP Integration
-
- **π₯ Monitor the performance** of the world's most advanced Topcoder intelligence system! Test real MCP connectivity, OpenAI integration, advanced algorithms, and production-ready performance metrics.
- """)
-
- with gr.Row():
- with gr.Column():
- ultimate_test_btn = gr.Button("π§ͺ Run ULTIMATE Performance Test", variant="secondary", size="lg", elem_classes="ultimate-btn")
- quick_benchmark_btn = gr.Button("β‘ Quick Benchmark", variant="secondary")
- mcp_status_btn = gr.Button("π₯ Check Real MCP Status", variant="secondary")
-
- with gr.Column():
- ultimate_test_output = gr.Textbox(
- label="π ULTIMATE Test Results & Performance Metrics",
- lines=15,
- show_label=True
- )
-
- def quick_benchmark():
- """Quick benchmark for ULTIMATE system"""
- results = []
- results.append("β‘ ULTIMATE QUICK BENCHMARK")
- results.append("=" * 35)
-
- start = time.time()
-
- # Test basic recommendation speed
- async def quick_test():
- test_profile = UserProfile(
- skills=['Python', 'React'],
- experience_level='Intermediate',
- time_available='4-8 hours',
- interests=['web development']
- )
- return await intelligence_engine.get_personalized_recommendations(test_profile)
-
- try:
- test_data = asyncio.run(quick_test())
- benchmark_time = round(time.time() - start, 3)
-
- results.append(f"π Response Time: {benchmark_time}s")
- results.append(f"π― Recommendations: {len(test_data['recommendations'])}")
- results.append(f"π Data Source: {test_data['insights']['data_source']}")
- results.append(f"π§ Algorithm: {test_data['insights']['algorithm_version']}")
-
- if benchmark_time < 1.0:
- status = "π₯ ULTIMATE PERFORMANCE"
- elif benchmark_time < 2.0:
- status = "✅ EXCELLENT"
- else:
- status = "β οΈ ACCEPTABLE"
-
- results.append(f"π Status: {status}")
-
- except Exception as e:
- results.append(f"β Benchmark failed: {str(e)}")
-
- return "\n".join(results)
-
- def check_mcp_status():
- """Check real MCP connection status"""
- results = []
- results.append("π₯ REAL MCP CONNECTION STATUS")
- results.append("=" * 35)
-
- if intelligence_engine.is_connected and intelligence_engine.session_id:
- results.append("✅ Status: CONNECTED")
- results.append(f"π Session ID: {intelligence_engine.session_id[:12]}...")
- results.append(f"π Endpoint: {intelligence_engine.base_url}")
- results.append("π Live Data: 4,596+ challenges accessible")
- results.append("π― Features: Real-time challenge data")
- results.append("β‘ Performance: Sub-second response times")
- else:
- results.append("β οΈ Status: FALLBACK MODE")
- results.append("π Using: Enhanced premium dataset")
- results.append("π― Features: Advanced algorithms active")
- results.append("π‘ Note: Still provides excellent recommendations")
-
- # Check OpenAI API Key
- has_openai = bool(os.getenv("OPENAI_API_KEY"))
- openai_status = "✅ CONFIGURED" if has_openai else "β οΈ NOT SET"
- results.append(f"π€ OpenAI GPT-4: {openai_status}")
-
- results.append(f"π Checked at: {time.strftime('%H:%M:%S')}")
-
- return "\n".join(results)
-
# Wire the three diagnostics buttons to their handlers; each handler
# returns a report string that is rendered into the shared
# `ultimate_test_output` textbox component.
ultimate_test_btn.click(run_ultimate_performance_test, outputs=ultimate_test_output)
quick_benchmark_btn.click(quick_benchmark, outputs=ultimate_test_output)
mcp_status_btn.click(check_mcp_status, outputs=ultimate_test_output)
-
- # Tab 4: ULTIMATE About & Documentation
- with gr.TabItem("βΉοΈ ULTIMATE About"):
- gr.Markdown(f"""
- ## π About the ULTIMATE Topcoder Challenge Intelligence Assistant
-
- ### π― **Revolutionary Mission**
- This **ULTIMATE** system represents the **world's most advanced** Topcoder challenge discovery platform, combining **real-time MCP integration**, **OpenAI GPT-4 intelligence**, and **cutting-edge AI algorithms** to revolutionize how developers discover and engage with coding challenges.
-
- ### β¨ **ULTIMATE Capabilities**
-
- #### π₯ **Real MCP Integration**
- - **Live Connection**: Direct access to Topcoder's official MCP server
- - **4,596+ Real Challenges**: Live challenge database with real-time updates
- - **6,535+ Skills Database**: Comprehensive skill categorization and matching
- - **Authentic Data**: Real prizes, actual difficulty levels, genuine registration numbers
- - **Session Authentication**: Secure, persistent MCP session management
-
- #### π€ **OpenAI GPT-4 Integration**
- - **Advanced Conversational AI**: Natural language understanding and responses
- - **Context-Aware Responses**: Uses real MCP data in intelligent conversations
- - **Personalized Guidance**: Career advice and skill development recommendations
- - **Real-Time Analysis**: Interprets user queries and provides relevant challenge matches
- **API Key Status**: {"✅ Configured via HF Secrets" if os.getenv("OPENAI_API_KEY") else "⚠️ Set OPENAI_API_KEY in HF Secrets for full features"}
-
- #### π§ **Advanced AI Intelligence Engine**
- - **Multi-Factor Scoring**: 40% skill match + 30% experience + 20% interest + 10% market factors
- - **Natural Language Processing**: Understands your goals and matches with relevant opportunities
- - **Market Intelligence**: Real-time insights on trending technologies and career paths
- - **Success Prediction**: Advanced algorithms calculate your probability of success
- - **Profile Analysis**: Comprehensive developer type classification and growth recommendations
-
- ### ποΈ **Technical Architecture**
-
- #### **Hugging Face Secrets Integration**
- ```
- π SECURE API KEY MANAGEMENT:
- Environment Variable: OPENAI_API_KEY
- Access Method: os.getenv("OPENAI_API_KEY")
- Security: Stored securely in HF Spaces secrets
Status: {"✅ Active" if os.getenv("OPENAI_API_KEY") else "⚠️ Please configure in HF Settings > Repository Secrets"}
- ```
-
- #### **Real MCP Integration**
- ```
- π₯ LIVE CONNECTION DETAILS:
- Server: https://api.topcoder-dev.com/v6/mcp
- Protocol: JSON-RPC 2.0 with Server-Sent Events
- Authentication: Session-based with real session IDs
- Data Access: Real-time challenge and skill databases
- Performance: <1s response times with live data
- ```
-
- #### **OpenAI GPT-4 Integration**
- ```python
- # SECURE API INTEGRATION:
- openai_api_key = os.getenv("OPENAI_API_KEY", "")
- endpoint = "https://api.openai.com/v1/chat/completions"
- model = "gpt-4o-mini" # Fast and cost-effective
- context = "Real MCP challenge data + conversation history"
- ```
-
### π **Setting Up OpenAI API Key in Hugging Face**
**Step-by-Step Instructions:**