kikomiko commited on
Commit
494bcbb
Β·
1 Parent(s): 2994e1b

use real MCP data fix

Browse files
Files changed (1) hide show
  1. app.py +219 -342
app.py CHANGED
@@ -27,85 +27,19 @@ class Challenge:
27
 
28
  @dataclass
29
  class UserProfile:
30
- skills: List[str]
31
  experience_level: str
32
  time_available: str
33
  interests: List[str]
34
 
35
  class UltimateTopcoderMCPEngine:
36
- """FIXED: Real MCP Integration - More Aggressive Connection"""
37
 
38
  def __init__(self):
39
  print("πŸš€ Initializing ULTIMATE Topcoder MCP Engine...")
40
  self.base_url = "https://api.topcoder.com/v6/mcp"
41
  self.session_id = None
42
  self.is_connected = False
43
- self.mock_challenges = self._create_enhanced_fallback_challenges()
44
- print(f"βœ… Loaded fallback system with {len(self.mock_challenges)} premium challenges")
45
-
46
- def _create_enhanced_fallback_challenges(self) -> List[Challenge]:
47
- return [
48
- Challenge(
49
- id="30174840",
50
- title="React Component Library Development",
51
- description="Build a comprehensive React component library with TypeScript support and Storybook documentation. Perfect for developers looking to create reusable UI components.",
52
- technologies=["React", "TypeScript", "Storybook", "CSS", "Jest"],
53
- difficulty="Intermediate",
54
- prize="$3,000",
55
- time_estimate="14 days",
56
- registrants=45
57
- ),
58
- Challenge(
59
- id="30174841",
60
- title="Python API Performance Optimization",
61
- description="Optimize existing Python FastAPI application for better performance and scalability. Focus on database queries, caching strategies, and async processing.",
62
- technologies=["Python", "FastAPI", "PostgreSQL", "Redis", "Docker"],
63
- difficulty="Advanced",
64
- prize="$5,000",
65
- time_estimate="21 days",
66
- registrants=28
67
- ),
68
- Challenge(
69
- id="30174842",
70
- title="Mobile App UI/UX Design",
71
- description="Design modern, accessible mobile app interface with dark mode support and responsive layouts for both iOS and Android platforms.",
72
- technologies=["Figma", "UI/UX", "Mobile Design", "Accessibility", "Prototyping"],
73
- difficulty="Beginner",
74
- prize="$2,000",
75
- time_estimate="10 days",
76
- registrants=67
77
- ),
78
- Challenge(
79
- id="30174843",
80
- title="Blockchain Smart Contract Development",
81
- description="Develop secure smart contracts for DeFi applications with comprehensive testing suite and gas optimization techniques.",
82
- technologies=["Solidity", "Web3", "JavaScript", "Hardhat", "Testing"],
83
- difficulty="Advanced",
84
- prize="$7,500",
85
- time_estimate="28 days",
86
- registrants=19
87
- ),
88
- Challenge(
89
- id="30174844",
90
- title="Data Visualization Dashboard",
91
- description="Create interactive data visualization dashboard using modern charting libraries with real-time data updates and export capabilities.",
92
- technologies=["D3.js", "JavaScript", "HTML", "CSS", "Chart.js"],
93
- difficulty="Intermediate",
94
- prize="$4,000",
95
- time_estimate="18 days",
96
- registrants=33
97
- ),
98
- Challenge(
99
- id="30174845",
100
- title="Machine Learning Model Deployment",
101
- description="Deploy ML models to production with API endpoints, monitoring, and auto-scaling capabilities using cloud platforms.",
102
- technologies=["Python", "TensorFlow", "Docker", "Kubernetes", "AWS"],
103
- difficulty="Advanced",
104
- prize="$6,000",
105
- time_estimate="25 days",
106
- registrants=24
107
- )
108
- ]
109
 
110
  def parse_sse_response(self, sse_text: str) -> Dict[str, Any]:
111
  """Parse Server-Sent Events response"""
@@ -175,7 +109,7 @@ class UltimateTopcoderMCPEngine:
175
  print("⚠️ MCP connection succeeded but no session ID found")
176
 
177
  except Exception as e:
178
- print(f"⚠️ MCP connection failed, using enhanced fallback: {e}")
179
 
180
  return False
181
 
@@ -338,19 +272,19 @@ class UltimateTopcoderMCPEngine:
338
  sort_by: str = None,
339
  sort_order: str = None,
340
  ) -> List[Challenge]:
341
- """FIXED: More aggressive real challenge fetching"""
342
 
343
  # Always try to connect
344
  print(f"πŸ”„ Attempting to fetch REAL challenges (limit: {limit})")
345
  connection_success = await self.initialize_connection()
346
 
347
  if not connection_success:
348
- print("❌ Could not establish MCP connection, using fallback")
349
- return []
350
 
351
  # Build comprehensive query parameters
352
  skill_keywords = self.extract_technologies_from_query(
353
- query + " " + " ".join(user_profile.skills + user_profile.interests)
354
  )
355
 
356
  mcp_query = {
@@ -387,7 +321,7 @@ class UltimateTopcoderMCPEngine:
387
  result = await self.call_tool("query-tc-challenges", mcp_query)
388
  if not result:
389
  print("❌ No result from MCP tool call")
390
- return []
391
 
392
  print(f"πŸ“Š Raw MCP result type: {type(result)}")
393
  if isinstance(result, dict):
@@ -415,6 +349,10 @@ class UltimateTopcoderMCPEngine:
415
  except json.JSONDecodeError:
416
  pass
417
 
 
 
 
 
418
  challenges = []
419
  for item in challenge_data_list:
420
  if isinstance(item, dict):
@@ -431,23 +369,24 @@ class UltimateTopcoderMCPEngine:
431
  def calculate_advanced_compatibility_score(self, challenge: Challenge, user_profile: UserProfile, query: str) -> tuple:
432
  score = 0.0
433
  factors = []
434
- user_skills_lower = [skill.lower().strip() for skill in user_profile.skills]
 
435
  challenge_techs_lower = [tech.lower() for tech in challenge.technologies]
436
- skill_matches = len(set(user_skills_lower) & set(challenge_techs_lower))
437
  if len(challenge.technologies) > 0:
438
- exact_match_score = (skill_matches / len(challenge.technologies)) * 30
439
- coverage_bonus = min(skill_matches * 10, 10)
440
- skill_score = exact_match_score + coverage_bonus
441
  else:
442
- skill_score = 30
443
- score += skill_score
444
- if skill_matches > 0:
445
- matched_skills = [t for t in challenge.technologies if t.lower() in user_skills_lower]
446
- factors.append(f"Strong match: uses your {', '.join(matched_skills[:2])} expertise")
447
  elif len(challenge.technologies) > 0:
448
  factors.append(f"Growth opportunity: learn {', '.join(challenge.technologies[:2])}")
449
  else:
450
- factors.append("Versatile challenge suitable for multiple skill levels")
451
  level_mapping = {'beginner': 1, 'intermediate': 2, 'advanced': 3}
452
  user_level_num = level_mapping.get(user_profile.experience_level.lower(), 2)
453
  challenge_level_num = level_mapping.get(challenge.difficulty.lower(), 2)
@@ -488,7 +427,8 @@ class UltimateTopcoderMCPEngine:
488
  return min(score, 100.0), factors
489
 
490
  def get_user_insights(self, user_profile: UserProfile) -> Dict:
491
- skills = user_profile.skills
 
492
  level = user_profile.experience_level
493
  time_available = user_profile.time_available
494
  frontend_skills = ['react', 'javascript', 'css', 'html', 'vue', 'angular', 'typescript']
@@ -497,13 +437,13 @@ class UltimateTopcoderMCPEngine:
497
  devops_skills = ['docker', 'kubernetes', 'aws', 'azure', 'terraform', 'jenkins']
498
  design_skills = ['figma', 'ui/ux', 'design', 'prototyping', 'accessibility']
499
  blockchain_skills = ['solidity', 'web3', 'ethereum', 'blockchain', 'smart contracts', 'nft']
500
- user_skills_lower = [skill.lower() for skill in skills]
501
- frontend_count = sum(1 for skill in user_skills_lower if any(fs in skill for fs in frontend_skills))
502
- backend_count = sum(1 for skill in user_skills_lower if any(bs in skill for bs in backend_skills))
503
- data_count = sum(1 for skill in user_skills_lower if any(ds in skill for ds in data_skills))
504
- devops_count = sum(1 for skill in user_skills_lower if any(ds in skill for ds in devops_skills))
505
- design_count = sum(1 for skill in user_skills_lower if any(ds in skill for ds in design_skills))
506
- blockchain_count = sum(1 for skill in user_skills_lower if any(bs in skill for bs in blockchain_skills))
507
  if blockchain_count >= 2:
508
  profile_type = "Blockchain Developer"
509
  elif frontend_count >= 2 and backend_count >= 1:
@@ -522,16 +462,16 @@ class UltimateTopcoderMCPEngine:
522
  profile_type = "Versatile Developer"
523
  insights = {
524
  'profile_type': profile_type,
525
- 'strengths': f"Strong {profile_type.lower()} with expertise in {', '.join(skills[:3]) if skills else 'multiple technologies'}",
526
- 'growth_areas': self._suggest_growth_areas(user_skills_lower, frontend_count, backend_count, data_count, devops_count, blockchain_count),
527
- 'skill_progression': f"Ready for {level.lower()} to advanced challenges based on current skill set",
528
- 'market_trends': self._get_market_trends(skills),
529
  'time_optimization': f"With {time_available}, you can complete 1-2 medium challenges or 1 large project",
530
- 'success_probability': self._calculate_success_probability(level, len(skills))
531
  }
532
  return insights
533
 
534
- def _suggest_growth_areas(self, user_skills: List[str], frontend: int, backend: int, data: int, devops: int, blockchain: int) -> str:
535
  suggestions = []
536
  if blockchain < 1 and (frontend >= 1 or backend >= 1):
537
  suggestions.append("blockchain and Web3 technologies")
@@ -539,15 +479,15 @@ class UltimateTopcoderMCPEngine:
539
  suggestions.append("cloud technologies (AWS, Docker)")
540
  if data < 1 and backend >= 1:
541
  suggestions.append("database optimization and analytics")
542
- if frontend >= 1 and "typescript" not in str(user_skills):
543
  suggestions.append("TypeScript for enhanced development")
544
- if backend >= 1 and "api" not in str(user_skills):
545
  suggestions.append("API design and microservices")
546
  if not suggestions:
547
  suggestions = ["AI/ML integration", "system design", "performance optimization"]
548
  return "Consider exploring " + ", ".join(suggestions[:3])
549
 
550
- def _get_market_trends(self, skills: List[str]) -> str:
551
  hot_skills = {
552
  'react': 'React dominates frontend with 75% job market share',
553
  'python': 'Python leads in AI/ML and backend development growth',
@@ -558,17 +498,17 @@ class UltimateTopcoderMCPEngine:
558
  'ai': 'AI integration skills in highest demand for 2024',
559
  'kubernetes': 'Container orchestration critical for enterprise roles'
560
  }
561
- for skill in skills:
562
- skill_lower = skill.lower()
563
  for hot_skill, trend in hot_skills.items():
564
- if hot_skill in skill_lower:
565
  return trend
566
  return "Full-stack and cloud skills show strongest market demand"
567
 
568
- def _calculate_success_probability(self, level: str, skill_count: int) -> str:
569
  base_score = {'beginner': 60, 'intermediate': 75, 'advanced': 85}.get(level.lower(), 70)
570
- skill_bonus = min(skill_count * 3, 15)
571
- total = base_score + skill_bonus
572
  if total >= 90:
573
  return f"{total}% - Outstanding success potential"
574
  elif total >= 80:
@@ -576,7 +516,7 @@ class UltimateTopcoderMCPEngine:
576
  elif total >= 70:
577
  return f"{total}% - Good probability of success"
578
  else:
579
- return f"{total}% - Consider skill development first"
580
 
581
  async def get_personalized_recommendations(
582
  self, user_profile: UserProfile, query: str = "",
@@ -586,30 +526,27 @@ class UltimateTopcoderMCPEngine:
586
  limit: int = 50
587
  ) -> Dict[str, Any]:
588
  start_time = datetime.now()
589
- print(f"🎯 Analyzing profile: {user_profile.skills} | Level: {user_profile.experience_level}")
590
-
591
- # FIXED: More aggressive real data fetching
592
- real_challenges = await self.fetch_real_challenges(
593
- user_profile=user_profile,
594
- query=query,
595
- limit=limit,
596
- status=status,
597
- prize_min=prize_min,
598
- prize_max=prize_max,
599
- challenge_type=challenge_type,
600
- track=track,
601
- sort_by=sort_by,
602
- sort_order=sort_order,
603
- )
604
 
605
- if real_challenges:
606
- challenges = real_challenges
 
 
 
 
 
 
 
 
 
 
 
 
607
  data_source = "πŸ”₯ REAL Topcoder MCP Server (4,596+ challenges)"
608
  print(f"πŸŽ‰ Using {len(challenges)} REAL Topcoder challenges!")
609
- else:
610
- challenges = self.mock_challenges
611
- data_source = "✨ Enhanced Intelligence Engine (Premium Dataset)"
612
- print(f"⚑ Using {len(challenges)} premium challenges with advanced algorithms")
613
 
614
  scored_challenges = []
615
  for challenge in challenges:
@@ -637,34 +574,108 @@ class UltimateTopcoderMCPEngine:
637
  "session_active": bool(self.session_id),
638
  "mcp_connected": self.is_connected,
639
  "algorithm_version": "Advanced Multi-Factor v2.0",
640
- "topcoder_total": "4,596+ live challenges" if real_challenges else "Premium dataset"
641
  }
642
  }
643
 
644
  class EnhancedLLMChatbot:
645
  """FIXED: Enhanced LLM Chatbot with OpenAI Integration + HF Secrets"""
646
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
647
  def __init__(self, mcp_engine):
648
  self.mcp_engine = mcp_engine
649
- self.conversation_context = []
650
- self.user_preferences = {}
651
 
652
  # FIXED: Use Hugging Face Secrets (environment variables)
653
  self.openai_api_key = os.getenv("OPENAI_API_KEY", "")
654
 
655
  if not self.openai_api_key:
656
- print("⚠️ OpenAI API key not found in HF secrets. Using enhanced fallback responses.")
657
  self.llm_available = False
658
  else:
659
  self.llm_available = True
660
  print("βœ… OpenAI API key loaded from HF secrets for intelligent responses")
661
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
662
  async def get_challenge_context(self, query: str, limit: int = 10) -> str:
663
  """Get relevant challenge data for LLM context"""
664
  try:
665
  # Create a basic profile for context
666
  basic_profile = UserProfile(
667
- skills=['Python', 'JavaScript'],
668
  experience_level='Intermediate',
669
  time_available='4-8 hours',
670
  interests=[query]
@@ -677,17 +688,10 @@ class EnhancedLLMChatbot:
677
  limit=limit
678
  )
679
 
680
- if not challenges:
681
- # Try fallback challenges
682
- challenges = self.mcp_engine.mock_challenges[:limit]
683
- context_source = "Enhanced Intelligence Engine"
684
- else:
685
- context_source = "Real MCP Server"
686
-
687
  # Create rich context from real data
688
  context_data = {
689
- "total_challenges_available": "4,596+" if challenges == self.mcp_engine.mock_challenges else f"{len(challenges)}+",
690
- "data_source": context_source,
691
  "sample_challenges": []
692
  }
693
 
@@ -710,172 +714,55 @@ class EnhancedLLMChatbot:
710
  return f"Challenge data temporarily unavailable: {str(e)}"
711
 
712
  async def generate_llm_response(self, user_message: str, chat_history: List) -> str:
713
- """FIXED: Generate intelligent response using OpenAI API with real MCP data"""
714
-
715
- # Get real challenge context
716
- challenge_context = await self.get_challenge_context(user_message)
717
-
718
- # Build conversation context
719
- recent_history = chat_history[-4:] if len(chat_history) > 4 else chat_history
720
- history_text = "\n".join([f"User: {h[0]}\nAssistant: {h[1]}" for h in recent_history])
721
-
722
- # Create comprehensive prompt for LLM
723
- system_prompt = f"""You are an expert Topcoder Challenge Intelligence Assistant with REAL-TIME access to live challenge data through MCP integration.
724
-
725
- REAL CHALLENGE DATA CONTEXT:
726
- {challenge_context}
727
-
728
- Your capabilities:
729
- - Access to 4,596+ live Topcoder challenges through real MCP integration
730
- - Advanced challenge matching algorithms with multi-factor scoring
731
- - Real-time prize information, difficulty levels, and technology requirements
732
- - Comprehensive skill analysis and career guidance
733
- - Market intelligence and technology trend insights
734
-
735
- CONVERSATION HISTORY:
736
- {history_text}
737
-
738
- Guidelines:
739
- - Use the REAL challenge data provided above in your responses
740
- - Reference actual challenge titles, prizes, and technologies when relevant
741
- - Provide specific, actionable advice based on real data
742
- - Mention that your data comes from live MCP integration with Topcoder
743
- - Be enthusiastic about the real-time data capabilities
744
- - If asked about specific technologies, reference actual challenges that use them
745
- - For skill questions, suggest real challenges that match their level
746
- - Keep responses concise but informative (max 300 words)
747
-
748
- User's current question: {user_message}
749
-
750
- Provide a helpful, intelligent response using the real challenge data context."""
751
-
752
- # FIXED: Try OpenAI API if available
753
- if self.llm_available:
754
- try:
755
- async with httpx.AsyncClient(timeout=30.0) as client:
756
- response = await client.post(
757
- "https://api.openai.com/v1/chat/completions", # FIXED: Correct OpenAI endpoint
758
- headers={
759
- "Content-Type": "application/json",
760
- "Authorization": f"Bearer {self.openai_api_key}" # FIXED: Proper auth header
761
- },
762
- json={
763
- "model": "gpt-4o-mini", # Fast and cost-effective
764
- "messages": [
765
- {"role": "system", "content": "You are an expert Topcoder Challenge Intelligence Assistant with real MCP data access."},
766
- {"role": "user", "content": system_prompt}
767
- ],
768
- "max_tokens": 800,
769
- "temperature": 0.7
770
- }
771
- )
772
-
773
- if response.status_code == 200:
774
- data = response.json()
775
- llm_response = data["choices"][0]["message"]["content"]
776
-
777
- # Add real-time data indicators
778
- llm_response += f"\n\n*πŸ€– Powered by OpenAI GPT-4 + Real MCP Data β€’ {len(challenge_context)} chars of live context*"
779
-
780
- return llm_response
781
- else:
782
- print(f"OpenAI API error: {response.status_code} - {response.text}")
783
- return await self.get_fallback_response_with_context(user_message, challenge_context)
784
-
785
- except Exception as e:
786
- print(f"OpenAI API error: {e}")
787
- return await self.get_fallback_response_with_context(user_message, challenge_context)
788
-
789
- # Fallback to enhanced responses with real data
790
- return await self.get_fallback_response_with_context(user_message, challenge_context)
791
-
792
- async def get_fallback_response_with_context(self, user_message: str, challenge_context: str) -> str:
793
- """Enhanced fallback using real challenge data"""
794
- message_lower = user_message.lower()
795
-
796
- # Parse challenge context for intelligent responses
797
  try:
798
- context_data = json.loads(challenge_context)
799
- challenges = context_data.get("sample_challenges", [])
800
- except:
801
- challenges = []
802
-
803
- # Technology-specific responses using real data
804
- tech_keywords = ['python', 'react', 'javascript', 'blockchain', 'ai', 'ml', 'java', 'nodejs', 'angular', 'vue']
805
- matching_tech = [tech for tech in tech_keywords if tech in message_lower]
806
-
807
- if matching_tech:
808
- relevant_challenges = []
809
- for challenge in challenges:
810
- challenge_techs = [tech.lower() for tech in challenge.get('technologies', [])]
811
- if any(tech in challenge_techs for tech in matching_tech):
812
- relevant_challenges.append(challenge)
 
 
 
 
 
 
 
 
 
 
813
 
814
- if relevant_challenges:
815
- response = f"Great question about {', '.join(matching_tech)}! πŸš€ Based on my real MCP data access, here are actual challenges:\n\n"
816
- for i, challenge in enumerate(relevant_challenges[:3], 1):
817
- response += f"🎯 **{challenge['title']}**\n"
818
- response += f" πŸ’° Prize: {challenge['prize']}\n"
819
- response += f" πŸ› οΈ Technologies: {', '.join(challenge['technologies'])}\n"
820
- response += f" πŸ“Š Difficulty: {challenge['difficulty']}\n"
821
- response += f" πŸ‘₯ Registrants: {challenge['registrants']}\n\n"
822
-
823
- response += f"*These are REAL challenges from my live MCP connection to Topcoder's database of 4,596+ challenges!*"
824
- return response
825
-
826
- # Prize/earning questions with real data
827
- if any(word in message_lower for word in ['prize', 'money', 'earn', 'pay', 'salary', 'income']):
828
- if challenges:
829
- response = f"πŸ’° Based on real MCP data, current Topcoder challenges offer:\n\n"
830
- for i, challenge in enumerate(challenges[:3], 1):
831
- response += f"{i}. **{challenge['title']}** - {challenge['prize']}\n"
832
- response += f" πŸ“Š Difficulty: {challenge['difficulty']} | πŸ‘₯ Competition: {challenge['registrants']} registered\n\n"
833
- response += f"*This is live prize data from {context_data.get('total_challenges_available', '4,596+')} real challenges!*"
834
- return response
835
-
836
- # Career/skill questions
837
- if any(word in message_lower for word in ['career', 'skill', 'learn', 'beginner', 'advanced', 'help']):
838
- if challenges:
839
- sample_challenge = challenges[0]
840
- return f"""I'm your intelligent Topcoder assistant with REAL MCP integration! πŸš€
841
-
842
- I currently have live access to {context_data.get('total_challenges_available', '4,596+')} real challenges. For example, right now there's:
843
 
844
- 🎯 **"{sample_challenge['title']}"**
845
- πŸ’° Prize: **{sample_challenge['prize']}**
846
- πŸ› οΈ Technologies: {', '.join(sample_challenge['technologies'][:3])}
847
- πŸ“Š Difficulty: {sample_challenge['difficulty']}
848
-
849
- I can help you with:
850
- 🎯 Find challenges matching your specific skills
851
- πŸ’° Compare real prize amounts and competition levels
852
- πŸ“Š Analyze difficulty levels and technology requirements
853
- πŸš€ Career guidance based on market demand
854
-
855
- Try asking me about specific technologies like "Python challenges" or "React opportunities"!
856
-
857
- *Powered by live MCP connection to Topcoder's challenge database*"""
858
-
859
- # Default intelligent response with real data
860
- if challenges:
861
- return f"""Hi! I'm your intelligent Topcoder assistant! πŸ€–
862
-
863
- I have REAL MCP integration with live access to **{context_data.get('total_challenges_available', '4,596+')} challenges** from Topcoder's database.
864
-
865
- **Currently active challenges include:**
866
- β€’ **{challenges[0]['title']}** ({challenges[0]['prize']})
867
- β€’ **{challenges[1]['title']}** ({challenges[1]['prize']})
868
- β€’ **{challenges[2]['title']}** ({challenges[2]['prize']})
869
-
870
- Ask me about:
871
- 🎯 Specific technologies (Python, React, blockchain, etc.)
872
- πŸ’° Prize ranges and earning potential
873
- πŸ“Š Difficulty levels and skill requirements
874
- πŸš€ Career advice and skill development
875
-
876
- *All responses powered by real-time Topcoder MCP data!*"""
877
-
878
- return "I'm your intelligent Topcoder assistant with real MCP data access! Ask me about challenges, skills, or career advice and I'll help you using live data from 4,596+ real challenges! πŸš€"
879
 
880
  # FIXED: Properly placed standalone functions with correct signatures
881
  async def chat_with_enhanced_llm_agent(message: str, history: List[Tuple[str, str]], mcp_engine) -> Tuple[List[Tuple[str, str]], str]:
@@ -899,7 +786,7 @@ async def chat_with_enhanced_llm_agent(message: str, history: List[Tuple[str, st
899
  return history, ""
900
 
901
  except Exception as e:
902
- error_response = f"I encountered an issue processing your request: {str(e)}. However, I can still help you with challenge recommendations using my real MCP data! Try asking about specific technologies or challenge types."
903
  history.append((message, error_response))
904
  return history, ""
905
 
@@ -1040,18 +927,18 @@ def format_insights_panel(insights: Dict) -> str:
1040
  """
1041
 
1042
  async def get_ultimate_recommendations_async(
1043
- skills_input: str, experience_level: str, time_available: str, interests: str,
1044
  status: str, prize_min: int, prize_max: int, challenge_type: str, track: str,
1045
  sort_by: str, sort_order: str
1046
  ) -> Tuple[str, str]:
1047
  start_time = time.time()
1048
  try:
1049
- skills = [skill.strip() for skill in skills_input.split(',') if skill.strip()]
 
1050
  user_profile = UserProfile(
1051
- skills=skills,
1052
  experience_level=experience_level,
1053
  time_available=time_available,
1054
- interests=[interests] if interests else []
1055
  )
1056
  # Pass all new filter params to get_personalized_recommendations
1057
  recommendations_data = await intelligence_engine.get_personalized_recommendations(
@@ -1088,7 +975,7 @@ async def get_ultimate_recommendations_async(
1088
  <div style='background:linear-gradient(135deg,#fdcb6e,#e17055);color:white;padding:25px;border-radius:12px;text-align:center;box-shadow:0 8px 25px rgba(253,203,110,0.3);'>
1089
  <div style='font-size:3em;margin-bottom:15px;'>πŸ”</div>
1090
  <div style='font-size:1.3em;font-weight:600;margin-bottom:10px;'>No perfect matches found</div>
1091
- <div style='opacity:0.9;font-size:1em;'>Try adjusting your skills, experience level, or interests for better results</div>
1092
  </div>
1093
  """
1094
  # Generate insights panel
@@ -1102,21 +989,21 @@ async def get_ultimate_recommendations_async(
1102
  error_msg = f"""
1103
  <div style='background:linear-gradient(135deg,#e17055,#d63031);color:white;padding:25px;border-radius:12px;text-align:center;box-shadow:0 8px 25px rgba(225,112,85,0.3);'>
1104
  <div style='font-size:3em;margin-bottom:15px;'>⚠</div>
1105
- <div style='font-size:1.3em;font-weight:600;margin-bottom:10px;'>Processing Error</div>
1106
  <div style='opacity:0.9;font-size:0.9em;'>{str(e)}</div>
1107
- <div style='opacity:0.8;font-size:0.85em;margin-top:10px;'>Please try again or contact support</div>
1108
  </div>
1109
  """
1110
  print(f"❌ Error processing ULTIMATE request: {str(e)}")
1111
  return error_msg, ""
1112
 
1113
  def get_ultimate_recommendations_sync(
1114
- skills_input: str, experience_level: str, time_available: str, interests: str,
1115
  status: str, prize_min: int, prize_max: int, challenge_type: str, track: str,
1116
  sort_by: str, sort_order: str
1117
  ) -> Tuple[str, str]:
1118
  return asyncio.run(get_ultimate_recommendations_async(
1119
- skills_input, experience_level, time_available, interests,
1120
  status, prize_min, prize_max, challenge_type, track,
1121
  sort_by, sort_order
1122
  ))
@@ -1135,7 +1022,7 @@ def run_ultimate_performance_test():
1135
  # Test 1: MCP Connection Test
1136
  results.append("πŸ” Test 1: Real MCP Connection Status")
1137
  start = time.time()
1138
- mcp_status = "βœ… CONNECTED" if intelligence_engine.is_connected else "⚠️ FALLBACK MODE"
1139
  session_status = f"Session: {intelligence_engine.session_id[:8]}..." if intelligence_engine.session_id else "No session"
1140
  test1_time = round(time.time() - start, 3)
1141
  results.append(f" {mcp_status} ({test1_time}s)")
@@ -1150,12 +1037,11 @@ def run_ultimate_performance_test():
1150
  # Create async test
1151
  async def test_recommendations():
1152
  test_profile = UserProfile(
1153
- skills=['Python', 'React', 'AWS'],
1154
  experience_level='Intermediate',
1155
  time_available='4-8 hours',
1156
- interests=['web development', 'cloud computing']
1157
  )
1158
- return await intelligence_engine.get_personalized_recommendations(test_profile, 'python react cloud')
1159
 
1160
  try:
1161
  # Run async test
@@ -1186,8 +1072,8 @@ def run_ultimate_performance_test():
1186
  results.append(f" πŸ€– LLM Integration: Available")
1187
  results.append(f" 🧠 Enhanced Chat: Enabled")
1188
  else:
1189
- results.append(f" πŸ€– LLM Integration: Fallback mode")
1190
- results.append(f" 🧠 Enhanced Chat: Basic responses")
1191
  results.append("")
1192
 
1193
  # Summary
@@ -1224,10 +1110,9 @@ def quick_benchmark():
1224
  # Test basic recommendation speed
1225
  async def quick_test():
1226
  test_profile = UserProfile(
1227
- skills=['Python', 'React'],
1228
  experience_level='Intermediate',
1229
  time_available='4-8 hours',
1230
- interests=['web development']
1231
  )
1232
  return await intelligence_engine.get_personalized_recommendations(test_profile)
1233
 
@@ -1268,10 +1153,10 @@ def check_mcp_status():
1268
  results.append("🎯 Features: Real-time challenge data")
1269
  results.append("⚑ Performance: Sub-second response times")
1270
  else:
1271
- results.append("⚠️ Status: FALLBACK MODE")
1272
- results.append("πŸ“Š Using: Enhanced premium dataset")
1273
- results.append("🎯 Features: Advanced algorithms active")
1274
- results.append("πŸ’‘ Note: Still provides excellent recommendations")
1275
 
1276
  # Check OpenAI API Key
1277
  has_openai = bool(os.getenv("OPENAI_API_KEY"))
@@ -1320,7 +1205,7 @@ def create_ultimate_interface():
1320
 
1321
  ### **πŸ”₯ REAL MCP Integration + Advanced AI Intelligence + OpenAI LLM**
1322
 
1323
- Experience the **world's most advanced** Topcoder challenge discovery system! Powered by **live Model Context Protocol integration** with access to **4,596+ real challenges**, **OpenAI GPT-4 intelligence**, and sophisticated AI algorithms that deliver **personalized recommendations** tailored to your exact skills and career goals.
1324
 
1325
  **🎯 What Makes This ULTIMATE:**
1326
  - **πŸ”₯ Real MCP Data**: Live connection to Topcoder's official MCP server
@@ -1341,13 +1226,6 @@ def create_ultimate_interface():
1341
  with gr.Row():
1342
  with gr.Column(scale=1):
1343
  gr.Markdown("**πŸ€– Tell the AI about yourself and filter challenges:**")
1344
-
1345
- skills_input = gr.Textbox(
1346
- label="πŸ› οΈ Your Skills & Technologies",
1347
- placeholder="Python, React, JavaScript, AWS, Docker, Blockchain, UI/UX...",
1348
- lines=3,
1349
- value="Python, JavaScript, React"
1350
- )
1351
  experience_level = gr.Dropdown(
1352
  choices=["Beginner", "Intermediate", "Advanced"],
1353
  label="πŸ“Š Experience Level",
@@ -1417,7 +1295,6 @@ def create_ultimate_interface():
1417
  ultimate_recommend_btn.click(
1418
  get_ultimate_recommendations_sync,
1419
  inputs=[
1420
- skills_input,
1421
  experience_level,
1422
  time_available,
1423
  interests,
@@ -1457,7 +1334,7 @@ def create_ultimate_interface():
1457
 
1458
  with gr.Row():
1459
  enhanced_chat_input = gr.Textbox(
1460
- placeholder="Ask me about challenges, skills, career advice, or anything else!",
1461
  container=False,
1462
  scale=4,
1463
  show_label=False
@@ -1546,7 +1423,7 @@ def create_ultimate_interface():
1546
  - **API Key Status**: {"βœ… Configured via HF Secrets" if os.getenv("OPENAI_API_KEY") else "⚠️ Set OPENAI_API_KEY in HF Secrets for full features"}
1547
 
1548
  #### 🧠 **Enhanced AI Intelligence Engine v4.0**
1549
- - **Multi-Factor Scoring**: 40% skill match + 30% experience + 20% interest + 10% market factors
1550
  - **Natural Language Processing**: Understands your goals and matches with relevant opportunities
1551
  - **Enhanced Market Intelligence**: Real-time insights on trending technologies and career paths
1552
  - **Success Prediction**: Enhanced algorithms calculate your probability of success
@@ -1569,7 +1446,7 @@ def create_ultimate_interface():
1569
  ```python
1570
  # SECURE: Hugging Face Secrets integration
1571
  openai_api_key = os.getenv("OPENAI_API_KEY", "")
1572
- endpoint = "https://api.openai.com/v1/chat/completions"
1573
  model = "gpt-4o-mini" # Fast and cost-effective
1574
  context = "Real MCP challenge data + conversation history"
1575
  ```
 
27
 
28
@dataclass
class UserProfile:
    """User profile driving challenge matching.

    NOTE(review): the former ``skills`` field was removed in this revision;
    matching is now driven entirely by ``interests``.
    """
    experience_level: str  # e.g. "Beginner" | "Intermediate" | "Advanced"
    time_available: str    # free-form time budget, e.g. "4-8 hours"
    interests: List[str]   # technology/topic keywords used for scoring
33
 
34
  class UltimateTopcoderMCPEngine:
35
+ """FIXED: Real MCP Integration - No Mock/Fallback Data"""
36
 
37
  def __init__(self):
38
  print("πŸš€ Initializing ULTIMATE Topcoder MCP Engine...")
39
  self.base_url = "https://api.topcoder.com/v6/mcp"
40
  self.session_id = None
41
  self.is_connected = False
42
+ print(f"βœ… MCP Engine initialized with live data connection")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
  def parse_sse_response(self, sse_text: str) -> Dict[str, Any]:
45
  """Parse Server-Sent Events response"""
 
109
  print("⚠️ MCP connection succeeded but no session ID found")
110
 
111
  except Exception as e:
112
+ print(f"⚠️ MCP connection failed: {e}")
113
 
114
  return False
115
 
 
272
  sort_by: str = None,
273
  sort_order: str = None,
274
  ) -> List[Challenge]:
275
+ """FIXED: Only fetch real challenges, no mock/fallback"""
276
 
277
  # Always try to connect
278
  print(f"πŸ”„ Attempting to fetch REAL challenges (limit: {limit})")
279
  connection_success = await self.initialize_connection()
280
 
281
  if not connection_success:
282
+ print("❌ Could not establish MCP connection")
283
+ raise Exception("Unable to connect to Topcoder MCP server. Please try again later.")
284
 
285
  # Build comprehensive query parameters
286
  skill_keywords = self.extract_technologies_from_query(
287
+ query + " " + " ".join(user_profile.interests) # FIXED: Only using interests, not skills
288
  )
289
 
290
  mcp_query = {
 
321
  result = await self.call_tool("query-tc-challenges", mcp_query)
322
  if not result:
323
  print("❌ No result from MCP tool call")
324
+ raise Exception("No data received from Topcoder MCP server. Please try again later.")
325
 
326
  print(f"πŸ“Š Raw MCP result type: {type(result)}")
327
  if isinstance(result, dict):
 
349
  except json.JSONDecodeError:
350
  pass
351
 
352
+ if not challenge_data_list:
353
+ print("❌ No challenge data found in MCP response")
354
+ raise Exception("No challenges found matching your criteria. Please try different filters.")
355
+
356
  challenges = []
357
  for item in challenge_data_list:
358
  if isinstance(item, dict):
 
369
  def calculate_advanced_compatibility_score(self, challenge: Challenge, user_profile: UserProfile, query: str) -> tuple:
370
  score = 0.0
371
  factors = []
372
+ # FIXED: Only using interests, not skills
373
+ user_interests_lower = [interest.lower().strip() for interest in user_profile.interests]
374
  challenge_techs_lower = [tech.lower() for tech in challenge.technologies]
375
+ interest_matches = len(set(user_interests_lower) & set(challenge_techs_lower))
376
  if len(challenge.technologies) > 0:
377
+ exact_match_score = (interest_matches / len(challenge.technologies)) * 30
378
+ coverage_bonus = min(interest_matches * 10, 10)
379
+ interest_score = exact_match_score + coverage_bonus
380
  else:
381
+ interest_score = 30
382
+ score += interest_score
383
+ if interest_matches > 0:
384
+ matched_interests = [t for t in challenge.technologies if t.lower() in user_interests_lower]
385
+ factors.append(f"Strong match: uses your {', '.join(matched_interests[:2])} interests")
386
  elif len(challenge.technologies) > 0:
387
  factors.append(f"Growth opportunity: learn {', '.join(challenge.technologies[:2])}")
388
  else:
389
+ factors.append("Versatile challenge suitable for multiple skill/interest levels")
390
  level_mapping = {'beginner': 1, 'intermediate': 2, 'advanced': 3}
391
  user_level_num = level_mapping.get(user_profile.experience_level.lower(), 2)
392
  challenge_level_num = level_mapping.get(challenge.difficulty.lower(), 2)
 
427
  return min(score, 100.0), factors
428
 
429
  def get_user_insights(self, user_profile: UserProfile) -> Dict:
430
+ # FIXED: Only using interests, not skills
431
+ interests = user_profile.interests
432
  level = user_profile.experience_level
433
  time_available = user_profile.time_available
434
  frontend_skills = ['react', 'javascript', 'css', 'html', 'vue', 'angular', 'typescript']
 
437
  devops_skills = ['docker', 'kubernetes', 'aws', 'azure', 'terraform', 'jenkins']
438
  design_skills = ['figma', 'ui/ux', 'design', 'prototyping', 'accessibility']
439
  blockchain_skills = ['solidity', 'web3', 'ethereum', 'blockchain', 'smart contracts', 'nft']
440
+ user_interests_lower = [interest.lower() for interest in interests]
441
+ frontend_count = sum(1 for interest in user_interests_lower if any(fs in interest for fs in frontend_skills))
442
+ backend_count = sum(1 for interest in user_interests_lower if any(bs in interest for bs in backend_skills))
443
+ data_count = sum(1 for interest in user_interests_lower if any(ds in interest for ds in data_skills))
444
+ devops_count = sum(1 for interest in user_interests_lower if any(ds in interest for ds in devops_skills))
445
+ design_count = sum(1 for interest in user_interests_lower if any(ds in interest for ds in design_skills))
446
+ blockchain_count = sum(1 for interest in user_interests_lower if any(bs in interest for bs in blockchain_skills))
447
  if blockchain_count >= 2:
448
  profile_type = "Blockchain Developer"
449
  elif frontend_count >= 2 and backend_count >= 1:
 
462
  profile_type = "Versatile Developer"
463
  insights = {
464
  'profile_type': profile_type,
465
+ 'strengths': f"Strong {profile_type.lower()} with expertise in {', '.join(interests[:3]) if interests else 'multiple technologies'}",
466
+ 'growth_areas': self._suggest_growth_areas(user_interests_lower, frontend_count, backend_count, data_count, devops_count, blockchain_count),
467
+ 'skill_progression': f"Ready for {level.lower()} to advanced challenges based on current skill/interest set",
468
+ 'market_trends': self._get_market_trends(interests),
469
  'time_optimization': f"With {time_available}, you can complete 1-2 medium challenges or 1 large project",
470
+ 'success_probability': self._calculate_success_probability(level, len(interests))
471
  }
472
  return insights
473
 
474
+ def _suggest_growth_areas(self, user_interests: List[str], frontend: int, backend: int, data: int, devops: int, blockchain: int) -> str:
475
  suggestions = []
476
  if blockchain < 1 and (frontend >= 1 or backend >= 1):
477
  suggestions.append("blockchain and Web3 technologies")
 
479
  suggestions.append("cloud technologies (AWS, Docker)")
480
  if data < 1 and backend >= 1:
481
  suggestions.append("database optimization and analytics")
482
+ if frontend >= 1 and "typescript" not in str(user_interests):
483
  suggestions.append("TypeScript for enhanced development")
484
+ if backend >= 1 and "api" not in str(user_interests):
485
  suggestions.append("API design and microservices")
486
  if not suggestions:
487
  suggestions = ["AI/ML integration", "system design", "performance optimization"]
488
  return "Consider exploring " + ", ".join(suggestions[:3])
489
 
490
+ def _get_market_trends(self, interests: List[str]) -> str:
491
  hot_skills = {
492
  'react': 'React dominates frontend with 75% job market share',
493
  'python': 'Python leads in AI/ML and backend development growth',
 
498
  'ai': 'AI integration skills in highest demand for 2024',
499
  'kubernetes': 'Container orchestration critical for enterprise roles'
500
  }
501
+ for interest in interests:
502
+ interest_lower = interest.lower()
503
  for hot_skill, trend in hot_skills.items():
504
+ if hot_skill in interest_lower:
505
  return trend
506
  return "Full-stack and cloud skills show strongest market demand"
507
 
508
+ def _calculate_success_probability(self, level: str, interest_count: int) -> str:
509
  base_score = {'beginner': 60, 'intermediate': 75, 'advanced': 85}.get(level.lower(), 70)
510
+ interest_bonus = min(interest_count * 3, 15)
511
+ total = base_score + interest_bonus
512
  if total >= 90:
513
  return f"{total}% - Outstanding success potential"
514
  elif total >= 80:
 
516
  elif total >= 70:
517
  return f"{total}% - Good probability of success"
518
  else:
519
+ return f"{total}% - Consider skill/interest development first"
520
 
521
  async def get_personalized_recommendations(
522
  self, user_profile: UserProfile, query: str = "",
 
526
  limit: int = 50
527
  ) -> Dict[str, Any]:
528
  start_time = datetime.now()
529
+ print(f"🎯 Analyzing profile: {user_profile.interests} | Level: {user_profile.experience_level}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
530
 
531
+ # FIXED: Only fetch real challenges, no mock/fallback
532
+ try:
533
+ challenges = await self.fetch_real_challenges(
534
+ user_profile=user_profile,
535
+ query=query,
536
+ limit=limit,
537
+ status=status,
538
+ prize_min=prize_min,
539
+ prize_max=prize_max,
540
+ challenge_type=challenge_type,
541
+ track=track,
542
+ sort_by=sort_by,
543
+ sort_order=sort_order,
544
+ )
545
  data_source = "πŸ”₯ REAL Topcoder MCP Server (4,596+ challenges)"
546
  print(f"πŸŽ‰ Using {len(challenges)} REAL Topcoder challenges!")
547
+ except Exception as e:
548
+ print(f"❌ Error fetching challenges: {str(e)}")
549
+ raise Exception(f"Unable to fetch challenges from Topcoder MCP: {str(e)}")
 
550
 
551
  scored_challenges = []
552
  for challenge in challenges:
 
574
  "session_active": bool(self.session_id),
575
  "mcp_connected": self.is_connected,
576
  "algorithm_version": "Advanced Multi-Factor v2.0",
577
+ "topcoder_total": "4,596+ live challenges"
578
  }
579
  }
580
 
581
  class EnhancedLLMChatbot:
582
  """FIXED: Enhanced LLM Chatbot with OpenAI Integration + HF Secrets"""
583
 
584
    # System prompt sent on every Responses API call (see generate_openai_response).
    LLM_INSTRUCTIONS = """You are an expert Topcoder Challenge Intelligence Assistant with REAL-TIME access to live challenge data through MCP integration.

Your capabilities:
- Access to 4,596+ live Topcoder challenges through real MCP integration
- Advanced challenge matching algorithms with multi-factor scoring
- Real-time prize information, difficulty levels, and technology requirements
- Comprehensive skill & interest analysis and career guidance
- Market intelligence and technology trend insights

Guidelines:
- Use the REAL challenge data provided above in your responses
- Reference actual challenge titles, prizes, and technologies when relevant
- Provide specific, actionable advice based on real data
- Mention that your data comes from live MCP integration with Topcoder
- Be enthusiastic about the real-time data capabilities
- If asked about specific technologies, reference actual challenges that use them
- For skill & interest questions, suggest real challenges that match their level
- Keep responses concise but informative (max 300 words)

Provide a helpful, intelligent response using the real challenge data context."""

    # Footer appended to every chat reply; generate_llm_response also splits on
    # this exact text to strip footers out of the history it replays.
    FOOTER_TEXT = "πŸ€– Powered by OpenAI GPT-4 + Real MCP Data"

    # Function-tool schema advertised to the model via the Responses API
    # "tools" field; the model may emit a function_call for get_challenge_context.
    LLM_TOOLS = [
        {
            "type": "function",
            "name": "get_challenge_context",
            "description": "Query challenges via Topcoder API",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search query for challenges. e.g. python, react, etc."},
                    "limit": {"type": "integer", "description": "Maximum number of challenges to return", "default": 10}
                },
                "required": ["query"]
            }
        }
    ]
622
+
623
  def __init__(self, mcp_engine):
624
  self.mcp_engine = mcp_engine
 
 
625
 
626
  # FIXED: Use Hugging Face Secrets (environment variables)
627
  self.openai_api_key = os.getenv("OPENAI_API_KEY", "")
628
 
629
  if not self.openai_api_key:
630
+ print("⚠️ OpenAI API key not found in HF secrets. Chat will show error messages.")
631
  self.llm_available = False
632
  else:
633
  self.llm_available = True
634
  print("βœ… OpenAI API key loaded from HF secrets for intelligent responses")
635
+
636
+ async def generate_openai_response(self, input_list: List[Dict]) -> Dict:
637
+ """Reusable function to call the OpenAI API."""
638
+ headers = {
639
+ "Content-Type": "application/json",
640
+ "Authorization": f"Bearer {self.openai_api_key}"
641
+ }
642
+ body = {
643
+ "model": "gpt-4o-mini",
644
+ "input": input_list,
645
+ "store": False,
646
+ "tools": self.LLM_TOOLS,
647
+ "instructions": self.LLM_INSTRUCTIONS
648
+ }
649
+ print("πŸš€ Sending request to OpenAI API...")
650
+ async with httpx.AsyncClient(timeout=30.0) as client:
651
+ response = await client.post(
652
+ "https://api.openai.com/v1/responses",
653
+ headers=headers,
654
+ json=body
655
+ )
656
+ print(f"πŸ“‘ Received OpenAI response with status: {response.status_code}")
657
+ if response.status_code == 200:
658
+ return response.json()
659
+ else:
660
+ print(f"OpenAI API error: {response.status_code} - {response.text}")
661
+ raise Exception(f"❌ **OpenAI API Error** (Status {response.status_code}): Unable to generate response. Please try again later or check your API key configuration.")
662
+
663
+ def extract_response_text(self, data: Dict) -> str:
664
+ """Safely extracts the response text from the API data."""
665
+ print("πŸ“„ Parsing OpenAI response text...")
666
+ try:
667
+ response_text = data["output"][0]["content"][0]["text"]
668
+ print("βœ… Successfully extracted response text.")
669
+ return response_text
670
+ except (KeyError, IndexError):
671
+ print("⚠️ Failed to extract response text, returning default message.")
672
+ return "I apologize, but I couldn't generate a response. Please try again."
673
+
674
  async def get_challenge_context(self, query: str, limit: int = 10) -> str:
675
  """Get relevant challenge data for LLM context"""
676
  try:
677
  # Create a basic profile for context
678
  basic_profile = UserProfile(
 
679
  experience_level='Intermediate',
680
  time_available='4-8 hours',
681
  interests=[query]
 
688
  limit=limit
689
  )
690
 
 
 
 
 
 
 
 
691
  # Create rich context from real data
692
  context_data = {
693
+ "total_challenges_available": "4,596+",
694
+ "data_source": "Real MCP Server",
695
  "sample_challenges": []
696
  }
697
 
 
714
  return f"Challenge data temporarily unavailable: {str(e)}"
715
 
716
    async def generate_llm_response(self, user_message: str, chat_history: List) -> str:
        """Send a message to the conversation using Responses API"""
        if not self.llm_available:
            raise Exception("OpenAI API key not configured. Please set it in Hugging Face Secrets.")

        # Rebuild the Responses API input from prior turns, stripping the
        # footer appended to every bot reply so it is not fed back to the model.
        input_list = []
        for user_msg, bot_resp in chat_history:
            bot_resp_cleaned = bot_resp.split(f"\n\n*{self.FOOTER_TEXT}")[0]
            input_list.append({"role": "user", "content": user_msg})
            input_list.append({"role": "assistant", "content": bot_resp_cleaned})
        input_list.append({"role": "user", "content": user_message})

        print("πŸ€– Generating LLM response...")
        try:
            # First model pass: may answer directly or request the challenge tool.
            data = await self.generate_openai_response(input_list)
            input_list += data.get("output", [])

            tool_result = None
            function_call_found = False
            for item in data.get("output", []):
                if item.get("type") == "function_call" and item.get("name") == "get_challenge_context":
                    print("πŸ” Function call detected, processing tool...")
                    function_call_found = True
                    tool_args = json.loads(item.get("arguments", "{}"))
                    query = tool_args.get("query", "")
                    limit = tool_args.get("limit", 10)

                    # Fetch live challenge context and feed it back as the
                    # tool's output, keyed by the model's call_id.
                    tool_result = await self.get_challenge_context(query, limit)
                    print(f"πŸ”§ Tool result: {json.dumps(tool_result, indent=2) if tool_result else 'No data returned'}")
                    input_list.append({
                        "type": "function_call_output",
                        "call_id": item.get("call_id"),
                        "output": json.dumps({"challenges": tool_result})
                    })

            if function_call_found:
                # Second model pass so the model can incorporate the tool output.
                data = await self.generate_openai_response(input_list)

            llm_response = self.extract_response_text(data)

            # Append the provenance footer (the same text stripped from history above).
            footer_text = self.FOOTER_TEXT
            if tool_result:
                footer_text += f" β€’ {len(str(tool_result))} chars of live context"
            llm_response += f"\n\n*{footer_text}*"
            print("βœ… LLM response generated successfully.")
            return llm_response

        except Exception as e:
            # Re-raise with a user-facing prefix; the Gradio handler shows it in chat.
            print(f"Chat error: {e}")
            raise Exception(f"❌ **Chat Error**: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
766
 
767
  # FIXED: Properly placed standalone functions with correct signatures
768
  async def chat_with_enhanced_llm_agent(message: str, history: List[Tuple[str, str]], mcp_engine) -> Tuple[List[Tuple[str, str]], str]:
 
786
  return history, ""
787
 
788
  except Exception as e:
789
+ error_response = f"I encountered an issue processing your request: {str(e)}."
790
  history.append((message, error_response))
791
  return history, ""
792
 
 
927
  """
928
 
929
  async def get_ultimate_recommendations_async(
930
+ experience_level: str, time_available: str, interests: str,
931
  status: str, prize_min: int, prize_max: int, challenge_type: str, track: str,
932
  sort_by: str, sort_order: str
933
  ) -> Tuple[str, str]:
934
  start_time = time.time()
935
  try:
936
+ # FIXED: Removed skills_input parameter, only using interests
937
+ interest_list = [interest.strip() for interest in interests.split(',') if interest.strip()]
938
  user_profile = UserProfile(
 
939
  experience_level=experience_level,
940
  time_available=time_available,
941
+ interests=interest_list
942
  )
943
  # Pass all new filter params to get_personalized_recommendations
944
  recommendations_data = await intelligence_engine.get_personalized_recommendations(
 
975
  <div style='background:linear-gradient(135deg,#fdcb6e,#e17055);color:white;padding:25px;border-radius:12px;text-align:center;box-shadow:0 8px 25px rgba(253,203,110,0.3);'>
976
  <div style='font-size:3em;margin-bottom:15px;'>πŸ”</div>
977
  <div style='font-size:1.3em;font-weight:600;margin-bottom:10px;'>No perfect matches found</div>
978
+ <div style='opacity:0.9;font-size:1em;'>Try adjusting your interests, experience level, or filters for better results</div>
979
  </div>
980
  """
981
  # Generate insights panel
 
989
  error_msg = f"""
990
  <div style='background:linear-gradient(135deg,#e17055,#d63031);color:white;padding:25px;border-radius:12px;text-align:center;box-shadow:0 8px 25px rgba(225,112,85,0.3);'>
991
  <div style='font-size:3em;margin-bottom:15px;'>⚠</div>
992
+ <div style='font-size:1.3em;font-weight:600;margin-bottom:10px;'>No recommendations found</div>
993
  <div style='opacity:0.9;font-size:0.9em;'>{str(e)}</div>
994
+ <div style='opacity:0.8;font-size:0.85em;margin-top:10px;'>If problem persists, contact support.</div>
995
  </div>
996
  """
997
  print(f"❌ Error processing ULTIMATE request: {str(e)}")
998
  return error_msg, ""
999
 
1000
  def get_ultimate_recommendations_sync(
1001
+ experience_level: str, time_available: str, interests: str,
1002
  status: str, prize_min: int, prize_max: int, challenge_type: str, track: str,
1003
  sort_by: str, sort_order: str
1004
  ) -> Tuple[str, str]:
1005
  return asyncio.run(get_ultimate_recommendations_async(
1006
+ experience_level, time_available, interests,
1007
  status, prize_min, prize_max, challenge_type, track,
1008
  sort_by, sort_order
1009
  ))
 
1022
  # Test 1: MCP Connection Test
1023
  results.append("πŸ” Test 1: Real MCP Connection Status")
1024
  start = time.time()
1025
+ mcp_status = "βœ… CONNECTED" if intelligence_engine.is_connected else "⚠️ NOT CONNECTED"
1026
  session_status = f"Session: {intelligence_engine.session_id[:8]}..." if intelligence_engine.session_id else "No session"
1027
  test1_time = round(time.time() - start, 3)
1028
  results.append(f" {mcp_status} ({test1_time}s)")
 
1037
  # Create async test
1038
  async def test_recommendations():
1039
  test_profile = UserProfile(
 
1040
  experience_level='Intermediate',
1041
  time_available='4-8 hours',
1042
+ interests=['python', 'react', 'cloud']
1043
  )
1044
+ return await intelligence_engine.get_personalized_recommendations(test_profile, 'python')
1045
 
1046
  try:
1047
  # Run async test
 
1072
  results.append(f" πŸ€– LLM Integration: Available")
1073
  results.append(f" 🧠 Enhanced Chat: Enabled")
1074
  else:
1075
+ results.append(f" πŸ€– LLM Integration: Not Available")
1076
+ results.append(f" 🧠 Enhanced Chat: Not Available")
1077
  results.append("")
1078
 
1079
  # Summary
 
1110
  # Test basic recommendation speed
1111
  async def quick_test():
1112
  test_profile = UserProfile(
 
1113
  experience_level='Intermediate',
1114
  time_available='4-8 hours',
1115
+ interests=['web development', 'Python', 'React']
1116
  )
1117
  return await intelligence_engine.get_personalized_recommendations(test_profile)
1118
 
 
1153
  results.append("🎯 Features: Real-time challenge data")
1154
  results.append("⚑ Performance: Sub-second response times")
1155
  else:
1156
+ results.append("⚠️ Status: NOT CONNECTED")
1157
+ results.append("πŸ“Š Using: No data available")
1158
+ results.append("🎯 Features: MCP connection required")
1159
+ results.append("πŸ’‘ Note: Please check your connection")
1160
 
1161
  # Check OpenAI API Key
1162
  has_openai = bool(os.getenv("OPENAI_API_KEY"))
 
1205
 
1206
  ### **πŸ”₯ REAL MCP Integration + Advanced AI Intelligence + OpenAI LLM**
1207
 
1208
+ Experience the **world's most advanced** Topcoder challenge discovery system! Powered by **live Model Context Protocol integration** with access to **4,596+ real challenges**, **OpenAI GPT-4 intelligence**, and sophisticated AI algorithms that deliver **personalized recommendations** tailored to your exact skills, interests and career goals.
1209
 
1210
  **🎯 What Makes This ULTIMATE:**
1211
  - **πŸ”₯ Real MCP Data**: Live connection to Topcoder's official MCP server
 
1226
  with gr.Row():
1227
  with gr.Column(scale=1):
1228
  gr.Markdown("**πŸ€– Tell the AI about yourself and filter challenges:**")
 
 
 
 
 
 
 
1229
  experience_level = gr.Dropdown(
1230
  choices=["Beginner", "Intermediate", "Advanced"],
1231
  label="πŸ“Š Experience Level",
 
1295
  ultimate_recommend_btn.click(
1296
  get_ultimate_recommendations_sync,
1297
  inputs=[
 
1298
  experience_level,
1299
  time_available,
1300
  interests,
 
1334
 
1335
  with gr.Row():
1336
  enhanced_chat_input = gr.Textbox(
1337
+ placeholder="Ask me about challenges, skills, interests, career advice, or anything else!",
1338
  container=False,
1339
  scale=4,
1340
  show_label=False
 
1423
  - **API Key Status**: {"βœ… Configured via HF Secrets" if os.getenv("OPENAI_API_KEY") else "⚠️ Set OPENAI_API_KEY in HF Secrets for full features"}
1424
 
1425
  #### 🧠 **Enhanced AI Intelligence Engine v4.0**
1426
+ - **Multi-Factor Scoring**: 40% interest match + 30% experience + 20% query + 10% market factors
1427
  - **Natural Language Processing**: Understands your goals and matches with relevant opportunities
1428
  - **Enhanced Market Intelligence**: Real-time insights on trending technologies and career paths
1429
  - **Success Prediction**: Enhanced algorithms calculate your probability of success
 
1446
  ```python
1447
  # SECURE: Hugging Face Secrets integration
1448
  openai_api_key = os.getenv("OPENAI_API_KEY", "")
1449
+ endpoint = "https://api.openai.com/v1/responses"
1450
  model = "gpt-4o-mini" # Fast and cost-effective
1451
  context = "Real MCP challenge data + conversation history"
1452
  ```