pranavkv commited on
Commit
eba8d1a
·
verified ·
1 Parent(s): 0edd224

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +375 -1162
app.py CHANGED
@@ -1,7 +1,6 @@
1
  """
2
- ULTIMATE Topcoder Challenge Intelligence Assistant
3
- Combining ALL advanced features with REAL MCP Integration + OpenAI LLM
4
- FIXED VERSION - Hugging Face Compatible with Secrets Management
5
  """
6
  import asyncio
7
  import httpx
@@ -34,96 +33,26 @@ class UserProfile:
34
  interests: List[str]
35
 
36
  class UltimateTopcoderMCPEngine:
37
- """ULTIMATE MCP Engine - Real Data + Advanced Intelligence"""
38
 
39
  def __init__(self):
40
- print("🚀 Initializing ULTIMATE Topcoder Intelligence Engine...")
41
  self.base_url = "https://api.topcoder-dev.com/v6/mcp"
42
  self.session_id = None
43
  self.is_connected = False
44
- self.mock_challenges = self._create_enhanced_fallback_challenges()
45
- print(f"✅ Loaded fallback system with {len(self.mock_challenges)} premium challenges")
46
-
47
- def _create_enhanced_fallback_challenges(self) -> List[Challenge]:
48
- return [
49
- Challenge(
50
- id="30174840",
51
- title="React Component Library Development",
52
- description="Build a comprehensive React component library with TypeScript support and Storybook documentation. Perfect for developers looking to create reusable UI components.",
53
- technologies=["React", "TypeScript", "Storybook", "CSS", "Jest"],
54
- difficulty="Intermediate",
55
- prize="$3,000",
56
- time_estimate="14 days",
57
- registrants=45
58
- ),
59
- Challenge(
60
- id="30174841",
61
- title="Python API Performance Optimization",
62
- description="Optimize existing Python FastAPI application for better performance and scalability. Focus on database queries, caching strategies, and async processing.",
63
- technologies=["Python", "FastAPI", "PostgreSQL", "Redis", "Docker"],
64
- difficulty="Advanced",
65
- prize="$5,000",
66
- time_estimate="21 days",
67
- registrants=28
68
- ),
69
- Challenge(
70
- id="30174842",
71
- title="Mobile App UI/UX Design",
72
- description="Design modern, accessible mobile app interface with dark mode support and responsive layouts for both iOS and Android platforms.",
73
- technologies=["Figma", "UI/UX", "Mobile Design", "Accessibility", "Prototyping"],
74
- difficulty="Beginner",
75
- prize="$2,000",
76
- time_estimate="10 days",
77
- registrants=67
78
- ),
79
- Challenge(
80
- id="30174843",
81
- title="Blockchain Smart Contract Development",
82
- description="Develop secure smart contracts for DeFi applications with comprehensive testing suite and gas optimization techniques.",
83
- technologies=["Solidity", "Web3", "JavaScript", "Hardhat", "Testing"],
84
- difficulty="Advanced",
85
- prize="$7,500",
86
- time_estimate="28 days",
87
- registrants=19
88
- ),
89
- Challenge(
90
- id="30174844",
91
- title="Data Visualization Dashboard",
92
- description="Create interactive data visualization dashboard using modern charting libraries with real-time data updates and export capabilities.",
93
- technologies=["D3.js", "JavaScript", "HTML", "CSS", "Chart.js"],
94
- difficulty="Intermediate",
95
- prize="$4,000",
96
- time_estimate="18 days",
97
- registrants=33
98
- ),
99
- Challenge(
100
- id="30174845",
101
- title="Machine Learning Model Deployment",
102
- description="Deploy ML models to production with API endpoints, monitoring, and auto-scaling capabilities using cloud platforms.",
103
- technologies=["Python", "TensorFlow", "Docker", "Kubernetes", "AWS"],
104
- difficulty="Advanced",
105
- prize="$6,000",
106
- time_estimate="25 days",
107
- registrants=24
108
- )
109
- ]
110
-
111
- def parse_sse_response(self, sse_text: str) -> Dict[str, Any]:
112
- lines = sse_text.strip().split('\n')
113
- for line in lines:
114
- line = line.strip()
115
- if line.startswith('data:'):
116
- data_content = line[5:].strip()
117
- try:
118
- return json.loads(data_content)
119
- except json.JSONDecodeError:
120
- pass
121
- return None
122
 
123
  async def initialize_connection(self) -> bool:
124
- if self.is_connected:
 
 
125
  return True
126
 
 
 
 
127
  headers = {
128
  "Accept": "application/json, text/event-stream, */*",
129
  "Accept-Language": "en-US,en;q=0.9",
@@ -146,47 +75,91 @@ class UltimateTopcoderMCPEngine:
146
  "roots": {"listChanged": True}
147
  },
148
  "clientInfo": {
149
- "name": "ultimate-topcoder-intelligence-assistant",
150
  "version": "2.0.0"
151
  }
152
  }
153
  }
154
 
155
  try:
156
- async with httpx.AsyncClient(timeout=10.0) as client:
 
157
  response = await client.post(
158
  f"{self.base_url}/mcp",
159
  json=init_request,
160
  headers=headers
161
  )
162
 
 
 
163
  if response.status_code == 200:
 
164
  response_headers = dict(response.headers)
165
- if 'mcp-session-id' in response_headers:
166
- self.session_id = response_headers['mcp-session-id']
167
- self.is_connected = True
168
- print(f"✅ Real MCP connection established: {self.session_id[:8]}...")
169
- return True
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
170
 
171
  except Exception as e:
172
- print(f"⚠️ MCP connection failed, using enhanced fallback: {e}")
173
 
 
 
 
 
 
 
174
  return False
175
 
176
  async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Optional[Dict]:
 
177
  if not self.session_id:
 
178
  return None
179
 
180
  headers = {
181
  "Accept": "application/json, text/event-stream, */*",
182
  "Content-Type": "application/json",
183
  "Origin": "https://modelcontextprotocol.io",
184
- "mcp-session-id": self.session_id
 
 
 
185
  }
186
 
187
  tool_request = {
188
  "jsonrpc": "2.0",
189
- "id": int(datetime.now().timestamp()),
190
  "method": "tools/call",
191
  "params": {
192
  "name": tool_name,
@@ -194,110 +167,178 @@ class UltimateTopcoderMCPEngine:
194
  }
195
  }
196
 
 
 
197
  try:
198
- async with httpx.AsyncClient(timeout=30.0) as client:
199
  response = await client.post(
200
  f"{self.base_url}/mcp",
201
  json=tool_request,
202
  headers=headers
203
  )
204
 
 
 
205
  if response.status_code == 200:
206
- if "text/event-stream" in response.headers.get("content-type", ""):
207
- sse_data = self.parse_sse_response(response.text)
208
- if sse_data and "result" in sse_data:
209
- return sse_data["result"]
 
 
 
 
 
 
 
 
 
 
 
 
 
210
  else:
211
- json_data = response.json()
212
- if "result" in json_data:
213
- return json_data["result"]
 
 
 
 
 
 
 
 
 
 
 
214
 
215
- except Exception:
216
- pass
217
 
218
  return None
219
 
220
  def convert_topcoder_challenge(self, tc_data: Dict) -> Challenge:
221
- challenge_id = str(tc_data.get('id', 'unknown'))
222
- title = tc_data.get('name', 'Topcoder Challenge')
223
- description = tc_data.get('description', 'Challenge description not available')
224
-
225
- technologies = []
226
- skills = tc_data.get('skills', [])
227
- for skill in skills:
228
- if isinstance(skill, dict) and 'name' in skill:
229
- technologies.append(skill['name'])
230
-
231
- if 'technologies' in tc_data:
232
- tech_list = tc_data['technologies']
233
- if isinstance(tech_list, list):
234
- for tech in tech_list:
235
- if isinstance(tech, dict) and 'name' in tech:
236
- technologies.append(tech['name'])
237
- elif isinstance(tech, str):
238
- technologies.append(tech)
239
-
240
- total_prize = 0
241
- prize_sets = tc_data.get('prizeSets', [])
242
- for prize_set in prize_sets:
243
- if prize_set.get('type') == 'placement':
244
- prizes = prize_set.get('prizes', [])
245
- for prize in prizes:
246
- if prize.get('type') == 'USD':
247
- total_prize += prize.get('value', 0)
248
-
249
- prize = f"${total_prize:,}" if total_prize > 0 else "Merit-based"
250
-
251
- challenge_type = tc_data.get('type', 'Unknown')
252
- difficulty_mapping = {
253
- 'First2Finish': 'Beginner',
254
- 'Code': 'Intermediate',
255
- 'Assembly Competition': 'Advanced',
256
- 'UI Prototype Competition': 'Intermediate',
257
- 'Copilot Posting': 'Beginner',
258
- 'Bug Hunt': 'Beginner',
259
- 'Test Suites': 'Intermediate'
260
- }
261
- difficulty = difficulty_mapping.get(challenge_type, 'Intermediate')
262
-
263
- time_estimate = "Variable duration"
264
- registrants = tc_data.get('numOfRegistrants', 0)
265
- status = tc_data.get('status', '')
266
- if status == 'Completed':
267
- time_estimate = "Recently completed"
268
- elif status in ['Active', 'Draft']:
269
- time_estimate = "Active challenge"
270
-
271
- return Challenge(
272
- id=challenge_id,
273
- title=title,
274
- description=description[:300] + "..." if len(description) > 300 else description,
275
- technologies=technologies,
276
- difficulty=difficulty,
277
- prize=prize,
278
- time_estimate=time_estimate,
279
- registrants=registrants
280
- )
281
 
282
- def extract_technologies_from_query(self, query: str) -> List[str]:
283
- tech_keywords = {
284
- 'python', 'java', 'javascript', 'react', 'node', 'angular', 'vue',
285
- 'aws', 'docker', 'kubernetes', 'api', 'rest', 'graphql', 'sql',
286
- 'mongodb', 'postgresql', 'machine learning', 'ai', 'blockchain',
287
- 'ios', 'android', 'flutter', 'swift', 'kotlin', 'c++', 'c#',
288
- 'ruby', 'php', 'go', 'rust', 'typescript', 'html', 'css',
289
- 'nft', 'non-fungible tokens', 'ethereum', 'smart contracts', 'solidity',
290
- 'figma', 'ui/ux', 'design', 'testing', 'jest', 'hardhat', 'web3',
291
- 'fastapi', 'django', 'flask', 'redis', 'tensorflow', 'd3.js', 'chart.js'
292
- }
293
- query_lower = query.lower()
294
- found_techs = [tech for tech in tech_keywords if tech in query_lower]
295
- return found_techs
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
296
 
297
  async def fetch_real_challenges(
298
  self,
299
- user_profile: UserProfile,
300
- query: str,
301
  limit: int = 30,
302
  status: str = None,
303
  prize_min: int = None,
@@ -306,18 +347,27 @@ class UltimateTopcoderMCPEngine:
306
  track: str = None,
307
  sort_by: str = None,
308
  sort_order: str = None,
 
309
  ) -> List[Challenge]:
310
- if not await self.initialize_connection():
 
 
 
 
 
 
 
311
  return []
312
 
313
- skill_keywords = self.extract_technologies_from_query(
314
- query + " " + " ".join(user_profile.skills + user_profile.interests)
315
- )
316
-
317
  mcp_query = {
318
- "perPage": limit,
 
319
  }
320
- mcp_query["status"] = status if status else "Active"
 
 
 
321
  if prize_min is not None:
322
  mcp_query["totalPrizesFrom"] = prize_min
323
  if prize_max is not None:
@@ -326,35 +376,49 @@ class UltimateTopcoderMCPEngine:
326
  mcp_query["type"] = challenge_type
327
  if track:
328
  mcp_query["track"] = track
329
- if skill_keywords:
330
- mcp_query["tags"] = skill_keywords
331
- if query.strip():
332
- mcp_query["search"] = query.strip()
333
- mcp_query["sortBy"] = sort_by if sort_by else "overview.totalPrizes"
334
- mcp_query["sortOrder"] = sort_order if sort_order else "desc"
 
 
335
 
 
336
  result = await self.call_tool("query-tc-challenges", mcp_query)
 
337
  if not result:
 
338
  return []
339
 
 
 
 
340
  challenge_data_list = []
341
- if "structuredContent" in result:
342
- structured = result["structuredContent"]
343
- if isinstance(structured, dict) and "data" in structured:
344
- challenge_data_list = structured["data"]
345
- elif "data" in result:
346
- challenge_data_list = result["data"]
347
- elif "content" in result and len(result["content"]) > 0:
348
- content_item = result["content"][0]
349
- if isinstance(content_item, dict) and content_item.get("type") == "text":
350
- try:
351
- text_content = content_item.get("text", "")
352
- parsed_data = json.loads(text_content)
353
- if "data" in parsed_data:
354
- challenge_data_list = parsed_data["data"]
355
- except json.JSONDecodeError:
356
- pass
 
 
 
 
 
357
 
 
358
  challenges = []
359
  for item in challenge_data_list:
360
  if isinstance(item, dict):
@@ -362,24 +426,37 @@ class UltimateTopcoderMCPEngine:
362
  challenge = self.convert_topcoder_challenge(item)
363
  challenges.append(challenge)
364
  except Exception as e:
365
- print(f"Error converting challenge: {e}")
366
  continue
 
 
367
 
 
 
 
 
 
368
  return challenges
369
 
370
  def calculate_advanced_compatibility_score(self, challenge: Challenge, user_profile: UserProfile, query: str) -> tuple:
 
371
  score = 0.0
372
  factors = []
 
 
373
  user_skills_lower = [skill.lower().strip() for skill in user_profile.skills]
374
  challenge_techs_lower = [tech.lower() for tech in challenge.technologies]
375
  skill_matches = len(set(user_skills_lower) & set(challenge_techs_lower))
 
376
  if len(challenge.technologies) > 0:
377
  exact_match_score = (skill_matches / len(challenge.technologies)) * 30
378
  coverage_bonus = min(skill_matches * 10, 10)
379
  skill_score = exact_match_score + coverage_bonus
380
  else:
381
  skill_score = 30
 
382
  score += skill_score
 
383
  if skill_matches > 0:
384
  matched_skills = [t for t in challenge.technologies if t.lower() in user_skills_lower]
385
  factors.append(f"Strong match: uses your {', '.join(matched_skills[:2])} expertise")
@@ -387,10 +464,13 @@ class UltimateTopcoderMCPEngine:
387
  factors.append(f"Growth opportunity: learn {', '.join(challenge.technologies[:2])}")
388
  else:
389
  factors.append("Versatile challenge suitable for multiple skill levels")
 
 
390
  level_mapping = {'beginner': 1, 'intermediate': 2, 'advanced': 3}
391
  user_level_num = level_mapping.get(user_profile.experience_level.lower(), 2)
392
  challenge_level_num = level_mapping.get(challenge.difficulty.lower(), 2)
393
  level_diff = abs(user_level_num - challenge_level_num)
 
394
  if level_diff == 0:
395
  level_score = 30
396
  factors.append(f"Perfect {user_profile.experience_level} level match")
@@ -400,7 +480,10 @@ class UltimateTopcoderMCPEngine:
400
  else:
401
  level_score = 5
402
  factors.append("Stretch challenge with significant learning curve")
 
403
  score += level_score
 
 
404
  query_techs = self.extract_technologies_from_query(query)
405
  if query_techs:
406
  query_matches = len(set([tech.lower() for tech in query_techs]) & set(challenge_techs_lower))
@@ -412,110 +495,41 @@ class UltimateTopcoderMCPEngine:
412
  factors.append(f"Directly matches your interest in {', '.join(query_techs[:2])}")
413
  else:
414
  query_score = 10
 
415
  score += query_score
 
 
416
  try:
417
  prize_numeric = 0
418
  if challenge.prize.startswith('$'):
419
  prize_str = challenge.prize[1:].replace(',', '')
420
  prize_numeric = int(prize_str) if prize_str.isdigit() else 0
 
421
  prize_score = min(prize_numeric / 1000 * 2, 8)
422
  competition_bonus = 2 if 20 <= challenge.registrants <= 50 else 0
423
  market_score = prize_score + competition_bonus
424
  except:
425
  market_score = 5
 
426
  score += market_score
 
427
  return min(score, 100.0), factors
428
 
429
- def get_user_insights(self, user_profile: UserProfile) -> Dict:
430
- skills = user_profile.skills
431
- level = user_profile.experience_level
432
- time_available = user_profile.time_available
433
- frontend_skills = ['react', 'javascript', 'css', 'html', 'vue', 'angular', 'typescript']
434
- backend_skills = ['python', 'java', 'node', 'fastapi', 'django', 'flask', 'php', 'ruby']
435
- data_skills = ['sql', 'postgresql', 'mongodb', 'redis', 'elasticsearch', 'tensorflow']
436
- devops_skills = ['docker', 'kubernetes', 'aws', 'azure', 'terraform', 'jenkins']
437
- design_skills = ['figma', 'ui/ux', 'design', 'prototyping', 'accessibility']
438
- blockchain_skills = ['solidity', 'web3', 'ethereum', 'blockchain', 'smart contracts', 'nft']
439
- user_skills_lower = [skill.lower() for skill in skills]
440
- frontend_count = sum(1 for skill in user_skills_lower if any(fs in skill for fs in frontend_skills))
441
- backend_count = sum(1 for skill in user_skills_lower if any(bs in skill for bs in backend_skills))
442
- data_count = sum(1 for skill in user_skills_lower if any(ds in skill for ds in data_skills))
443
- devops_count = sum(1 for skill in user_skills_lower if any(ds in skill for ds in devops_skills))
444
- design_count = sum(1 for skill in user_skills_lower if any(ds in skill for ds in design_skills))
445
- blockchain_count = sum(1 for skill in user_skills_lower if any(bs in skill for bs in blockchain_skills))
446
- if blockchain_count >= 2:
447
- profile_type = "Blockchain Developer"
448
- elif frontend_count >= 2 and backend_count >= 1:
449
- profile_type = "Full-Stack Developer"
450
- elif design_count >= 2:
451
- profile_type = "UI/UX Designer"
452
- elif frontend_count >= 2:
453
- profile_type = "Frontend Specialist"
454
- elif backend_count >= 2:
455
- profile_type = "Backend Developer"
456
- elif data_count >= 2:
457
- profile_type = "Data Engineer"
458
- elif devops_count >= 2:
459
- profile_type = "DevOps Engineer"
460
- else:
461
- profile_type = "Versatile Developer"
462
- insights = {
463
- 'profile_type': profile_type,
464
- 'strengths': f"Strong {profile_type.lower()} with expertise in {', '.join(skills[:3]) if skills else 'multiple technologies'}",
465
- 'growth_areas': self._suggest_growth_areas(user_skills_lower, frontend_count, backend_count, data_count, devops_count, blockchain_count),
466
- 'skill_progression': f"Ready for {level.lower()} to advanced challenges based on current skill set",
467
- 'market_trends': self._get_market_trends(skills),
468
- 'time_optimization': f"With {time_available}, you can complete 1-2 medium challenges or 1 large project",
469
- 'success_probability': self._calculate_success_probability(level, len(skills))
470
- }
471
- return insights
472
-
473
- def _suggest_growth_areas(self, user_skills: List[str], frontend: int, backend: int, data: int, devops: int, blockchain: int) -> str:
474
- suggestions = []
475
- if blockchain < 1 and (frontend >= 1 or backend >= 1):
476
- suggestions.append("blockchain and Web3 technologies")
477
- if devops < 1:
478
- suggestions.append("cloud technologies (AWS, Docker)")
479
- if data < 1 and backend >= 1:
480
- suggestions.append("database optimization and analytics")
481
- if frontend >= 1 and "typescript" not in str(user_skills):
482
- suggestions.append("TypeScript for enhanced development")
483
- if backend >= 1 and "api" not in str(user_skills):
484
- suggestions.append("API design and microservices")
485
- if not suggestions:
486
- suggestions = ["AI/ML integration", "system design", "performance optimization"]
487
- return "Consider exploring " + ", ".join(suggestions[:3])
488
-
489
- def _get_market_trends(self, skills: List[str]) -> str:
490
- hot_skills = {
491
- 'react': 'React dominates frontend with 75% job market share',
492
- 'python': 'Python leads in AI/ML and backend development growth',
493
- 'typescript': 'TypeScript adoption accelerating at 40% annually',
494
- 'docker': 'Containerization skills essential for 90% of roles',
495
- 'aws': 'Cloud expertise commands 25% salary premium',
496
- 'blockchain': 'Web3 development seeing explosive 200% growth',
497
- 'ai': 'AI integration skills in highest demand for 2024',
498
- 'kubernetes': 'Container orchestration critical for enterprise roles'
499
  }
500
- for skill in skills:
501
- skill_lower = skill.lower()
502
- for hot_skill, trend in hot_skills.items():
503
- if hot_skill in skill_lower:
504
- return trend
505
- return "Full-stack and cloud skills show strongest market demand"
506
-
507
- def _calculate_success_probability(self, level: str, skill_count: int) -> str:
508
- base_score = {'beginner': 60, 'intermediate': 75, 'advanced': 85}.get(level.lower(), 70)
509
- skill_bonus = min(skill_count * 3, 15)
510
- total = base_score + skill_bonus
511
- if total >= 90:
512
- return f"{total}% - Outstanding success potential"
513
- elif total >= 80:
514
- return f"{total}% - Excellent probability of success"
515
- elif total >= 70:
516
- return f"{total}% - Good probability of success"
517
- else:
518
- return f"{total}% - Consider skill development first"
519
 
520
  async def get_personalized_recommendations(
521
  self, user_profile: UserProfile, query: str = "",
@@ -524,12 +538,12 @@ class UltimateTopcoderMCPEngine:
524
  sort_by: str = None, sort_order: str = None,
525
  limit: int = 50
526
  ) -> Dict[str, Any]:
 
527
  start_time = datetime.now()
528
- print(f"🔍 Analyzing profile: {user_profile.skills} | Level: {user_profile.experience_level}")
529
 
 
530
  real_challenges = await self.fetch_real_challenges(
531
- user_profile=user_profile,
532
- query=query,
533
  limit=limit,
534
  status=status,
535
  prize_min=prize_min,
@@ -538,31 +552,51 @@ class UltimateTopcoderMCPEngine:
538
  track=track,
539
  sort_by=sort_by,
540
  sort_order=sort_order,
 
541
  )
542
 
543
- if real_challenges:
544
- challenges = real_challenges
545
- data_source = "🔥 REAL Topcoder MCP Server (4,596+ challenges)"
546
- print(f"🎉 Using {len(challenges)} REAL Topcoder challenges!")
547
- else:
548
- challenges = self.mock_challenges
549
- data_source = " Enhanced Intelligence Engine (Premium Dataset)"
550
- print(f" Using {len(challenges)} premium challenges with advanced algorithms")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
551
 
 
552
  scored_challenges = []
553
  for challenge in challenges:
554
  score, factors = self.calculate_advanced_compatibility_score(challenge, user_profile, query)
555
  challenge.compatibility_score = score
556
  challenge.rationale = f"Match: {score:.0f}%. " + ". ".join(factors[:2]) + "."
557
  scored_challenges.append(challenge)
 
558
  scored_challenges.sort(key=lambda x: x.compatibility_score, reverse=True)
559
  recommendations = scored_challenges[:5]
 
560
  processing_time = (datetime.now() - start_time).total_seconds()
561
  query_techs = self.extract_technologies_from_query(query)
562
  avg_score = sum(c.compatibility_score for c in challenges) / len(challenges) if challenges else 0
563
- print(f"✅ Generated {len(recommendations)} recommendations in {processing_time:.3f}s:")
 
564
  for i, rec in enumerate(recommendations, 1):
565
  print(f" {i}. {rec.title} - {rec.compatibility_score:.0f}% compatibility")
 
566
  return {
567
  "recommendations": [asdict(rec) for rec in recommendations],
568
  "insights": {
@@ -575,19 +609,22 @@ class UltimateTopcoderMCPEngine:
575
  "session_active": bool(self.session_id),
576
  "mcp_connected": self.is_connected,
577
  "algorithm_version": "Advanced Multi-Factor v2.0",
578
- "topcoder_total": "4,596+ live challenges" if real_challenges else "Premium dataset"
579
  }
580
  }
581
 
 
 
 
582
  class EnhancedLLMChatbot:
583
- """FIXED: Enhanced LLM Chatbot with OpenAI Integration + HF Secrets"""
584
 
585
  def __init__(self, mcp_engine):
586
  self.mcp_engine = mcp_engine
587
  self.conversation_context = []
588
  self.user_preferences = {}
589
 
590
- # FIXED: Use Hugging Face Secrets (environment variables)
591
  self.openai_api_key = os.getenv("OPENAI_API_KEY", "")
592
 
593
  if not self.openai_api_key:
@@ -598,17 +635,18 @@ class EnhancedLLMChatbot:
598
  print("✅ OpenAI API key loaded from HF secrets for intelligent responses")
599
 
600
  async def get_challenge_context(self, query: str, limit: int = 10) -> str:
601
- """Get relevant challenge data for LLM context"""
602
  try:
603
  # Fetch real challenges from your working MCP
604
- challenges = await self.mcp_engine.fetch_real_challenges(limit=limit)
605
 
606
  if not challenges:
607
- return "Using premium challenge dataset for analysis."
608
 
609
  # Create rich context from real data
610
  context_data = {
611
- "total_challenges_available": "4,596+",
 
612
  "sample_challenges": []
613
  }
614
 
@@ -621,17 +659,17 @@ class EnhancedLLMChatbot:
621
  "difficulty": challenge.difficulty,
622
  "prize": challenge.prize,
623
  "registrants": challenge.registrants,
624
- "category": getattr(challenge, 'category', 'Development')
625
  }
626
  context_data["sample_challenges"].append(challenge_info)
627
 
628
  return json.dumps(context_data, indent=2)
629
 
630
  except Exception as e:
631
- return f"Challenge data temporarily unavailable: {str(e)}"
632
 
633
  async def generate_llm_response(self, user_message: str, chat_history: List) -> str:
634
- """FIXED: Generate intelligent response using OpenAI API with real MCP data"""
635
 
636
  # Get real challenge context
637
  challenge_context = await self.get_challenge_context(user_message)
@@ -647,7 +685,7 @@ REAL CHALLENGE DATA CONTEXT:
647
  {challenge_context}
648
 
649
  Your capabilities:
650
- - Access to 4,596+ live Topcoder challenges through real MCP integration
651
  - Advanced challenge matching algorithms with multi-factor scoring
652
  - Real-time prize information, difficulty levels, and technology requirements
653
  - Comprehensive skill analysis and career guidance
@@ -670,18 +708,18 @@ User's current question: {user_message}
670
 
671
  Provide a helpful, intelligent response using the real challenge data context."""
672
 
673
- # FIXED: Try OpenAI API if available
674
  if self.llm_available:
675
  try:
676
  async with httpx.AsyncClient(timeout=30.0) as client:
677
  response = await client.post(
678
- "https://api.openai.com/v1/chat/completions", # FIXED: Correct OpenAI endpoint
679
  headers={
680
  "Content-Type": "application/json",
681
- "Authorization": f"Bearer {self.openai_api_key}" # FIXED: Proper auth header
682
  },
683
  json={
684
- "model": "gpt-4o-mini", # Fast and cost-effective
685
  "messages": [
686
  {"role": "system", "content": "You are an expert Topcoder Challenge Intelligence Assistant with real MCP data access."},
687
  {"role": "user", "content": system_prompt}
@@ -718,14 +756,16 @@ Provide a helpful, intelligent response using the real challenge data context.""
718
  try:
719
  context_data = json.loads(challenge_context)
720
  challenges = context_data.get("sample_challenges", [])
 
721
  except:
722
  challenges = []
 
723
 
724
  # Technology-specific responses using real data
725
  tech_keywords = ['python', 'react', 'javascript', 'blockchain', 'ai', 'ml', 'java', 'nodejs', 'angular', 'vue']
726
  matching_tech = [tech for tech in tech_keywords if tech in message_lower]
727
 
728
- if matching_tech:
729
  relevant_challenges = []
730
  for challenge in challenges:
731
  challenge_techs = [tech.lower() for tech in challenge.get('technologies', [])]
@@ -741,47 +781,14 @@ Provide a helpful, intelligent response using the real challenge data context.""
741
  response += f" 📊 Difficulty: {challenge['difficulty']}\n"
742
  response += f" 👥 Registrants: {challenge['registrants']}\n\n"
743
 
744
- response += f"*These are REAL challenges from my live MCP connection to Topcoder's database of 4,596+ challenges!*"
745
- return response
746
-
747
- # Prize/earning questions with real data
748
- if any(word in message_lower for word in ['prize', 'money', 'earn', 'pay', 'salary', 'income']):
749
- if challenges:
750
- response = f"💰 Based on real MCP data, current Topcoder challenges offer:\n\n"
751
- for i, challenge in enumerate(challenges[:3], 1):
752
- response += f"{i}. **{challenge['title']}** - {challenge['prize']}\n"
753
- response += f" 📊 Difficulty: {challenge['difficulty']} | 👥 Competition: {challenge['registrants']} registered\n\n"
754
- response += f"*This is live prize data from {context_data.get('total_challenges_available', '4,596+')} real challenges!*"
755
  return response
756
 
757
- # Career/skill questions
758
- if any(word in message_lower for word in ['career', 'skill', 'learn', 'beginner', 'advanced', 'help']):
759
- if challenges:
760
- sample_challenge = challenges[0]
761
- return f"""I'm your intelligent Topcoder assistant with REAL MCP integration! 🚀
762
-
763
- I currently have live access to {context_data.get('total_challenges_available', '4,596+')} real challenges. For example, right now there's:
764
-
765
- 🎯 **"{sample_challenge['title']}"**
766
- 💰 Prize: **{sample_challenge['prize']}**
767
- 🛠️ Technologies: {', '.join(sample_challenge['technologies'][:3])}
768
- 📊 Difficulty: {sample_challenge['difficulty']}
769
-
770
- I can help you with:
771
- 🎯 Find challenges matching your specific skills
772
- 💰 Compare real prize amounts and competition levels
773
- 📊 Analyze difficulty levels and technology requirements
774
- 🚀 Career guidance based on market demand
775
-
776
- Try asking me about specific technologies like "Python challenges" or "React opportunities"!
777
-
778
- *Powered by live MCP connection to Topcoder's challenge database*"""
779
-
780
  # Default intelligent response with real data
781
  if challenges:
782
  return f"""Hi! I'm your intelligent Topcoder assistant! 🤖
783
 
784
- I have REAL MCP integration with live access to **{context_data.get('total_challenges_available', '4,596+')} challenges** from Topcoder's database.
785
 
786
  **Currently active challenges include:**
787
  • **{challenges[0]['title']}** ({challenges[0]['prize']})
@@ -796,806 +803,12 @@ Ask me about:
796
 
797
  *All responses powered by real-time Topcoder MCP data!*"""
798
 
799
- return "I'm your intelligent Topcoder assistant with real MCP data access! Ask me about challenges, skills, or career advice and I'll help you using live data from 4,596+ real challenges! 🚀"
800
-
801
- # FIXED: Properly placed standalone functions with correct signatures
802
- async def chat_with_enhanced_llm_agent(message: str, history: List[Tuple[str, str]], mcp_engine) -> Tuple[List[Tuple[str, str]], str]:
803
- """FIXED: Enhanced chat with real LLM and MCP data integration - 3 parameters"""
804
- print(f"🧠 Enhanced LLM Chat: {message}")
805
-
806
- # Initialize enhanced chatbot
807
- if not hasattr(chat_with_enhanced_llm_agent, 'chatbot'):
808
- chat_with_enhanced_llm_agent.chatbot = EnhancedLLMChatbot(mcp_engine)
809
-
810
- chatbot = chat_with_enhanced_llm_agent.chatbot
811
-
812
- try:
813
- # Get intelligent response using real MCP data
814
- response = await chatbot.generate_llm_response(message, history)
815
-
816
- # Add to history
817
- history.append((message, response))
818
-
819
- print(f"✅ Enhanced LLM response generated with real MCP context")
820
- return history, ""
821
-
822
- except Exception as e:
823
- error_response = f"I encountered an issue processing your request: {str(e)}. However, I can still help you with challenge recommendations using my real MCP data! Try asking about specific technologies or challenge types."
824
- history.append((message, error_response))
825
- return history, ""
826
-
827
- def chat_with_enhanced_llm_agent_sync(message: str, history: List[Tuple[str, str]]) -> Tuple[List[Tuple[str, str]], str]:
828
- """FIXED: Synchronous wrapper for Gradio - calls async function with correct parameters"""
829
- return asyncio.run(chat_with_enhanced_llm_agent(message, history, intelligence_engine))
830
 
831
- # Initialize the ULTIMATE intelligence engine
832
- print("🚀 Starting ULTIMATE Topcoder Intelligence Assistant...")
833
  intelligence_engine = UltimateTopcoderMCPEngine()
834
 
835
- # Rest of your formatting functions remain the same...
836
-
837
- def format_challenge_card(challenge: Dict) -> str:
838
- """Format challenge as professional HTML card with enhanced styling"""
839
-
840
- # Create technology badges
841
- tech_badges = " ".join([
842
- f"<span style='background:linear-gradient(135deg,#667eea 0%,#764ba2 100%);color:white;padding:6px 12px;border-radius:20px;font-size:0.85em;margin:3px;display:inline-block;font-weight:500;box-shadow:0 2px 4px rgba(0,0,0,0.1);'>{tech}</span>"
843
- for tech in challenge['technologies']
844
- ])
845
-
846
- # Dynamic score coloring and labels
847
- score = challenge['compatibility_score']
848
- if score >= 85:
849
- score_color = "#00b894"
850
- score_label = "🔥 Excellent Match"
851
- card_border = "#00b894"
852
- elif score >= 70:
853
- score_color = "#f39c12"
854
- score_label = "✨ Great Match"
855
- card_border = "#f39c12"
856
- elif score >= 55:
857
- score_color = "#e17055"
858
- score_label = "💡 Good Match"
859
- card_border = "#e17055"
860
- else:
861
- score_color = "#74b9ff"
862
- score_label = "🌟 Learning Opportunity"
863
- card_border = "#74b9ff"
864
-
865
- # Format prize
866
- prize_display = challenge['prize']
867
- if challenge['prize'].startswith('$') and challenge['prize'] != '$0':
868
- prize_color = "#00b894"
869
- else:
870
- prize_color = "#6c757d"
871
- prize_display = "Merit-based"
872
-
873
- return f"""
874
- <div style='border:2px solid {card_border};border-radius:16px;padding:25px;margin:20px 0;background:white;box-shadow:0 8px 25px rgba(0,0,0,0.1);transition:all 0.3s ease;position:relative;overflow:hidden;'>
875
-
876
- <!-- Background gradient -->
877
- <div style='position:absolute;top:0;left:0;right:0;height:4px;background:linear-gradient(90deg,{card_border},transparent);'></div>
878
-
879
- <div style='display:flex;justify-content:space-between;align-items:flex-start;margin-bottom:20px'>
880
- <h3 style='margin:0;color:#2c3e50;font-size:1.4em;font-weight:700;line-height:1.3;max-width:70%;'>{challenge['title']}</h3>
881
- <div style='text-align:center;min-width:120px;'>
882
- <div style='background:{score_color};color:white;padding:12px 18px;border-radius:30px;font-weight:700;font-size:1.1em;box-shadow:0 4px 12px rgba(0,0,0,0.15);'>{score:.0f}%</div>
883
- <div style='color:{score_color};font-size:0.85em;margin-top:6px;font-weight:600;'>{score_label}</div>
884
- </div>
885
- </div>
886
-
887
- <p style='color:#5a6c7d;margin:20px 0;line-height:1.7;font-size:1em;'>{challenge['description']}</p>
888
-
889
- <div style='margin:25px 0'>
890
- <div style='color:#2c3e50;font-size:0.95em;font-weight:600;margin-bottom:10px;'>🛠️ Technologies & Skills:</div>
891
- <div style='line-height:1.8;'>{tech_badges}</div>
892
- </div>
893
-
894
- <div style='background:#f8f9fa;border-radius:12px;padding:20px;margin:20px 0;'>
895
- <div style='color:#2c3e50;font-weight:600;margin-bottom:12px;font-size:0.95em;'>💭 Why This Matches You:</div>
896
- <div style='color:#5a6c7d;line-height:1.6;font-style:italic;'>{challenge['rationale']}</div>
897
- </div>
898
-
899
- <div style='display:grid;grid-template-columns:repeat(auto-fit,minmax(140px,1fr));gap:20px;margin-top:25px;'>
900
- <div style='text-align:center;padding:15px;background:#f8f9fa;border-radius:12px;'>
901
- <div style='font-size:1.3em;font-weight:700;color:{prize_color};'>{prize_display}</div>
902
- <div style='font-size:0.85em;color:#6c757d;margin-top:4px;font-weight:500;'>Prize Pool</div>
903
- </div>
904
- <div style='text-align:center;padding:15px;background:#f8f9fa;border-radius:12px;'>
905
- <div style='font-size:1.2em;font-weight:700;color:#3498db;'>{challenge['difficulty']}</div>
906
- <div style='font-size:0.85em;color:#6c757d;margin-top:4px;font-weight:500;'>Difficulty</div>
907
- </div>
908
- <div style='text-align:center;padding:15px;background:#f8f9fa;border-radius:12px;'>
909
- <div style='font-size:1.2em;font-weight:700;color:#e67e22;'>{challenge['time_estimate']}</div>
910
- <div style='font-size:0.85em;color:#6c757d;margin-top:4px;font-weight:500;'>Timeline</div>
911
- </div>
912
- <div style='text-align:center;padding:15px;background:#f8f9fa;border-radius:12px;'>
913
- <div style='font-size:1.2em;font-weight:700;color:#9b59b6;'>{challenge.get('registrants', 'N/A')}</div>
914
- <div style='font-size:0.85em;color:#6c757d;margin-top:4px;font-weight:500;'>Registered</div>
915
- </div>
916
- </div>
917
- </div>
918
- """
919
-
920
- def format_insights_panel(insights: Dict) -> str:
921
- """Format insights as comprehensive dashboard with enhanced styling"""
922
- return f"""
923
- <div style='background:linear-gradient(135deg,#667eea 0%,#764ba2 100%);color:white;padding:30px;border-radius:16px;margin:20px 0;box-shadow:0 12px 30px rgba(102,126,234,0.3);position:relative;overflow:hidden;'>
924
-
925
- <!-- Animated background pattern -->
926
- <div style='position:absolute;top:0;left:0;right:0;bottom:0;background:url("data:image/svg+xml,%3Csvg width=\'60\' height=\'60\' viewBox=\'0 0 60 60\' xmlns=\'http://www.w3.org/2000/svg\'%3E%3Cg fill=\'none\' fill-rule=\'evenodd\'%3E%3Cg fill=\'%23ffffff\' fill-opacity=\'0.03\'%3E%3Ccircle cx=\'30\' cy=\'30\' r=\'2\'/%3E%3C/g%3E%3C/g%3E%3C/svg%3E");opacity:0.4;'></div>
927
-
928
- <div style='position:relative;z-index:1;'>
929
- <h3 style='margin:0 0 25px 0;font-size:1.6em;text-align:center;font-weight:700;'>🎯 Your Intelligence Profile</h3>
930
-
931
- <div style='display:grid;grid-template-columns:repeat(auto-fit,minmax(280px,1fr));gap:20px'>
932
- <div style='background:rgba(255,255,255,0.15);padding:20px;border-radius:12px;backdrop-filter:blur(10px);border:1px solid rgba(255,255,255,0.1);'>
933
- <div style='font-weight:700;margin-bottom:10px;font-size:1.1em;display:flex;align-items:center;'>👤 Developer Profile</div>
934
- <div style='opacity:0.95;line-height:1.5;'>{insights['profile_type']}</div>
935
- </div>
936
- <div style='background:rgba(255,255,255,0.15);padding:20px;border-radius:12px;backdrop-filter:blur(10px);border:1px solid rgba(255,255,255,0.1);'>
937
- <div style='font-weight:700;margin-bottom:10px;font-size:1.1em;display:flex;align-items:center;'>💪 Core Strengths</div>
938
- <div style='opacity:0.95;line-height:1.5;'>{insights['strengths']}</div>
939
- </div>
940
- <div style='background:rgba(255,255,255,0.15);padding:20px;border-radius:12px;backdrop-filter:blur(10px);border:1px solid rgba(255,255,255,0.1);'>
941
- <div style='font-weight:700;margin-bottom:10px;font-size:1.1em;display:flex;align-items:center;'>📈 Growth Focus</div>
942
- <div style='opacity:0.95;line-height:1.5;'>{insights['growth_areas']}</div>
943
- </div>
944
- <div style='background:rgba(255,255,255,0.15);padding:20px;border-radius:12px;backdrop-filter:blur(10px);border:1px solid rgba(255,255,255,0.1);'>
945
- <div style='font-weight:700;margin-bottom:10px;font-size:1.1em;display:flex;align-items:center;'>🚀 Progression Path</div>
946
- <div style='opacity:0.95;line-height:1.5;'>{insights['skill_progression']}</div>
947
- </div>
948
- <div style='background:rgba(255,255,255,0.15);padding:20px;border-radius:12px;backdrop-filter:blur(10px);border:1px solid rgba(255,255,255,0.1);'>
949
- <div style='font-weight:700;margin-bottom:10px;font-size:1.1em;display:flex;align-items:center;'>📊 Market Intelligence</div>
950
- <div style='opacity:0.95;line-height:1.5;'>{insights['market_trends']}</div>
951
- </div>
952
- <div style='background:rgba(255,255,255,0.15);padding:20px;border-radius:12px;backdrop-filter:blur(10px);border:1px solid rgba(255,255,255,0.1);'>
953
- <div style='font-weight:700;margin-bottom:10px;font-size:1.1em;display:flex;align-items:center;'>🎯 Success Forecast</div>
954
- <div style='opacity:0.95;line-height:1.5;'>{insights['success_probability']}</div>
955
- </div>
956
- </div>
957
- </div>
958
- </div>
959
- """
960
-
961
- async def get_ultimate_recommendations_async(
962
- skills_input: str, experience_level: str, time_available: str, interests: str,
963
- status: str, prize_min: int, prize_max: int, challenge_type: str, track: str,
964
- sort_by: str, sort_order: str
965
- ) -> Tuple[str, str]:
966
- start_time = time.time()
967
- try:
968
- skills = [skill.strip() for skill in skills_input.split(',') if skill.strip()]
969
- user_profile = UserProfile(
970
- skills=skills,
971
- experience_level=experience_level,
972
- time_available=time_available,
973
- interests=[interests] if interests else []
974
- )
975
- # Pass all new filter params to get_personalized_recommendations
976
- recommendations_data = await intelligence_engine.get_personalized_recommendations(
977
- user_profile,
978
- interests,
979
- status=status,
980
- prize_min=prize_min,
981
- prize_max=prize_max,
982
- challenge_type=challenge_type,
983
- track=track,
984
- sort_by=sort_by,
985
- sort_order=sort_order,
986
- limit=50
987
- )
988
- insights = intelligence_engine.get_user_insights(user_profile)
989
- recommendations = recommendations_data["recommendations"]
990
- insights_data = recommendations_data["insights"]
991
-
992
- # Format results with enhanced styling
993
- if recommendations:
994
- data_source_emoji = "🔥" if "REAL" in insights_data['data_source'] else "⚡"
995
- recommendations_html = f"""
996
- <div style='background:linear-gradient(135deg,#00b894,#00a085);color:white;padding:20px;border-radius:12px;margin-bottom:25px;text-align:center;box-shadow:0 8px 25px rgba(0,184,148,0.3);'>
997
- <div style='font-size:2.5em;margin-bottom:10px;'>{data_source_emoji}</div>
998
- <div style='font-size:1.3em;font-weight:700;margin-bottom:8px;'>Found {len(recommendations)} Perfect Matches!</div>
999
- <div style='opacity:0.95;font-size:1em;'>Personalized using {insights_data['algorithm_version']} • {insights_data['processing_time']} response time</div>
1000
- <div style='opacity:0.9;font-size:0.9em;margin-top:5px;'>Source: {insights_data['data_source']}</div>
1001
- </div>
1002
- """
1003
- for challenge in recommendations:
1004
- recommendations_html += format_challenge_card(challenge)
1005
- else:
1006
- recommendations_html = """
1007
- <div style='background:linear-gradient(135deg,#fdcb6e,#e17055);color:white;padding:25px;border-radius:12px;text-align:center;box-shadow:0 8px 25px rgba(253,203,110,0.3);'>
1008
- <div style='font-size:3em;margin-bottom:15px;'>🔍</div>
1009
- <div style='font-size:1.3em;font-weight:600;margin-bottom:10px;'>No perfect matches found</div>
1010
- <div style='opacity:0.9;font-size:1em;'>Try adjusting your skills, experience level, or interests for better results</div>
1011
- </div>
1012
- """
1013
- # Generate insights panel
1014
- insights_html = format_insights_panel(insights)
1015
- processing_time = round(time.time() - start_time, 3)
1016
- print(f"✅ ULTIMATE request completed successfully in {processing_time}s")
1017
- print(f"📊 Returned {len(recommendations)} recommendations with comprehensive insights\n")
1018
- return recommendations_html, insights_html
1019
-
1020
- except Exception as e:
1021
- error_msg = f"""
1022
- <div style='background:linear-gradient(135deg,#e17055,#d63031);color:white;padding:25px;border-radius:12px;text-align:center;box-shadow:0 8px 25px rgba(225,112,85,0.3);'>
1023
- <div style='font-size:3em;margin-bottom:15px;'>❌</div>
1024
- <div style='font-size:1.3em;font-weight:600;margin-bottom:10px;'>Processing Error</div>
1025
- <div style='opacity:0.9;font-size:0.9em;'>{str(e)}</div>
1026
- <div style='opacity:0.8;font-size:0.85em;margin-top:10px;'>Please try again or contact support</div>
1027
- </div>
1028
- """
1029
- print(f"❌ Error processing ULTIMATE request: {str(e)}")
1030
- return error_msg, ""
1031
-
1032
- def get_ultimate_recommendations_sync(
1033
- skills_input: str, experience_level: str, time_available: str, interests: str,
1034
- status: str, prize_min: int, prize_max: int, challenge_type: str, track: str,
1035
- sort_by: str, sort_order: str
1036
- ) -> Tuple[str, str]:
1037
- return asyncio.run(get_ultimate_recommendations_async(
1038
- skills_input, experience_level, time_available, interests,
1039
- status, prize_min, prize_max, challenge_type, track,
1040
- sort_by, sort_order
1041
- ))
1042
- def run_ultimate_performance_test():
1043
- """ULTIMATE comprehensive system performance test"""
1044
- results = []
1045
- results.append("🚀 ULTIMATE COMPREHENSIVE PERFORMANCE TEST")
1046
- results.append("=" * 60)
1047
- results.append(f"⏰ Started at: {time.strftime('%Y-%m-%d %H:%M:%S')}")
1048
- results.append(f"🔥 Testing: Real MCP Integration + Advanced Intelligence Engine")
1049
- results.append("")
1050
-
1051
- total_start = time.time()
1052
-
1053
- # Test 1: MCP Connection Test
1054
- results.append("🔍 Test 1: Real MCP Connection Status")
1055
- start = time.time()
1056
- mcp_status = "✅ CONNECTED" if intelligence_engine.is_connected else "⚠️ FALLBACK MODE"
1057
- session_status = f"Session: {intelligence_engine.session_id[:8]}..." if intelligence_engine.session_id else "No session"
1058
- test1_time = round(time.time() - start, 3)
1059
- results.append(f" {mcp_status} ({test1_time}s)")
1060
- results.append(f" 📡 {session_status}")
1061
- results.append(f" 🌐 Endpoint: {intelligence_engine.base_url}")
1062
- results.append("")
1063
-
1064
- # Test 2: Advanced Intelligence Engine
1065
- results.append("🔍 Test 2: Advanced Recommendation Engine")
1066
- start = time.time()
1067
-
1068
- # Create async test
1069
- async def test_recommendations():
1070
- test_profile = UserProfile(
1071
- skills=['Python', 'React', 'AWS'],
1072
- experience_level='Intermediate',
1073
- time_available='4-8 hours',
1074
- interests=['web development', 'cloud computing']
1075
- )
1076
- return await intelligence_engine.get_personalized_recommendations(test_profile, 'python react cloud')
1077
-
1078
- try:
1079
- # Run async test
1080
- recs_data = asyncio.run(test_recommendations())
1081
- test2_time = round(time.time() - start, 3)
1082
- recs = recs_data["recommendations"]
1083
- insights = recs_data["insights"]
1084
-
1085
- results.append(f" ✅ Generated {len(recs)} recommendations in {test2_time}s")
1086
- results.append(f" 🎯 Data Source: {insights['data_source']}")
1087
- results.append(f" 📊 Top match: {recs[0]['title']} ({recs[0]['compatibility_score']:.0f}%)")
1088
- results.append(f" 🧠 Algorithm: {insights['algorithm_version']}")
1089
- except Exception as e:
1090
- results.append(f" ❌ Test failed: {str(e)}")
1091
- results.append("")
1092
-
1093
- # Test 3: API Key Status
1094
- results.append("🔍 Test 3: OpenAI API Configuration")
1095
- start = time.time()
1096
-
1097
- # Check if we have a chatbot instance and API key
1098
- has_api_key = bool(os.getenv("OPENAI_API_KEY"))
1099
- api_status = "✅ CONFIGURED" if has_api_key else "⚠️ NOT SET"
1100
- test3_time = round(time.time() - start, 3)
1101
-
1102
- results.append(f" OpenAI API Key: {api_status} ({test3_time}s)")
1103
- if has_api_key:
1104
- results.append(f" 🤖 LLM Integration: Available")
1105
- results.append(f" 🧠 Enhanced Chat: Enabled")
1106
- else:
1107
- results.append(f" 🤖 LLM Integration: Fallback mode")
1108
- results.append(f" 🧠 Enhanced Chat: Basic responses")
1109
- results.append("")
1110
-
1111
- # Summary
1112
- total_time = round(time.time() - total_start, 3)
1113
- results.append("📊 ULTIMATE PERFORMANCE SUMMARY")
1114
- results.append("-" * 40)
1115
- results.append(f"🕐 Total Test Duration: {total_time}s")
1116
- results.append(f"🔥 Real MCP Integration: {mcp_status}")
1117
- results.append(f"🧠 Advanced Intelligence Engine: ✅ OPERATIONAL")
1118
- results.append(f"🤖 OpenAI LLM Integration: {api_status}")
1119
- results.append(f"⚡ Average Response Time: <1.0s")
1120
- results.append(f"💾 Memory Usage: ✅ OPTIMIZED")
1121
- results.append(f"🎯 Algorithm Accuracy: ✅ ADVANCED")
1122
- results.append(f"🚀 Production Readiness: ✅ ULTIMATE")
1123
- results.append("")
1124
-
1125
- if has_api_key:
1126
- results.append("🏆 All systems performing at ULTIMATE level with full LLM integration!")
1127
- else:
1128
- results.append("🏆 All systems operational! Add OPENAI_API_KEY to HF secrets for full LLM features!")
1129
-
1130
- results.append("🔥 Ready for competition submission!")
1131
-
1132
- return "\n".join(results)
1133
-
1134
- def create_ultimate_interface():
1135
- """Create the ULTIMATE Gradio interface combining all features"""
1136
- print("🎨 Creating ULTIMATE Gradio interface...")
1137
-
1138
- # Enhanced custom CSS
1139
- custom_css = """
1140
- .gradio-container {
1141
- max-width: 1400px !important;
1142
- margin: 0 auto !important;
1143
- }
1144
- .tab-nav {
1145
- border-radius: 12px !important;
1146
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
1147
- }
1148
- .ultimate-btn {
1149
- background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
1150
- border: none !important;
1151
- box-shadow: 0 4px 15px rgba(102, 126, 234, 0.4) !important;
1152
- transition: all 0.3s ease !important;
1153
- }
1154
- .ultimate-btn:hover {
1155
- transform: translateY(-2px) !important;
1156
- box-shadow: 0 8px 25px rgba(102, 126, 234, 0.6) !important;
1157
- }
1158
- """
1159
-
1160
- with gr.Blocks(
1161
- theme=gr.themes.Soft(),
1162
- title="🚀 ULTIMATE Topcoder Challenge Intelligence Assistant",
1163
- css=custom_css
1164
- ) as interface:
1165
-
1166
- # ULTIMATE Header
1167
- gr.Markdown("""
1168
- # 🚀 ULTIMATE Topcoder Challenge Intelligence Assistant
1169
-
1170
- ### **🔥 REAL MCP Integration + Advanced AI Intelligence + OpenAI LLM**
1171
-
1172
- Experience the **world's most advanced** Topcoder challenge discovery system! Powered by **live Model Context Protocol integration** with access to **4,596+ real challenges**, **OpenAI GPT-4 intelligence**, and sophisticated AI algorithms that deliver **personalized recommendations** tailored to your exact skills and career goals.
1173
-
1174
- **🎯 What Makes This ULTIMATE:**
1175
- - **🔥 Real MCP Data**: Live connection to Topcoder's official MCP server
1176
- - **🤖 OpenAI GPT-4**: Advanced conversational AI with real challenge context
1177
- - **🧠 Advanced AI**: Multi-factor compatibility scoring algorithms
1178
- - **⚡ Lightning Fast**: Sub-second response times with real-time data
1179
- - **🎨 Beautiful UI**: Professional interface with enhanced user experience
1180
- - **📊 Smart Insights**: Comprehensive profile analysis and market intelligence
1181
-
1182
- ---
1183
- """)
1184
-
1185
- with gr.Tabs():
1186
- # Tab 1: ULTIMATE Personalized Recommendations
1187
- with gr.TabItem("🎯 ULTIMATE Recommendations", elem_id="ultimate-recommendations"):
1188
- gr.Markdown("### 🚀 AI-Powered Challenge Discovery with Real MCP Data")
1189
-
1190
- # ... Inside create_ultimate_interface() ...
1191
-
1192
- with gr.Row():
1193
- with gr.Column(scale=1):
1194
- gr.Markdown("**🤖 Tell the AI about yourself and filter challenges:**")
1195
-
1196
- skills_input = gr.Textbox(
1197
- label="🛠️ Your Skills & Technologies",
1198
- placeholder="Python, React, JavaScript, AWS, Docker, Blockchain, UI/UX...",
1199
- lines=3,
1200
- value="Python, JavaScript, React"
1201
- )
1202
- experience_level = gr.Dropdown(
1203
- choices=["Beginner", "Intermediate", "Advanced"],
1204
- label="📊 Experience Level",
1205
- value="Intermediate"
1206
- )
1207
- time_available = gr.Dropdown(
1208
- choices=["2-4 hours", "4-8 hours", "8+ hours"],
1209
- label="⏰ Time Available",
1210
- value="4-8 hours"
1211
- )
1212
- interests = gr.Textbox(
1213
- label="🎯 Current Interests & Goals",
1214
- placeholder="web development, blockchain, AI/ML, cloud computing, mobile apps...",
1215
- lines=3,
1216
- value="web development, cloud computing"
1217
- )
1218
-
1219
- # --- NEW FILTERS BELOW ---
1220
- status_dropdown = gr.Dropdown(
1221
- choices=["Active", "Completed", "Draft", "Cancelled"],
1222
- label="Challenge Status",
1223
- value="Active"
1224
- )
1225
- prize_min = gr.Number(
1226
- label="Minimum Prize ($)",
1227
- value=0
1228
- )
1229
- prize_max = gr.Number(
1230
- label="Maximum Prize ($)",
1231
- value=10000
1232
- )
1233
- type_dropdown = gr.Dropdown(
1234
- choices=["", "Code", "First2Finish", "UI Prototype Competition", "Bug Hunt", "Test Suites"],
1235
- label="Challenge Type",
1236
- value=""
1237
- )
1238
- track_dropdown = gr.Dropdown(
1239
- choices=["", "DEVELOPMENT", "DESIGN", "DATA_SCIENCE", "QA"],
1240
- label="Track",
1241
- value=""
1242
- )
1243
- sort_by_dropdown = gr.Dropdown(
1244
- choices=[
1245
- "overview.totalPrizes", "numOfRegistrants", "endDate", "startDate"
1246
- ],
1247
- label="Sort By",
1248
- value="overview.totalPrizes"
1249
- )
1250
- sort_order_dropdown = gr.Dropdown(
1251
- choices=["desc", "asc"],
1252
- label="Sort Order",
1253
- value="desc"
1254
- )
1255
-
1256
- ultimate_recommend_btn = gr.Button(
1257
- "🚀 Get My ULTIMATE Recommendations",
1258
- variant="primary",
1259
- size="lg",
1260
- elem_classes="ultimate-btn"
1261
- )
1262
-
1263
- # ...Tips markdown...
1264
-
1265
- with gr.Column(scale=2):
1266
- ultimate_insights_output = gr.HTML(label="🧠 Your Intelligence Profile", visible=True)
1267
- ultimate_recommendations_output = gr.HTML(label="🏆 Your ULTIMATE Recommendations", visible=True)
1268
-
1269
- # --- Connect the ULTIMATE recommendation system with new inputs ---
1270
- ultimate_recommend_btn.click(
1271
- get_ultimate_recommendations_sync,
1272
- inputs=[
1273
- skills_input,
1274
- experience_level,
1275
- time_available,
1276
- interests,
1277
- status_dropdown,
1278
- prize_min,
1279
- prize_max,
1280
- type_dropdown,
1281
- track_dropdown,
1282
- sort_by_dropdown,
1283
- sort_order_dropdown
1284
- ],
1285
- outputs=[ultimate_recommendations_output, ultimate_insights_output]
1286
- )
1287
-
1288
- # Tab 2: FIXED Enhanced LLM Chat
1289
- with gr.TabItem("💬 INTELLIGENT AI Assistant"):
1290
- gr.Markdown('''
1291
- ### 🧠 Chat with Your INTELLIGENT AI Assistant
1292
-
1293
- **🔥 Enhanced with OpenAI GPT-4 + Live MCP Data!**
1294
-
1295
- Ask me anything and I'll use:
1296
- - 🤖 **OpenAI GPT-4 Intelligence** for natural conversations
1297
- - 🔥 **Real MCP Data** from 4,596+ live Topcoder challenges
1298
- - 📊 **Live Challenge Analysis** with current prizes and requirements
1299
- - 🎯 **Personalized Recommendations** based on your interests
1300
-
1301
- Try asking: "Show me Python challenges with high prizes" or "What React opportunities are available?"
1302
- ''')
1303
-
1304
- enhanced_chatbot = gr.Chatbot(
1305
- label="🧠 INTELLIGENT Topcoder AI Assistant (OpenAI GPT-4)",
1306
- height=500,
1307
- placeholder="Hi! I'm your intelligent assistant with OpenAI GPT-4 and live MCP data access to 4,596+ challenges!",
1308
- show_label=True
1309
- )
1310
-
1311
- with gr.Row():
1312
- enhanced_chat_input = gr.Textbox(
1313
- placeholder="Ask me about challenges, skills, career advice, or anything else!",
1314
- container=False,
1315
- scale=4,
1316
- show_label=False
1317
- )
1318
- enhanced_chat_btn = gr.Button("Send", variant="primary", scale=1)
1319
-
1320
- # API Key status indicator
1321
- api_key_status = "🤖 OpenAI GPT-4 Active" if os.getenv("OPENAI_API_KEY") else "⚠️ Set OPENAI_API_KEY in HF Secrets for full GPT-4 features"
1322
- gr.Markdown(f"**Status:** {api_key_status}")
1323
-
1324
- # Enhanced examples
1325
- gr.Examples(
1326
- examples=[
1327
- "What Python challenges offer the highest prizes?",
1328
- "Show me beginner-friendly React opportunities",
1329
- "Which blockchain challenges are most active?",
1330
- "What skills are in highest demand right now?",
1331
- "Help me choose between machine learning and web development",
1332
- "What's the average prize for intermediate challenges?"
1333
- ],
1334
- inputs=enhanced_chat_input
1335
- )
1336
-
1337
- # FIXED: Connect enhanced LLM functionality with correct function
1338
- enhanced_chat_btn.click(
1339
- chat_with_enhanced_llm_agent_sync,
1340
- inputs=[enhanced_chat_input, enhanced_chatbot],
1341
- outputs=[enhanced_chatbot, enhanced_chat_input]
1342
- )
1343
-
1344
- enhanced_chat_input.submit(
1345
- chat_with_enhanced_llm_agent_sync,
1346
- inputs=[enhanced_chat_input, enhanced_chatbot],
1347
- outputs=[enhanced_chatbot, enhanced_chat_input]
1348
- )
1349
-
1350
- # Tab 3: ULTIMATE Performance & Technical Details
1351
- with gr.TabItem("⚡ ULTIMATE Performance"):
1352
- gr.Markdown("""
1353
- ### 🧪 ULTIMATE System Performance & Real MCP Integration
1354
-
1355
- **🔥 Monitor the performance** of the world's most advanced Topcoder intelligence system! Test real MCP connectivity, OpenAI integration, advanced algorithms, and production-ready performance metrics.
1356
- """)
1357
-
1358
- with gr.Row():
1359
- with gr.Column():
1360
- ultimate_test_btn = gr.Button("🧪 Run ULTIMATE Performance Test", variant="secondary", size="lg", elem_classes="ultimate-btn")
1361
- quick_benchmark_btn = gr.Button("⚡ Quick Benchmark", variant="secondary")
1362
- mcp_status_btn = gr.Button("🔥 Check Real MCP Status", variant="secondary")
1363
-
1364
- with gr.Column():
1365
- ultimate_test_output = gr.Textbox(
1366
- label="📋 ULTIMATE Test Results & Performance Metrics",
1367
- lines=15,
1368
- show_label=True
1369
- )
1370
-
1371
- def quick_benchmark():
1372
- """Quick benchmark for ULTIMATE system"""
1373
- results = []
1374
- results.append("⚡ ULTIMATE QUICK BENCHMARK")
1375
- results.append("=" * 35)
1376
-
1377
- start = time.time()
1378
-
1379
- # Test basic recommendation speed
1380
- async def quick_test():
1381
- test_profile = UserProfile(
1382
- skills=['Python', 'React'],
1383
- experience_level='Intermediate',
1384
- time_available='4-8 hours',
1385
- interests=['web development']
1386
- )
1387
- return await intelligence_engine.get_personalized_recommendations(test_profile)
1388
-
1389
- try:
1390
- test_data = asyncio.run(quick_test())
1391
- benchmark_time = round(time.time() - start, 3)
1392
-
1393
- results.append(f"🚀 Response Time: {benchmark_time}s")
1394
- results.append(f"🎯 Recommendations: {len(test_data['recommendations'])}")
1395
- results.append(f"📊 Data Source: {test_data['insights']['data_source']}")
1396
- results.append(f"🧠 Algorithm: {test_data['insights']['algorithm_version']}")
1397
-
1398
- if benchmark_time < 1.0:
1399
- status = "🔥 ULTIMATE PERFORMANCE"
1400
- elif benchmark_time < 2.0:
1401
- status = "✅ EXCELLENT"
1402
- else:
1403
- status = "⚠️ ACCEPTABLE"
1404
-
1405
- results.append(f"📈 Status: {status}")
1406
-
1407
- except Exception as e:
1408
- results.append(f"❌ Benchmark failed: {str(e)}")
1409
-
1410
- return "\n".join(results)
1411
-
1412
- def check_mcp_status():
1413
- """Check real MCP connection status"""
1414
- results = []
1415
- results.append("🔥 REAL MCP CONNECTION STATUS")
1416
- results.append("=" * 35)
1417
-
1418
- if intelligence_engine.is_connected and intelligence_engine.session_id:
1419
- results.append("✅ Status: CONNECTED")
1420
- results.append(f"🔗 Session ID: {intelligence_engine.session_id[:12]}...")
1421
- results.append(f"🌐 Endpoint: {intelligence_engine.base_url}")
1422
- results.append("📊 Live Data: 4,596+ challenges accessible")
1423
- results.append("🎯 Features: Real-time challenge data")
1424
- results.append("⚡ Performance: Sub-second response times")
1425
- else:
1426
- results.append("⚠️ Status: FALLBACK MODE")
1427
- results.append("📊 Using: Enhanced premium dataset")
1428
- results.append("🎯 Features: Advanced algorithms active")
1429
- results.append("💡 Note: Still provides excellent recommendations")
1430
-
1431
- # Check OpenAI API Key
1432
- has_openai = bool(os.getenv("OPENAI_API_KEY"))
1433
- openai_status = "✅ CONFIGURED" if has_openai else "⚠️ NOT SET"
1434
- results.append(f"🤖 OpenAI GPT-4: {openai_status}")
1435
-
1436
- results.append(f"🕐 Checked at: {time.strftime('%H:%M:%S')}")
1437
-
1438
- return "\n".join(results)
1439
-
1440
- # Connect ULTIMATE test functions
1441
- ultimate_test_btn.click(run_ultimate_performance_test, outputs=ultimate_test_output)
1442
- quick_benchmark_btn.click(quick_benchmark, outputs=ultimate_test_output)
1443
- mcp_status_btn.click(check_mcp_status, outputs=ultimate_test_output)
1444
-
1445
- # Tab 4: ULTIMATE About & Documentation
1446
- with gr.TabItem("ℹ️ ULTIMATE About"):
1447
- gr.Markdown(f"""
1448
- ## 🚀 About the ULTIMATE Topcoder Challenge Intelligence Assistant
1449
-
1450
- ### 🎯 **Revolutionary Mission**
1451
- This **ULTIMATE** system represents the **world's most advanced** Topcoder challenge discovery platform, combining **real-time MCP integration**, **OpenAI GPT-4 intelligence**, and **cutting-edge AI algorithms** to revolutionize how developers discover and engage with coding challenges.
1452
-
1453
- ### ✨ **ULTIMATE Capabilities**
1454
-
1455
- #### 🔥 **Real MCP Integration**
1456
- - **Live Connection**: Direct access to Topcoder's official MCP server
1457
- - **4,596+ Real Challenges**: Live challenge database with real-time updates
1458
- - **6,535+ Skills Database**: Comprehensive skill categorization and matching
1459
- - **Authentic Data**: Real prizes, actual difficulty levels, genuine registration numbers
1460
- - **Session Authentication**: Secure, persistent MCP session management
1461
-
1462
- #### 🤖 **OpenAI GPT-4 Integration**
1463
- - **Advanced Conversational AI**: Natural language understanding and responses
1464
- - **Context-Aware Responses**: Uses real MCP data in intelligent conversations
1465
- - **Personalized Guidance**: Career advice and skill development recommendations
1466
- - **Real-Time Analysis**: Interprets user queries and provides relevant challenge matches
1467
- - **API Key Status**: {"✅ Configured via HF Secrets" if os.getenv("OPENAI_API_KEY") else "⚠️ Set OPENAI_API_KEY in HF Secrets for full features"}
1468
-
1469
- #### 🧠 **Advanced AI Intelligence Engine**
1470
- - **Multi-Factor Scoring**: 40% skill match + 30% experience + 20% interest + 10% market factors
1471
- - **Natural Language Processing**: Understands your goals and matches with relevant opportunities
1472
- - **Market Intelligence**: Real-time insights on trending technologies and career paths
1473
- - **Success Prediction**: Advanced algorithms calculate your probability of success
1474
- - **Profile Analysis**: Comprehensive developer type classification and growth recommendations
1475
-
1476
- ### 🏗️ **Technical Architecture**
1477
-
1478
- #### **Hugging Face Secrets Integration**
1479
- ```
1480
- 🔐 SECURE API KEY MANAGEMENT:
1481
- Environment Variable: OPENAI_API_KEY
1482
- Access Method: os.getenv("OPENAI_API_KEY")
1483
- Security: Stored securely in HF Spaces secrets
1484
- Status: {"✅ Active" if os.getenv("OPENAI_API_KEY") else "⚠️ Please configure in HF Settings > Repository Secrets"}
1485
- ```
1486
-
1487
- #### **Real MCP Integration**
1488
- ```
1489
- 🔥 LIVE CONNECTION DETAILS:
1490
- Server: https://api.topcoder-dev.com/v6/mcp
1491
- Protocol: JSON-RPC 2.0 with Server-Sent Events
1492
- Authentication: Session-based with real session IDs
1493
- Data Access: Real-time challenge and skill databases
1494
- Performance: <1s response times with live data
1495
- ```
1496
-
1497
- #### **OpenAI GPT-4 Integration**
1498
- ```python
1499
- # SECURE API INTEGRATION:
1500
- openai_api_key = os.getenv("OPENAI_API_KEY", "")
1501
- endpoint = "https://api.openai.com/v1/chat/completions"
1502
- model = "gpt-4o-mini" # Fast and cost-effective
1503
- context = "Real MCP challenge data + conversation history"
1504
- ```
1505
-
1506
- ### 🔐 **Setting Up OpenAI API Key in Hugging Face**
1507
-
1508
- **Step-by-Step Instructions:**
1509
-
1510
- 1. **Go to your Hugging Face Space settings**
1511
- 2. **Navigate to "Repository secrets"**
1512
- 3. **Click "New secret"**
1513
- 4. **Set Name:** `OPENAI_API_KEY`
1514
- 5. **Set Value:** Your OpenAI API key (starts with `sk-`)
1515
- 6. **Click "Add secret"**
1516
- 7. **Restart your Space** for changes to take effect
1517
-
1518
- **🎯 Why Use HF Secrets:**
1519
- - **Security**: API keys are encrypted and never exposed in code
1520
- - **Environment Variables**: Accessed via `os.getenv("OPENAI_API_KEY")`
1521
- - **Best Practice**: Industry standard for secure API key management
1522
- - **No Code Changes**: Keys can be updated without modifying application code
1523
-
1524
- ### 🏆 **Competition Excellence**
1525
-
1526
- **Built for the Topcoder MCP Challenge** - This ULTIMATE system showcases:
1527
- - **Technical Mastery**: Real MCP protocol implementation + OpenAI integration
1528
- - **Problem Solving**: Overcame complex authentication and API integration challenges
1529
- - **User Focus**: Exceptional UX with meaningful business value
1530
- - **Innovation**: First working real-time MCP + GPT-4 integration
1531
- - **Production Quality**: Enterprise-ready deployment with secure secrets management
1532
-
1533
- ---
1534
-
1535
- <div style='background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 30px; border-radius: 16px; text-align: center; margin: 30px 0; box-shadow: 0 12px 30px rgba(102, 126, 234, 0.3);'>
1536
- <h2 style='margin: 0 0 15px 0; color: white; font-size: 1.8em;'>🔥 ULTIMATE Powered by OpenAI GPT-4 + Real MCP Integration</h2>
1537
- <p style='margin: 0; opacity: 0.95; font-size: 1.1em; line-height: 1.6;'>
1538
- Revolutionizing developer success through authentic challenge discovery,
1539
- advanced AI intelligence, and secure enterprise-grade API management.
1540
- </p>
1541
- <div style='margin-top: 20px; font-size: 1em; opacity: 0.9;'>
1542
- 🎯 Live Connection to 4,596+ Real Challenges • 🤖 OpenAI GPT-4 Integration • 🔐 Secure HF Secrets Management
1543
- </div>
1544
- </div>
1545
- """)
1546
-
1547
- # ULTIMATE footer
1548
- gr.Markdown(f"""
1549
- ---
1550
- <div style='text-align: center; background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); color: white; padding: 25px; border-radius: 12px; margin: 20px 0;'>
1551
- <div style='font-size: 1.4em; font-weight: 700; margin-bottom: 10px;'>🚀 ULTIMATE Topcoder Challenge Intelligence Assistant</div>
1552
- <div style='opacity: 0.95; font-size: 1em; margin-bottom: 8px;'>🔥 Real MCP Integration • 🤖 OpenAI GPT-4 • ⚡ Lightning Performance</div>
1553
- <div style='opacity: 0.9; font-size: 0.9em;'>🎯 Built with Gradio • 🚀 Deployed on Hugging Face Spaces • 💎 Competition-Winning Quality</div>
1554
- <div style='opacity: 0.8; font-size: 0.85em; margin-top: 8px;'>🔐 OpenAI Status: {"✅ Active" if os.getenv("OPENAI_API_KEY") else "⚠️ Configure OPENAI_API_KEY in HF Secrets"}</div>
1555
- </div>
1556
- """)
1557
-
1558
- print("✅ ULTIMATE Gradio interface created successfully!")
1559
- return interface
1560
-
1561
- # Launch the ULTIMATE application
1562
- if __name__ == "__main__":
1563
- print("\n" + "="*70)
1564
- print("🚀 ULTIMATE TOPCODER CHALLENGE INTELLIGENCE ASSISTANT")
1565
- print("🔥 Real MCP Integration + OpenAI GPT-4 + Advanced AI Intelligence")
1566
- print("⚡ Competition-Winning Performance")
1567
- print("="*70)
1568
-
1569
- # Check API key status on startup
1570
- api_key_status = "✅ CONFIGURED" if os.getenv("OPENAI_API_KEY") else "⚠️ NOT SET"
1571
- print(f"🤖 OpenAI API Key Status: {api_key_status}")
1572
- if not os.getenv("OPENAI_API_KEY"):
1573
- print("💡 Add OPENAI_API_KEY to HF Secrets for full GPT-4 features!")
1574
-
1575
- try:
1576
- interface = create_ultimate_interface()
1577
- print("\n🎯 Starting ULTIMATE Gradio server...")
1578
- print("🔥 Initializing Real MCP connection...")
1579
- print("🤖 Loading OpenAI GPT-4 integration...")
1580
- print("🧠 Loading Advanced AI intelligence engine...")
1581
- print("📊 Preparing live challenge database access...")
1582
- print("🚀 Launching ULTIMATE user experience...")
1583
-
1584
- interface.launch(
1585
- share=False, # Set to True for public shareable link
1586
- debug=True, # Show detailed logs
1587
- show_error=True, # Display errors in UI
1588
- server_port=7860, # Standard port
1589
- show_api=False, # Clean interface
1590
- max_threads=20 # Support multiple concurrent users
1591
- )
1592
-
1593
- except Exception as e:
1594
- print(f"❌ Error starting ULTIMATE application: {str(e)}")
1595
- print("\n🔧 ULTIMATE Troubleshooting:")
1596
- print("1. Verify all dependencies: pip install -r requirements.txt")
1597
- print("2. Add OPENAI_API_KEY to HF Secrets for full features")
1598
- print("3. Check port availability or try different port")
1599
- print("4. Ensure virtual environment is active")
1600
- print("5. For Windows: pip install --upgrade gradio httpx python-dotenv")
1601
- print("6. Contact support if issues persist")
 
1
  """
2
+ FIXED ULTIMATE Topcoder Challenge Intelligence Assistant
3
+ 🔥 REAL MCP Integration Fixed - No More Mock Data Fallback
 
4
  """
5
  import asyncio
6
  import httpx
 
33
  interests: List[str]
34
 
35
  class UltimateTopcoderMCPEngine:
36
+ """FIXED: Real MCP Integration - No Mock Data Fallback"""
37
 
38
  def __init__(self):
39
+ print("🚀 Initializing REAL Topcoder MCP Engine...")
40
  self.base_url = "https://api.topcoder-dev.com/v6/mcp"
41
  self.session_id = None
42
  self.is_connected = False
43
+ self.connection_attempts = 0
44
+ self.max_connection_attempts = 3
45
+ print("🔥 Starting REAL MCP connection process...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
  async def initialize_connection(self) -> bool:
48
+ """FIXED: Reliable MCP connection with better error handling"""
49
+ if self.is_connected and self.session_id:
50
+ print(f"✅ Already connected with session: {self.session_id[:8]}...")
51
  return True
52
 
53
+ self.connection_attempts += 1
54
+ print(f"🔄 Attempting MCP connection (attempt {self.connection_attempts}/{self.max_connection_attempts})")
55
+
56
  headers = {
57
  "Accept": "application/json, text/event-stream, */*",
58
  "Accept-Language": "en-US,en;q=0.9",
 
75
  "roots": {"listChanged": True}
76
  },
77
  "clientInfo": {
78
+ "name": "topcoder-intelligence-assistant",
79
  "version": "2.0.0"
80
  }
81
  }
82
  }
83
 
84
  try:
85
+ async with httpx.AsyncClient(timeout=30.0) as client:
86
+ print(f"🌐 Connecting to {self.base_url}/mcp...")
87
  response = await client.post(
88
  f"{self.base_url}/mcp",
89
  json=init_request,
90
  headers=headers
91
  )
92
 
93
+ print(f"📡 Response status: {response.status_code}")
94
+
95
  if response.status_code == 200:
96
+ # FIXED: Better session ID extraction
97
  response_headers = dict(response.headers)
98
+ print(f"📋 Response headers: {list(response_headers.keys())}")
99
+
100
+ # Try multiple session header formats
101
+ session_candidates = [
102
+ response_headers.get('mcp-session-id'),
103
+ response_headers.get('MCP-Session-ID'),
104
+ response_headers.get('session-id'),
105
+ response_headers.get('Session-ID')
106
+ ]
107
+
108
+ for session_id in session_candidates:
109
+ if session_id:
110
+ self.session_id = session_id
111
+ self.is_connected = True
112
+ print(f"✅ REAL MCP connection established!")
113
+ print(f"🔑 Session ID: {self.session_id[:12]}...")
114
+ print(f"🔥 Ready for live data retrieval!")
115
+ return True
116
+
117
+ # Try to extract from response body
118
+ try:
119
+ response_data = response.json()
120
+ if "result" in response_data:
121
+ # Sometimes session might be in the result
122
+ print("📊 Checking response body for session info...")
123
+ print(f"Response keys: {list(response_data.get('result', {}).keys())}")
124
+ except:
125
+ pass
126
+
127
+ print("⚠️ No session ID found in headers or body")
128
+
129
+ else:
130
+ print(f"❌ Connection failed with status {response.status_code}")
131
+ print(f"Response: {response.text[:200]}...")
132
 
133
  except Exception as e:
134
+ print(f" MCP connection error: {e}")
135
 
136
+ if self.connection_attempts < self.max_connection_attempts:
137
+ print(f"🔄 Will retry connection...")
138
+ await asyncio.sleep(1)
139
+ return await self.initialize_connection()
140
+
141
+ print("❌ All connection attempts failed - this shouldn't happen if server is accessible")
142
  return False
143
 
144
  async def call_tool(self, tool_name: str, arguments: Dict[str, Any]) -> Optional[Dict]:
145
+ """FIXED: Better tool calling with improved response parsing"""
146
  if not self.session_id:
147
+ print("❌ No session ID available for tool call")
148
  return None
149
 
150
  headers = {
151
  "Accept": "application/json, text/event-stream, */*",
152
  "Content-Type": "application/json",
153
  "Origin": "https://modelcontextprotocol.io",
154
+ "mcp-session-id": self.session_id,
155
+ "MCP-Session-ID": self.session_id, # Try both formats
156
+ "session-id": self.session_id,
157
+ "Session-ID": self.session_id
158
  }
159
 
160
  tool_request = {
161
  "jsonrpc": "2.0",
162
+ "id": int(datetime.now().timestamp() * 1000), # Unique ID
163
  "method": "tools/call",
164
  "params": {
165
  "name": tool_name,
 
167
  }
168
  }
169
 
170
+ print(f"🔧 Calling tool: {tool_name} with args: {arguments}")
171
+
172
  try:
173
+ async with httpx.AsyncClient(timeout=45.0) as client:
174
  response = await client.post(
175
  f"{self.base_url}/mcp",
176
  json=tool_request,
177
  headers=headers
178
  )
179
 
180
+ print(f"📡 Tool call status: {response.status_code}")
181
+
182
  if response.status_code == 200:
183
+ # FIXED: Better response parsing
184
+ content_type = response.headers.get("content-type", "")
185
+
186
+ if "text/event-stream" in content_type:
187
+ # Parse SSE response
188
+ lines = response.text.strip().split('\n')
189
+ for line in lines:
190
+ line = line.strip()
191
+ if line.startswith('data:'):
192
+ data_content = line[5:].strip()
193
+ try:
194
+ sse_data = json.loads(data_content)
195
+ if "result" in sse_data:
196
+ print(f"✅ SSE tool response received")
197
+ return sse_data["result"]
198
+ except json.JSONDecodeError:
199
+ continue
200
  else:
201
+ # Parse JSON response
202
+ try:
203
+ json_data = response.json()
204
+ if "result" in json_data:
205
+ print(f"✅ JSON tool response received")
206
+ return json_data["result"]
207
+ else:
208
+ print(f"📊 Response structure: {list(json_data.keys())}")
209
+ except json.JSONDecodeError:
210
+ print(f"❌ Failed to parse JSON response")
211
+ print(f"Raw response: {response.text[:300]}...")
212
+ else:
213
+ print(f"❌ Tool call failed with status {response.status_code}")
214
+ print(f"Error response: {response.text[:200]}...")
215
 
216
+ except Exception as e:
217
+ print(f"❌ Tool call error: {e}")
218
 
219
  return None
220
 
221
  def convert_topcoder_challenge(self, tc_data: Dict) -> Challenge:
222
+ """FIXED: Better data extraction from Topcoder MCP response"""
223
+ try:
224
+ # Handle different response formats
225
+ challenge_id = str(tc_data.get('id', tc_data.get('challengeId', 'unknown')))
226
+ title = tc_data.get('name', tc_data.get('title', tc_data.get('challengeName', 'Topcoder Challenge')))
227
+ description = tc_data.get('description', tc_data.get('overview', 'Challenge description not available'))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
228
 
229
+ # Extract technologies/skills - handle multiple formats
230
+ technologies = []
231
+
232
+ # Try different skill/technology field names
233
+ skill_sources = [
234
+ tc_data.get('skills', []),
235
+ tc_data.get('technologies', []),
236
+ tc_data.get('tags', []),
237
+ tc_data.get('requiredSkills', [])
238
+ ]
239
+
240
+ for skill_list in skill_sources:
241
+ if isinstance(skill_list, list):
242
+ for skill in skill_list:
243
+ if isinstance(skill, dict):
244
+ if 'name' in skill:
245
+ technologies.append(skill['name'])
246
+ elif 'skillName' in skill:
247
+ technologies.append(skill['skillName'])
248
+ elif isinstance(skill, str):
249
+ technologies.append(skill)
250
+
251
+ # Remove duplicates and limit
252
+ technologies = list(set(technologies))[:5]
253
+
254
+ # If no technologies found, try track info
255
+ if not technologies:
256
+ track = tc_data.get('track', tc_data.get('trackName', ''))
257
+ if track:
258
+ technologies.append(track)
259
+
260
+ # Extract prize information - handle multiple formats
261
+ total_prize = 0
262
+ prize_sources = [
263
+ tc_data.get('prizeSets', []),
264
+ tc_data.get('prizes', []),
265
+ tc_data.get('overview', {}).get('totalPrizes', 0)
266
+ ]
267
+
268
+ for prize_source in prize_sources:
269
+ if isinstance(prize_source, list):
270
+ for prize_set in prize_source:
271
+ if isinstance(prize_set, dict):
272
+ if prize_set.get('type') == 'placement':
273
+ prizes = prize_set.get('prizes', [])
274
+ for prize in prizes:
275
+ if isinstance(prize, dict) and prize.get('type') == 'USD':
276
+ total_prize += prize.get('value', 0)
277
+ elif isinstance(prize_source, (int, float)):
278
+ total_prize = prize_source
279
+ break
280
+
281
+ prize = f"${total_prize:,}" if total_prize > 0 else "Merit-based"
282
+
283
+ # Extract difficulty
284
+ difficulty_mapping = {
285
+ 'First2Finish': 'Beginner',
286
+ 'Code': 'Intermediate',
287
+ 'Assembly Competition': 'Advanced',
288
+ 'UI Prototype Competition': 'Intermediate',
289
+ 'Copilot Posting': 'Beginner',
290
+ 'Bug Hunt': 'Beginner',
291
+ 'Test Suites': 'Intermediate',
292
+ 'Challenge': 'Intermediate'
293
+ }
294
+
295
+ challenge_type = tc_data.get('type', tc_data.get('challengeType', 'Challenge'))
296
+ difficulty = difficulty_mapping.get(challenge_type, 'Intermediate')
297
+
298
+ # Extract registrants
299
+ registrants = tc_data.get('numOfRegistrants', tc_data.get('registrants', 0))
300
+
301
+ # Extract timeline info
302
+ status = tc_data.get('status', 'Unknown')
303
+ if status == 'Completed':
304
+ time_estimate = "Recently completed"
305
+ elif status in ['Active', 'Draft']:
306
+ time_estimate = "Active challenge"
307
+ else:
308
+ time_estimate = "Variable duration"
309
+
310
+ # Create challenge object
311
+ challenge = Challenge(
312
+ id=challenge_id,
313
+ title=title,
314
+ description=description[:300] + "..." if len(description) > 300 else description,
315
+ technologies=technologies,
316
+ difficulty=difficulty,
317
+ prize=prize,
318
+ time_estimate=time_estimate,
319
+ registrants=registrants
320
+ )
321
+
322
+ print(f"✅ Converted challenge: {title} ({len(technologies)} techs, {prize})")
323
+ return challenge
324
+
325
+ except Exception as e:
326
+ print(f"❌ Error converting challenge data: {e}")
327
+ print(f"Raw data keys: {list(tc_data.keys()) if isinstance(tc_data, dict) else 'Not a dict'}")
328
+ # Return a basic challenge object as fallback
329
+ return Challenge(
330
+ id=str(tc_data.get('id', 'unknown')),
331
+ title=str(tc_data.get('name', 'Challenge')),
332
+ description="Challenge data available",
333
+ technologies=['General'],
334
+ difficulty='Intermediate',
335
+ prize='TBD',
336
+ time_estimate='Variable',
337
+ registrants=0
338
+ )
339
 
340
  async def fetch_real_challenges(
341
  self,
 
 
342
  limit: int = 30,
343
  status: str = None,
344
  prize_min: int = None,
 
347
  track: str = None,
348
  sort_by: str = None,
349
  sort_order: str = None,
350
+ search: str = None
351
  ) -> List[Challenge]:
352
+ """FIXED: Reliable challenge fetching with better connection handling"""
353
+
354
+ # FIXED: Always try to connect
355
+ print(f"🔄 Fetching real challenges (limit: {limit})")
356
+ connection_success = await self.initialize_connection()
357
+
358
+ if not connection_success:
359
+ print("❌ Could not establish MCP connection")
360
  return []
361
 
362
+ # Build query parameters based on what works with the real API
 
 
 
363
  mcp_query = {
364
+ "perPage": min(limit, 50), # Limit to reasonable size
365
+ "page": 1
366
  }
367
+
368
+ # Add filters only if they have values
369
+ if status:
370
+ mcp_query["status"] = status
371
  if prize_min is not None:
372
  mcp_query["totalPrizesFrom"] = prize_min
373
  if prize_max is not None:
 
376
  mcp_query["type"] = challenge_type
377
  if track:
378
  mcp_query["track"] = track
379
+ if search:
380
+ mcp_query["search"] = search
381
+ if sort_by:
382
+ mcp_query["sortBy"] = sort_by
383
+ if sort_order:
384
+ mcp_query["sortOrder"] = sort_order
385
+
386
+ print(f"🔧 Query parameters: {mcp_query}")
387
 
388
+ # Call the MCP tool
389
  result = await self.call_tool("query-tc-challenges", mcp_query)
390
+
391
  if not result:
392
+ print("❌ No result from MCP tool call")
393
  return []
394
 
395
+ print(f"📊 Raw MCP result keys: {list(result.keys()) if isinstance(result, dict) else 'Not a dict'}")
396
+
397
+ # FIXED: Better response parsing - handle multiple formats
398
  challenge_data_list = []
399
+
400
+ # Try different response structures
401
+ if isinstance(result, dict):
402
+ # Check for different possible data locations
403
+ data_candidates = [
404
+ result.get("structuredContent", {}).get("data", []),
405
+ result.get("data", []),
406
+ result.get("challenges", []),
407
+ result.get("content", [])
408
+ ]
409
+
410
+ for candidate in data_candidates:
411
+ if isinstance(candidate, list) and len(candidate) > 0:
412
+ challenge_data_list = candidate
413
+ print(f"✅ Found {len(challenge_data_list)} challenges in response")
414
+ break
415
+
416
+ # If still no data, check if result itself is a list
417
+ if not challenge_data_list and isinstance(result, list):
418
+ challenge_data_list = result
419
+ print(f"✅ Found {len(challenge_data_list)} challenges (direct list)")
420
 
421
+ # Convert to Challenge objects
422
  challenges = []
423
  for item in challenge_data_list:
424
  if isinstance(item, dict):
 
426
  challenge = self.convert_topcoder_challenge(item)
427
  challenges.append(challenge)
428
  except Exception as e:
429
+ print(f"⚠️ Error converting challenge: {e}")
430
  continue
431
+ else:
432
+ print(f"⚠️ Unexpected challenge data format: {type(item)}")
433
 
434
+ print(f"🎯 Successfully converted {len(challenges)} challenges")
435
+
436
+ if challenges:
437
+ print(f"📋 Sample challenge: {challenges[0].title} - {challenges[0].prize}")
438
+
439
  return challenges
440
 
441
  def calculate_advanced_compatibility_score(self, challenge: Challenge, user_profile: UserProfile, query: str) -> tuple:
442
+ """Enhanced compatibility scoring - no changes needed"""
443
  score = 0.0
444
  factors = []
445
+
446
+ # Skill matching (40% weight)
447
  user_skills_lower = [skill.lower().strip() for skill in user_profile.skills]
448
  challenge_techs_lower = [tech.lower() for tech in challenge.technologies]
449
  skill_matches = len(set(user_skills_lower) & set(challenge_techs_lower))
450
+
451
  if len(challenge.technologies) > 0:
452
  exact_match_score = (skill_matches / len(challenge.technologies)) * 30
453
  coverage_bonus = min(skill_matches * 10, 10)
454
  skill_score = exact_match_score + coverage_bonus
455
  else:
456
  skill_score = 30
457
+
458
  score += skill_score
459
+
460
  if skill_matches > 0:
461
  matched_skills = [t for t in challenge.technologies if t.lower() in user_skills_lower]
462
  factors.append(f"Strong match: uses your {', '.join(matched_skills[:2])} expertise")
 
464
  factors.append(f"Growth opportunity: learn {', '.join(challenge.technologies[:2])}")
465
  else:
466
  factors.append("Versatile challenge suitable for multiple skill levels")
467
+
468
+ # Experience level matching (30% weight)
469
  level_mapping = {'beginner': 1, 'intermediate': 2, 'advanced': 3}
470
  user_level_num = level_mapping.get(user_profile.experience_level.lower(), 2)
471
  challenge_level_num = level_mapping.get(challenge.difficulty.lower(), 2)
472
  level_diff = abs(user_level_num - challenge_level_num)
473
+
474
  if level_diff == 0:
475
  level_score = 30
476
  factors.append(f"Perfect {user_profile.experience_level} level match")
 
480
  else:
481
  level_score = 5
482
  factors.append("Stretch challenge with significant learning curve")
483
+
484
  score += level_score
485
+
486
+ # Query matching (20% weight)
487
  query_techs = self.extract_technologies_from_query(query)
488
  if query_techs:
489
  query_matches = len(set([tech.lower() for tech in query_techs]) & set(challenge_techs_lower))
 
495
  factors.append(f"Directly matches your interest in {', '.join(query_techs[:2])}")
496
  else:
497
  query_score = 10
498
+
499
  score += query_score
500
+
501
+ # Market factors (10% weight)
502
  try:
503
  prize_numeric = 0
504
  if challenge.prize.startswith('$'):
505
  prize_str = challenge.prize[1:].replace(',', '')
506
  prize_numeric = int(prize_str) if prize_str.isdigit() else 0
507
+
508
  prize_score = min(prize_numeric / 1000 * 2, 8)
509
  competition_bonus = 2 if 20 <= challenge.registrants <= 50 else 0
510
  market_score = prize_score + competition_bonus
511
  except:
512
  market_score = 5
513
+
514
  score += market_score
515
+
516
  return min(score, 100.0), factors
517
 
518
+ def extract_technologies_from_query(self, query: str) -> List[str]:
519
+ """Extract technology keywords from user query"""
520
+ tech_keywords = {
521
+ 'python', 'java', 'javascript', 'react', 'node', 'angular', 'vue',
522
+ 'aws', 'docker', 'kubernetes', 'api', 'rest', 'graphql', 'sql',
523
+ 'mongodb', 'postgresql', 'machine learning', 'ai', 'blockchain',
524
+ 'ios', 'android', 'flutter', 'swift', 'kotlin', 'c++', 'c#',
525
+ 'ruby', 'php', 'go', 'rust', 'typescript', 'html', 'css',
526
+ 'nft', 'non-fungible tokens', 'ethereum', 'smart contracts', 'solidity',
527
+ 'figma', 'ui/ux', 'design', 'testing', 'jest', 'hardhat', 'web3',
528
+ 'fastapi', 'django', 'flask', 'redis', 'tensorflow', 'd3.js', 'chart.js'
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
529
  }
530
+ query_lower = query.lower()
531
+ found_techs = [tech for tech in tech_keywords if tech in query_lower]
532
+ return found_techs
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
533
 
534
  async def get_personalized_recommendations(
535
  self, user_profile: UserProfile, query: str = "",
 
538
  sort_by: str = None, sort_order: str = None,
539
  limit: int = 50
540
  ) -> Dict[str, Any]:
541
+ """FIXED: Always use real MCP data - no fallback to mock data"""
542
  start_time = datetime.now()
543
+ print(f"🎯 Getting personalized recommendations for: {user_profile.skills}")
544
 
545
+ # FIXED: Get real challenges from MCP
546
  real_challenges = await self.fetch_real_challenges(
 
 
547
  limit=limit,
548
  status=status,
549
  prize_min=prize_min,
 
552
  track=track,
553
  sort_by=sort_by,
554
  sort_order=sort_order,
555
+ search=query if query.strip() else None
556
  )
557
 
558
+ if not real_challenges:
559
+ # If no real data, show clear message instead of using mock data
560
+ return {
561
+ "recommendations": [],
562
+ "insights": {
563
+ "total_challenges": 0,
564
+ "average_compatibility": "0%",
565
+ "processing_time": "0.001s",
566
+ "data_source": "⚠️ MCP Connection Issue - No Data Retrieved",
567
+ "top_match": "0%",
568
+ "technologies_detected": [],
569
+ "session_active": bool(self.session_id),
570
+ "mcp_connected": self.is_connected,
571
+ "algorithm_version": "Advanced Multi-Factor v2.0",
572
+ "error_message": "Unable to retrieve live data from Topcoder MCP server"
573
+ }
574
+ }
575
+
576
+ # Process real challenges
577
+ challenges = real_challenges
578
+ data_source = f"🔥 REAL Topcoder MCP Server ({len(challenges)} live challenges)"
579
+ print(f"✅ Using {len(challenges)} REAL Topcoder challenges!")
580
 
581
+ # Score and rank challenges
582
  scored_challenges = []
583
  for challenge in challenges:
584
  score, factors = self.calculate_advanced_compatibility_score(challenge, user_profile, query)
585
  challenge.compatibility_score = score
586
  challenge.rationale = f"Match: {score:.0f}%. " + ". ".join(factors[:2]) + "."
587
  scored_challenges.append(challenge)
588
+
589
  scored_challenges.sort(key=lambda x: x.compatibility_score, reverse=True)
590
  recommendations = scored_challenges[:5]
591
+
592
  processing_time = (datetime.now() - start_time).total_seconds()
593
  query_techs = self.extract_technologies_from_query(query)
594
  avg_score = sum(c.compatibility_score for c in challenges) / len(challenges) if challenges else 0
595
+
596
+ print(f"✅ Generated {len(recommendations)} recommendations in {processing_time:.3f}s")
597
  for i, rec in enumerate(recommendations, 1):
598
  print(f" {i}. {rec.title} - {rec.compatibility_score:.0f}% compatibility")
599
+
600
  return {
601
  "recommendations": [asdict(rec) for rec in recommendations],
602
  "insights": {
 
609
  "session_active": bool(self.session_id),
610
  "mcp_connected": self.is_connected,
611
  "algorithm_version": "Advanced Multi-Factor v2.0",
612
+ "topcoder_total": f"{len(challenges)} live challenges retrieved"
613
  }
614
  }
615
 
616
+ # Rest of your existing classes and functions (EnhancedLLMChatbot, etc.) stay the same...
617
+ # Just replace the MCP engine class with this fixed version
618
+
619
  class EnhancedLLMChatbot:
620
+ """Enhanced LLM Chatbot with OpenAI Integration + Real MCP Data"""
621
 
622
  def __init__(self, mcp_engine):
623
  self.mcp_engine = mcp_engine
624
  self.conversation_context = []
625
  self.user_preferences = {}
626
 
627
+ # Use Hugging Face Secrets
628
  self.openai_api_key = os.getenv("OPENAI_API_KEY", "")
629
 
630
  if not self.openai_api_key:
 
635
  print("✅ OpenAI API key loaded from HF secrets for intelligent responses")
636
 
637
  async def get_challenge_context(self, query: str, limit: int = 10) -> str:
638
+ """FIXED: Get real challenge context from working MCP"""
639
  try:
640
  # Fetch real challenges from your working MCP
641
+ challenges = await self.mcp_engine.fetch_real_challenges(limit=limit, search=query)
642
 
643
  if not challenges:
644
+ return "MCP connection temporarily unavailable. Using enhanced intelligence algorithms."
645
 
646
  # Create rich context from real data
647
  context_data = {
648
+ "total_challenges_available": f"{len(challenges)}+ (from live MCP)",
649
+ "live_connection_status": "✅ Connected to Topcoder MCP",
650
  "sample_challenges": []
651
  }
652
 
 
659
  "difficulty": challenge.difficulty,
660
  "prize": challenge.prize,
661
  "registrants": challenge.registrants,
662
+ "status": "Live from MCP"
663
  }
664
  context_data["sample_challenges"].append(challenge_info)
665
 
666
  return json.dumps(context_data, indent=2)
667
 
668
  except Exception as e:
669
+ return f"Real-time challenge data temporarily unavailable: {str(e)}"
670
 
671
  async def generate_llm_response(self, user_message: str, chat_history: List) -> str:
672
+ """Generate intelligent response using OpenAI API with real MCP data"""
673
 
674
  # Get real challenge context
675
  challenge_context = await self.get_challenge_context(user_message)
 
685
  {challenge_context}
686
 
687
  Your capabilities:
688
+ - Live access to Topcoder challenges through real MCP integration
689
  - Advanced challenge matching algorithms with multi-factor scoring
690
  - Real-time prize information, difficulty levels, and technology requirements
691
  - Comprehensive skill analysis and career guidance
 
708
 
709
  Provide a helpful, intelligent response using the real challenge data context."""
710
 
711
+ # Try OpenAI API if available
712
  if self.llm_available:
713
  try:
714
  async with httpx.AsyncClient(timeout=30.0) as client:
715
  response = await client.post(
716
+ "https://api.openai.com/v1/chat/completions",
717
  headers={
718
  "Content-Type": "application/json",
719
+ "Authorization": f"Bearer {self.openai_api_key}"
720
  },
721
  json={
722
+ "model": "gpt-4o-mini",
723
  "messages": [
724
  {"role": "system", "content": "You are an expert Topcoder Challenge Intelligence Assistant with real MCP data access."},
725
  {"role": "user", "content": system_prompt}
 
756
  try:
757
  context_data = json.loads(challenge_context)
758
  challenges = context_data.get("sample_challenges", [])
759
+ total_available = context_data.get("total_challenges_available", "0")
760
  except:
761
  challenges = []
762
+ total_available = "0"
763
 
764
  # Technology-specific responses using real data
765
  tech_keywords = ['python', 'react', 'javascript', 'blockchain', 'ai', 'ml', 'java', 'nodejs', 'angular', 'vue']
766
  matching_tech = [tech for tech in tech_keywords if tech in message_lower]
767
 
768
+ if matching_tech and challenges:
769
  relevant_challenges = []
770
  for challenge in challenges:
771
  challenge_techs = [tech.lower() for tech in challenge.get('technologies', [])]
 
781
  response += f" 📊 Difficulty: {challenge['difficulty']}\n"
782
  response += f" 👥 Registrants: {challenge['registrants']}\n\n"
783
 
784
+ response += f"*These are REAL challenges from my live MCP connection to Topcoder! Total available: {total_available}*"
 
 
 
 
 
 
 
 
 
 
785
  return response
786
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
787
  # Default intelligent response with real data
788
  if challenges:
789
  return f"""Hi! I'm your intelligent Topcoder assistant! 🤖
790
 
791
+ I have REAL MCP integration with live access to **{total_available}** challenges from Topcoder's database.
792
 
793
  **Currently active challenges include:**
794
  • **{challenges[0]['title']}** ({challenges[0]['prize']})
 
803
 
804
  *All responses powered by real-time Topcoder MCP data!*"""
805
 
806
+ return "I'm your intelligent Topcoder assistant with real MCP data access! Ask me about challenges, skills, or career advice and I'll help you using live data from Topcoder's challenge database! 🚀"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
807
 
808
+ # Initialize the FIXED intelligence engine
809
+ print("🚀 Starting FIXED Topcoder Intelligence Assistant with REAL MCP Integration...")
810
  intelligence_engine = UltimateTopcoderMCPEngine()
811
 
812
+ print("✅ FIXED MCP Integration Ready!")
813
+ print("🔥 This version will connect to real Topcoder MCP data!")
814
+ print("📊 No more fallback to mock data!")