swayamshetkar commited on
Commit
50bbce6
·
1 Parent(s): b8d4b15

Gemini changes (latest)

Browse files
Files changed (3) hide show
  1. Dockerfile +8 -13
  2. app.py +226 -555
  3. requirements.txt +5 -4
Dockerfile CHANGED
@@ -1,19 +1,14 @@
1
- FROM python:3.10-slim
 
2
 
3
- WORKDIR /code
4
 
5
- # Copy requirements
6
  COPY requirements.txt .
 
7
 
8
- # Install dependencies
9
- RUN pip install --no-cache-dir --upgrade pip && \
10
- pip install --no-cache-dir -r requirements.txt
11
 
12
- # Copy application code
13
- COPY main.py .
14
 
15
- # Expose port
16
- EXPOSE 7860
17
-
18
- # Run the application
19
- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
 
1
+ # Dockerfile (optional)
2
+ FROM python:3.11-slim
3
 
4
+ WORKDIR /app
5
 
 
6
  COPY requirements.txt .
7
+ RUN pip install --no-cache-dir -r requirements.txt
8
 
9
+ COPY . .
 
 
10
 
11
+ ENV PYTHONUNBUFFERED=1
12
+ EXPOSE 8080
13
 
14
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8080"]
 
 
 
 
app.py CHANGED
@@ -1,33 +1,66 @@
1
- from fastapi import FastAPI, HTTPException
 
2
  from pydantic import BaseModel
 
 
3
  import requests
 
4
  import json
5
- import os
6
- import hashlib
7
  from datetime import datetime, timedelta
8
- from typing import Optional
9
-
10
- app = FastAPI()
11
-
12
- # In-memory cache (resets when Space restarts)
13
- CACHE = {}
14
- CACHE_DURATION = timedelta(hours=1) # Cache for 1 hour
15
-
16
- # Use Hugging Face Inference API (FREE!)
17
- HF_TOKEN = os.environ.get("HF_TOKEN", "")
18
-
19
- MODELS = {
20
- "default": "microsoft/Phi-3-mini-4k-instruct",
21
- "backup": "HuggingFaceH4/zephyr-7b-beta",
22
- }
23
-
24
- # Stats tracking
25
- STATS = {
26
- "cache_hits": 0,
27
- "api_calls": 0,
28
- "total_requests": 0
29
- }
30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
31
  class GenerateRequest(BaseModel):
32
  custom_prompt: str = ""
33
 
@@ -35,551 +68,189 @@ class DetailRequest(BaseModel):
35
  idea_id: int
36
  idea_title: str
37
 
38
- def get_cache_key(prefix: str, data: str) -> str:
39
- """Generate cache key from data"""
40
- return f"{prefix}:{hashlib.md5(data.encode()).hexdigest()}"
 
 
 
 
 
41
 
42
- def get_from_cache(key: str):
43
- """Get from cache if not expired"""
44
- if key in CACHE:
45
- entry = CACHE[key]
46
- if datetime.now() < entry["expires"]:
47
- STATS["cache_hits"] += 1
48
- return entry["data"]
49
- else:
50
- del CACHE[key]
51
- return None
52
-
53
- def save_to_cache(key: str, data: dict):
54
- """Save to cache with expiration"""
55
- CACHE[key] = {
56
- "data": data,
57
- "expires": datetime.now() + CACHE_DURATION
58
- }
59
-
60
- def call_hf_api(prompt: str, max_tokens: int = 800, model: str = "default") -> str:
61
- """Call Hugging Face Inference API with rate limit handling"""
62
- STATS["api_calls"] += 1
63
-
64
- api_url = f"https://api-inference.huggingface.co/models/{MODELS[model]}"
65
-
66
  headers = {
67
- "Authorization": f"Bearer {HF_TOKEN}" if HF_TOKEN else ""
68
- }
69
-
70
- payload = {
71
- "inputs": prompt,
72
- "parameters": {
73
- "max_new_tokens": max_tokens,
74
- "temperature": 0.7,
75
- "top_p": 0.9,
76
- "do_sample": True,
77
- "return_full_text": False
78
- }
79
- }
80
-
81
- try:
82
- response = requests.post(api_url, headers=headers, json=payload, timeout=30)
83
-
84
- if response.status_code == 503:
85
- return "MODEL_LOADING"
86
- elif response.status_code == 429:
87
- if model == "default":
88
- return call_hf_api(prompt, max_tokens, "backup")
89
- return "RATE_LIMIT"
90
-
91
- response.raise_for_status()
92
- result = response.json()
93
-
94
- if isinstance(result, list) and len(result) > 0:
95
- return result[0].get("generated_text", "")
96
- return ""
97
-
98
- except Exception as e:
99
- print(f"API Error: {e}")
100
- return ""
101
-
102
- def generate_ideas_with_ai(custom_prompt: str) -> dict:
103
- """Generate hackathon ideas with caching"""
104
- STATS["total_requests"] += 1
105
-
106
- # Check cache first
107
- cache_key = get_cache_key("ideas", custom_prompt.lower().strip())
108
- cached = get_from_cache(cache_key)
109
- if cached:
110
- cached["_from_cache"] = True
111
- return cached
112
-
113
- prompt = f"""You are a hackathon project expert. Generate 3 unique, creative hackathon project ideas.
114
- Focus: {custom_prompt if custom_prompt else "innovative web applications"}
115
-
116
- For each idea, provide:
117
- 1. Title (creative, concise)
118
- 2. Elevator pitch (one compelling sentence)
119
- 3. Overview (2-3 sentences explaining the project)
120
- 4. Tech stack (3-4 technologies)
121
- 5. Difficulty (Easy/Medium/Hard)
122
- 6. Time estimate in hours (12-48)
123
-
124
- Format your response EXACTLY like this:
125
-
126
- IDEA 1:
127
- Title: [title]
128
- Elevator: [pitch]
129
- Overview: [description]
130
- Tech: [tech1, tech2, tech3]
131
- Difficulty: [Easy/Medium/Hard]
132
- Hours: [number]
133
-
134
- IDEA 2:
135
- Title: [title]
136
- Elevator: [pitch]
137
- Overview: [description]
138
- Tech: [tech1, tech2, tech3]
139
- Difficulty: [Easy/Medium/Hard]
140
- Hours: [number]
141
-
142
- IDEA 3:
143
- Title: [title]
144
- Elevator: [pitch]
145
- Overview: [description]
146
- Tech: [tech1, tech2, tech3]
147
- Difficulty: [Easy/Medium/Hard]
148
- Hours: [number]
149
-
150
- Generate now:"""
151
-
152
- response = call_hf_api(prompt, max_tokens=1000)
153
-
154
- if response == "MODEL_LOADING":
155
- return {
156
- "error": "Model is loading. Please wait 30 seconds and try again.",
157
- "ideas": [],
158
- "best_pick_id": 1,
159
- "best_pick_reason": "Model loading"
160
- }
161
- elif response == "RATE_LIMIT":
162
- # Return cached fallback ideas
163
- return get_cached_fallback_ideas(custom_prompt)
164
-
165
- ideas = parse_ideas_from_text(response)
166
-
167
- if len(ideas) < 3:
168
- ideas = generate_simple_fallback(custom_prompt)
169
-
170
- result = {
171
- "ideas": ideas,
172
- "best_pick_id": 2,
173
- "best_pick_reason": "Balanced scope with achievable goals and innovative features",
174
- "_from_cache": False
175
- }
176
-
177
- # Save to cache
178
- save_to_cache(cache_key, result)
179
-
180
- return result
181
-
182
- def get_cached_fallback_ideas(custom_prompt: str) -> dict:
183
- """Return pre-generated fallback ideas when rate limited"""
184
- focus = custom_prompt.lower().strip() if custom_prompt else "web"
185
-
186
- # Check if we have cached version of similar prompts
187
- for key in CACHE:
188
- if focus in key:
189
- cached = get_from_cache(key)
190
- if cached:
191
- cached["_from_cache"] = True
192
- cached["_note"] = "Rate limited - using cached similar results"
193
- return cached
194
-
195
- # Generate simple fallback
196
- result = {
197
- "ideas": generate_simple_fallback(custom_prompt),
198
- "best_pick_id": 2,
199
- "best_pick_reason": "Rate limited - using fallback ideas",
200
- "_from_cache": False,
201
- "_note": "Rate limit reached. These are fallback ideas."
202
  }
203
- return result
204
 
205
- def parse_ideas_from_text(text: str) -> list:
206
- """Parse AI-generated text into structured ideas"""
207
- ideas = []
208
- lines = text.split('\n')
209
- current_idea = {}
210
-
211
- for line in lines:
212
- line = line.strip()
213
- if not line:
214
- continue
215
-
216
- if line.startswith("Title:"):
217
- if current_idea and len(current_idea) >= 5:
218
- ideas.append(format_idea(current_idea, len(ideas) + 1))
219
- current_idea = {"title": line.replace("Title:", "").strip()}
220
- elif line.startswith("Elevator:"):
221
- current_idea["elevator"] = line.replace("Elevator:", "").strip()
222
- elif line.startswith("Overview:"):
223
- current_idea["overview"] = line.replace("Overview:", "").strip()
224
- elif line.startswith("Tech:"):
225
- tech_str = line.replace("Tech:", "").strip()
226
- current_idea["tech"] = [t.strip() for t in tech_str.split(',')]
227
- elif line.startswith("Difficulty:"):
228
- current_idea["difficulty"] = line.replace("Difficulty:", "").strip()
229
- elif line.startswith("Hours:"):
230
- try:
231
- current_idea["hours"] = int(line.replace("Hours:", "").strip())
232
- except:
233
- current_idea["hours"] = 24
234
-
235
- if current_idea and len(current_idea) >= 5:
236
- ideas.append(format_idea(current_idea, len(ideas) + 1))
237
-
238
- return ideas[:3]
239
-
240
- def format_idea(data: dict, id: int) -> dict:
241
- """Format idea into expected structure"""
242
- return {
243
- "id": id,
244
- "title": data.get("title", f"Project {id}")[:100],
245
- "elevator": data.get("elevator", "An innovative hackathon project")[:200],
246
- "overview": data.get("overview", "A comprehensive solution for developers")[:400],
247
- "primary_tech_stack": data.get("tech", ["React", "Node.js", "MongoDB"])[:4],
248
- "difficulty": data.get("difficulty", "Medium"),
249
- "time_estimate_hours": data.get("hours", 24)
250
- }
251
-
252
- def generate_simple_fallback(custom_prompt: str) -> list:
253
- """High-quality fallback ideas"""
254
- focus = custom_prompt.lower() if custom_prompt else "web"
255
-
256
- # AI/ML ideas
257
- if any(k in focus for k in ["ai", "ml", "machine learning", "neural"]):
258
- return [
259
- {
260
- "id": 1,
261
- "title": "AI Code Review Assistant",
262
- "elevator": "Automatically analyze code for bugs, security issues, and best practices using machine learning",
263
- "overview": "A developer tool that integrates with Git workflows to provide intelligent code reviews, detect potential bugs early, and suggest improvements based on industry best practices and learned patterns.",
264
- "primary_tech_stack": ["Python", "TensorFlow", "FastAPI", "VS Code API"],
265
- "difficulty": "Medium",
266
- "time_estimate_hours": 28
267
- },
268
- {
269
- "id": 2,
270
- "title": "Smart Document Summarizer",
271
- "elevator": "Transform lengthy documents into concise summaries using natural language processing",
272
- "overview": "An NLP-powered web application that processes PDFs and documents to generate intelligent summaries, extract key points, and identify action items automatically, saving hours of reading time.",
273
- "primary_tech_stack": ["React", "Python", "Transformers", "MongoDB"],
274
- "difficulty": "Easy",
275
- "time_estimate_hours": 20
276
- },
277
- {
278
- "id": 3,
279
- "title": "Predictive Analytics Dashboard",
280
- "elevator": "Forecast trends and patterns using machine learning on business data",
281
- "overview": "A comprehensive analytics platform that uses ML algorithms to predict future trends, identify patterns in business data, and provide actionable insights through interactive visualizations.",
282
- "primary_tech_stack": ["React", "scikit-learn", "PostgreSQL", "Plotly"],
283
- "difficulty": "Hard",
284
- "time_estimate_hours": 36
285
- }
286
- ]
287
-
288
- # Web3/Blockchain ideas
289
- elif any(k in focus for k in ["web3", "blockchain", "crypto", "nft"]):
290
- return [
291
- {
292
- "id": 1,
293
- "title": "NFT Certificate Platform",
294
- "elevator": "Issue tamper-proof digital certificates as NFTs for courses and achievements",
295
- "overview": "A blockchain-based certification system that allows educational institutions to create and distribute verifiable certificates as NFTs, ensuring authenticity and preventing fraud.",
296
- "primary_tech_stack": ["Solidity", "React", "Polygon", "IPFS"],
297
- "difficulty": "Medium",
298
- "time_estimate_hours": 26
299
- },
300
- {
301
- "id": 2,
302
- "title": "Decentralized Freelance Marketplace",
303
- "elevator": "Connect freelancers and clients using smart contracts for secure payments",
304
- "overview": "A Web3 platform where freelancers showcase work and clients post projects, with smart contracts handling automatic milestone-based payments and dispute resolution.",
305
- "primary_tech_stack": ["Solidity", "Next.js", "Ethereum", "The Graph"],
306
- "difficulty": "Hard",
307
- "time_estimate_hours": 40
308
- },
309
- {
310
- "id": 3,
311
- "title": "Crypto Portfolio Tracker",
312
- "elevator": "Track cryptocurrency investments across multiple wallets in real-time",
313
- "overview": "A comprehensive dashboard aggregating data from various blockchain networks, providing real-time portfolio valuation, profit/loss tracking, and automated tax reporting.",
314
- "primary_tech_stack": ["React", "Web3.js", "Node.js", "CoinGecko API"],
315
- "difficulty": "Easy",
316
- "time_estimate_hours": 18
317
- }
318
- ]
319
-
320
- # Default/General ideas
321
- else:
322
- return [
323
  {
324
- "id": 1,
325
- "title": "Real-Time Collaboration Hub",
326
- "elevator": "Digital workspace for remote teams with live editing and video chat",
327
- "overview": "A comprehensive collaboration platform featuring real-time document editing, interactive whiteboard, video conferencing, and task management all in one seamless interface.",
328
- "primary_tech_stack": ["React", "WebSocket", "Node.js", "WebRTC"],
329
- "difficulty": "Medium",
330
- "time_estimate_hours": 30
331
- },
332
- {
333
- "id": 2,
334
- "title": "Smart Recipe Finder",
335
- "elevator": "Discover recipes based on ingredients you already have at home",
336
- "overview": "A mobile-optimized web app that helps reduce food waste by suggesting recipes based on available ingredients, complete with nutritional information and step-by-step cooking instructions.",
337
- "primary_tech_stack": ["Vue.js", "Node.js", "Spoonacular API", "MongoDB"],
338
- "difficulty": "Easy",
339
- "time_estimate_hours": 16
340
- },
341
- {
342
- "id": 3,
343
- "title": "Event Discovery Platform",
344
- "elevator": "Find local events and meetups happening in your community",
345
- "overview": "A location-based platform aggregating events from multiple sources, providing personalized recommendations, facilitating RSVPs, and enabling social connections around shared interests.",
346
- "primary_tech_stack": ["React", "Google Maps API", "Firebase", "Express"],
347
- "difficulty": "Medium",
348
- "time_estimate_hours": 24
349
- }
350
- ]
351
-
352
- def generate_details_with_ai(idea_id: int, idea_title: str) -> dict:
353
- """Generate detailed plan with caching"""
354
- # Check cache
355
- cache_key = get_cache_key("details", f"{idea_id}:{idea_title}")
356
- cached = get_from_cache(cache_key)
357
- if cached:
358
- return cached
359
-
360
- # For details, always use simple generation to save API calls
361
- result = generate_simple_details(idea_id, idea_title)
362
- save_to_cache(cache_key, result)
363
- return result
364
-
365
- def generate_simple_details(idea_id: int, idea_title: str) -> dict:
366
- """Generate structured details"""
367
- return {
368
- "id": idea_id,
369
- "title": idea_title,
370
- "mermaid_architecture": """graph TB
371
- A[Frontend UI] --> B[API Gateway]
372
- B --> C[Authentication]
373
- B --> D[Business Logic]
374
- D --> E[Database]
375
- D --> F[External Services]
376
- E --> G[(Data Store)]""",
377
- "phases": [
378
- {
379
- "name": "MVP",
380
- "time_hours": 20,
381
- "tasks": [
382
- "Initialize project with React/Vue and set up development environment",
383
- "Design and implement database schema with proper indexing",
384
- "Build authentication system with JWT tokens",
385
- "Create core API endpoints for main functionality",
386
- "Develop essential UI components and layouts",
387
- "Integrate frontend with backend APIs",
388
- "Implement basic error handling and validation"
389
- ],
390
- "deliverables": [
391
- "Functional prototype with core features operational",
392
- "User authentication and authorization working",
393
- "Database deployed with initial data",
394
- "Responsive UI covering primary user flows"
395
- ]
396
- },
397
- {
398
- "name": "Polish",
399
- "time_hours": 18,
400
- "tasks": [
401
- "Enhance UI/UX with animations and micro-interactions",
402
- "Add comprehensive error handling across all endpoints",
403
- "Implement loading states and user feedback mechanisms",
404
- "Optimize API performance and database queries",
405
- "Add unit tests for critical business logic",
406
- "Implement logging and monitoring systems",
407
- "Create comprehensive user documentation"
408
- ],
409
- "deliverables": [
410
- "Production-ready interface with polished UX",
411
- "Robust error handling for all edge cases",
412
- "Performance optimizations implemented",
413
- "Test coverage for critical paths",
414
- "Complete API and user documentation"
415
- ]
416
- },
417
- {
418
- "name": "Demo",
419
- "time_hours": 10,
420
- "tasks": [
421
- "Design compelling presentation deck with key metrics",
422
- "Create realistic demo scenarios and user personas",
423
- "Set up production demo environment and test thoroughly",
424
- "Record backup video demo as contingency plan",
425
- "Practice pitch and prepare for Q&A",
426
- "Optimize demo flow for maximum impact",
427
- "Final bug fixes and polish for demo features"
428
- ],
429
- "deliverables": [
430
- "Professional pitch deck ready to present",
431
- "Polished live demo environment",
432
- "Backup video demonstration recorded",
433
- "Q&A preparation with anticipated questions",
434
- "Clean GitHub repository with README"
435
  ]
436
  }
437
  ],
438
- "critical_code_snippets": [
439
- {
440
- "title": "Express API Server with Error Handling",
441
- "language": "javascript",
442
- "code": """const express = require('express');
443
- const cors = require('cors');
444
- const helmet = require('helmet');
445
-
446
- const app = express();
447
-
448
- // Security and parsing middleware
449
- app.use(helmet());
450
- app.use(cors());
451
- app.use(express.json());
452
-
453
- // API Routes
454
- app.post('/api/data', async (req, res) => {
455
- try {
456
- const result = await processData(req.body);
457
- res.json({ success: true, data: result });
458
- } catch (error) {
459
- console.error('Error:', error);
460
- res.status(500).json({
461
- success: false,
462
- error: error.message
463
- });
464
- }
465
- });
466
-
467
- // Global error handler
468
- app.use((err, req, res, next) => {
469
- res.status(err.status || 500).json({
470
- error: err.message
471
- });
472
- });
473
-
474
- const PORT = process.env.PORT || 3000;
475
- app.listen(PORT);"""
476
- },
477
- {
478
- "title": "React Component with Hooks",
479
- "language": "javascript",
480
- "code": """import React, { useState, useEffect } from 'react';
481
- import axios from 'axios';
482
-
483
- function Dashboard() {
484
- const [data, setData] = useState([]);
485
- const [loading, setLoading] = useState(true);
486
- const [error, setError] = useState(null);
487
-
488
- useEffect(() => {
489
- fetchData();
490
- }, []);
491
-
492
- const fetchData = async () => {
493
- try {
494
- setLoading(true);
495
- const res = await axios.get('/api/data');
496
- setData(res.data);
497
- setError(null);
498
- } catch (err) {
499
- setError(err.message);
500
- } finally {
501
- setLoading(false);
502
  }
503
- };
504
 
505
- if (loading) return <Spinner />;
506
- if (error) return <Error message={error} />;
507
-
508
- return (
509
- <div className="dashboard">
510
- {data.map(item => (
511
- <Card key={item.id} data={item} />
512
- ))}
513
- </div>
514
- );
515
- }"""
516
- }
517
- ],
518
- "ui_components": [
519
- {"name": "Navigation Bar", "purpose": "Primary navigation with branding and user menu"},
520
- {"name": "Dashboard View", "purpose": "Main interface with data visualization and key metrics"},
521
- {"name": "Form Components", "purpose": "User input collection with real-time validation"},
522
- {"name": "Data Table", "purpose": "Display items with sorting, filtering, and pagination"},
523
- {"name": "Modal Dialogs", "purpose": "Handle confirmations and detailed item views"},
524
- {"name": "Settings Panel", "purpose": "User preferences and app configuration"}
525
- ],
526
- "risks_and_mitigations": [
527
- {"risk": "API rate limits affecting user experience", "mitigation": "Implement request caching, queuing, and retry logic with exponential backoff"},
528
- {"risk": "Database performance degradation under load", "mitigation": "Add proper indexing, implement pagination, use connection pooling, and consider read replicas"},
529
- {"risk": "Authentication security vulnerabilities", "mitigation": "Use battle-tested libraries (Passport.js), implement rate limiting, secure password hashing with bcrypt"},
530
- {"risk": "Insufficient time for complete feature set", "mitigation": "Prioritize MVP features using MoSCoW method, use feature flags for progressive rollout"}
531
- ]
532
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
533
 
 
534
  @app.get("/")
535
- def home():
536
- return {
537
- "status": "AI Hackathon Generator (Optimized for Launch)",
538
- "endpoints": ["/generate", "/details", "/stats"],
539
- "model": MODELS["default"],
540
- "features": [
541
- "Real AI generation",
542
- "Smart caching (1 hour)",
543
- "Rate limit protection",
544
- "Fallback system"
545
- ],
546
- "cache_info": {
547
- "cached_entries": len(CACHE),
548
- "duration_hours": 1
549
- }
550
- }
551
 
552
  @app.get("/stats")
553
- def get_stats():
554
- """Get usage statistics"""
555
- cache_hit_rate = 0
556
- if STATS["total_requests"] > 0:
557
- cache_hit_rate = (STATS["cache_hits"] / STATS["total_requests"]) * 100
558
-
559
- return {
560
- "total_requests": STATS["total_requests"],
561
- "api_calls": STATS["api_calls"],
562
- "cache_hits": STATS["cache_hits"],
563
- "cache_hit_rate": f"{cache_hit_rate:.1f}%",
564
- "cached_entries": len(CACHE),
565
- "api_calls_saved": STATS["cache_hits"]
566
- }
567
 
568
  @app.post("/generate")
569
- def generate(req: GenerateRequest):
570
- try:
571
- return generate_ideas_with_ai(req.custom_prompt)
572
- except Exception as e:
573
- return {
574
- "error": str(e),
575
- "ideas": generate_simple_fallback(req.custom_prompt),
576
- "best_pick_id": 2,
577
- "best_pick_reason": "Fallback due to error"
578
- }
 
 
 
 
 
 
 
 
 
 
 
 
579
 
580
  @app.post("/details")
581
- def details(req: DetailRequest):
582
- try:
583
- return generate_details_with_ai(req.idea_id, req.idea_title)
584
- except Exception as e:
585
- return generate_simple_details(req.idea_id, req.idea_title)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # app.py
2
+ from fastapi import FastAPI, Request, HTTPException, status
3
  from pydantic import BaseModel
4
+ from fastapi.middleware.cors import CORSMiddleware
5
+ import os
6
  import requests
7
+ import time
8
  import json
9
+ import threading
10
+ from typing import Dict
11
  from datetime import datetime, timedelta
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12
 
13
+ # ---------- Configuration ----------
14
+ GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY", "")
15
+ if not GEMINI_API_KEY:
16
+ print("WARNING: GEMINI_API_KEY not set. Set it in Space secrets or env.")
17
+
18
+ GEMINI_MODEL = os.environ.get("GEMINI_MODEL", "gemini-2.5-flash") # change if needed
19
+ GEMINI_BASE = os.environ.get("GEMINI_BASE", "https://generativelanguage.googleapis.com/v1beta")
20
+ GENERATE_PATH = f"{GEMINI_BASE}/models/{GEMINI_MODEL}:generateContent"
21
+
22
+ # Rate limit settings (in-memory). Tune these for your expected traffic.
23
+ RATE_LIMIT_REQUESTS = int(os.environ.get("RATE_LIMIT_REQUESTS", "12")) # requests per window per IP
24
+ RATE_LIMIT_WINDOW = int(os.environ.get("RATE_LIMIT_WINDOW", "60")) # window size in seconds
25
+
26
+ # Retry settings for Gemini calls
27
+ MAX_RETRIES = 3
28
+ RETRY_BACKOFF = 1.5 # multiplier
29
+
30
+ # ---------- Simple in-memory rate limiter ----------
31
+ # Structure: { ip: [timestamp1, timestamp2, ...] }
32
+ rate_table: Dict[str, list] = {}
33
+ rate_lock = threading.Lock()
34
+
35
def clean_old(ip: str):
    """Drop timestamps for *ip* that fall outside the rate-limit window.

    Returns the number of timestamps still inside the window.
    NOTE(review): this acquires ``rate_lock`` itself; ``threading.Lock`` is
    not reentrant, so callers must not already hold the lock.
    """
    cutoff = time.time() - RATE_LIMIT_WINDOW
    with rate_lock:
        fresh = [stamp for stamp in rate_table.get(ip, []) if stamp >= cutoff]
        rate_table[ip] = fresh
        return len(fresh)
43
+
44
def consume_token(ip: str):
    """Record one request for *ip*; return False when the window is already full.

    Bug fix: the original acquired ``rate_lock`` and then called
    ``clean_old(ip)``, which acquires the same lock. ``threading.Lock`` is
    not reentrant, so every call deadlocked. The stale-timestamp cleanup is
    now done inline under a single lock acquisition.
    """
    now = time.time()
    cutoff = now - RATE_LIMIT_WINDOW
    with rate_lock:
        # Prune entries older than the window, then test the remaining count.
        timestamps = [t for t in rate_table.get(ip, []) if t >= cutoff]
        if len(timestamps) >= RATE_LIMIT_REQUESTS:
            rate_table[ip] = timestamps
            return False
        timestamps.append(now)
        rate_table[ip] = timestamps
        return True
51
+
52
+ # ---------- FastAPI setup ----------
53
+ app = FastAPI(title="Hackathon Idea Generator (Gemini backend)")
54
+
55
+ app.add_middleware(
56
+ CORSMiddleware,
57
+ allow_origins=["*"], # change to your frontend domain in production
58
+ allow_credentials=True,
59
+ allow_methods=["GET", "POST", "OPTIONS"],
60
+ allow_headers=["*"],
61
+ )
62
+
63
+ # ---------- Request / Response models ----------
64
class GenerateRequest(BaseModel):
    """Request body for POST /generate."""
    # Optional topic/focus for the generated ideas; empty string means no focus.
    custom_prompt: str = ""
66
 
 
68
  idea_id: int
69
  idea_title: str
70
 
71
+ # ---------- Utility: call Gemini REST generateContent ----------
72
def call_gemini(prompt_text: str, max_output_tokens: int = 1024, temperature: float = 0.7):
    """Call the Gemini ``generateContent`` REST endpoint with retries.

    Args:
        prompt_text: full prompt to send as a single text part.
        max_output_tokens: generation cap forwarded to the API.
        temperature: sampling temperature forwarded to the API.

    Returns:
        The raw response JSON (dict) on HTTP 200.

    Raises:
        HTTPException: 500 when no API key is configured, the upstream
        status code for non-retryable API errors, or 502 after exhausting
        retries on timeouts / 429 / 5xx responses.
    """
    if not GEMINI_API_KEY:
        raise HTTPException(status_code=500, detail="GEMINI_API_KEY not configured on server.")

    headers = {
        "Content-Type": "application/json",
        "x-goog-api-key": GEMINI_API_KEY,
    }

    body = {
        "contents": [
            {
                "parts": [
                    {"text": prompt_text}
                ]
            }
        ],
        # Fix: tuning parameters must be nested under "generationConfig";
        # the REST API rejects them as unknown top-level fields.
        "generationConfig": {
            "temperature": temperature,
            "maxOutputTokens": max_output_tokens,
        },
    }

    backoff = 1.0
    for _ in range(MAX_RETRIES):
        try:
            resp = requests.post(GENERATE_PATH, headers=headers, json=body, timeout=60)
        except requests.RequestException:
            # Timeouts and connection errors: back off and retry.
            time.sleep(backoff)
            backoff *= RETRY_BACKOFF
            continue

        if resp.status_code == 200:
            return resp.json()

        # Transient upstream conditions: back off and retry.
        if resp.status_code in (429, 500, 502, 503, 504):
            time.sleep(backoff)
            backoff *= RETRY_BACKOFF
            continue

        # Non-retryable error: surface the API's error payload if parseable.
        try:
            payload = resp.json()
            message = payload.get("error", payload)
        except Exception:
            message = resp.text
        raise HTTPException(status_code=resp.status_code, detail=f"Gemini API error: {message}")

    raise HTTPException(status_code=502, detail="Gemini API unavailable after retries.")
130
+
131
+ # ---------- Helpers to extract text from Gemini response ----------
132
def extract_text_from_gemini(resp_json):
    """Best-effort extraction of generated text from a Gemini response payload.

    The current v1beta REST API returns
    ``{"candidates": [{"content": {"parts": [{"text": ...}], "role": "model"}}]}``
    — ``content`` is a *dict* holding a ``parts`` list. The original code only
    handled ``content`` as a list, so the common case fell through to the raw
    JSON fallback. Legacy/alternative shapes are still probed. Always returns
    a string (stringified JSON as last resort).
    """
    try:
        candidates = resp_json.get("candidates")
        if isinstance(candidates, list) and candidates:
            content = candidates[0].get("content")
            # Current API shape: content is a dict with a "parts" list.
            if isinstance(content, dict):
                parts = [
                    p["text"] for p in content.get("parts", [])
                    if isinstance(p, dict) and "text" in p
                ]
                if parts:
                    return "".join(parts)
            # Older shape: content is itself a list of {"text": ...} items.
            if isinstance(content, list):
                parts = [
                    item["text"] for item in content
                    if isinstance(item, dict) and "text" in item
                ]
                if parts:
                    return "".join(parts)
            # Fallback "output" key on the first candidate.
            if "output" in candidates[0]:
                return candidates[0]["output"].get("text", "")
        # Alternative top-level shape: {"outputs": [{"content": [{"text": ...}]}]}
        outputs = resp_json.get("outputs")
        if isinstance(outputs, list) and outputs:
            parts = [
                c["text"] for c in outputs[0].get("content", [])
                if isinstance(c, dict) and "text" in c
            ]
            if parts:
                return "".join(parts)
        # Minimal shape: {"text": "..."}
        if "text" in resp_json:
            return resp_json["text"]
    except Exception:
        # Unexpected payload shape: fall through to the raw dump below.
        pass

    # Last resort: return the stringified payload so callers can debug it.
    return json.dumps(resp_json)
182
+
183
+ # ---------- JSON extraction helper ----------
184
+ import re
185
def extract_json_from_text(text: str):
    """Pull the first JSON object (or array) out of free-form model output.

    Returns the parsed dict, wraps a bare top-level array as
    ``{"ideas": [...]}``, or ``None`` when nothing parseable is found.
    Bare ``except:`` clauses replaced with ``json.JSONDecodeError`` so
    unrelated errors are not silently swallowed.
    """
    # Greedy span from the first '{' to the last '}' — tolerates prose
    # around the JSON, as models often add commentary.
    obj_match = re.search(r'\{[\s\S]*\}', text)
    if obj_match:
        try:
            return json.loads(obj_match.group())
        except json.JSONDecodeError:
            return None
    # No object found: try a bare top-level array.
    arr_match = re.search(r'\[[\s\S]*\]', text)
    if arr_match:
        try:
            arr = json.loads(arr_match.group())
        except json.JSONDecodeError:
            return None
        return {"ideas": arr}
    return None
202
 
203
+ # ---------- Endpoints ----------
204
@app.get("/")
def root():
    """Liveness check: reports service status and the configured Gemini model."""
    payload = {"status": "ok", "model": GEMINI_MODEL}
    return payload
 
 
 
 
 
 
 
 
 
 
 
 
 
 
207
 
208
@app.get("/stats")
def stats():
    """Expose the in-memory rate-limit configuration (not persisted across restarts)."""
    return {
        "rate_limit_requests": RATE_LIMIT_REQUESTS,
        "rate_limit_window_seconds": RATE_LIMIT_WINDOW,
    }
 
 
 
 
 
 
 
 
 
 
 
212
 
213
@app.post("/generate")
async def generate(req: GenerateRequest, request: Request):
    """Generate exactly three hackathon ideas as JSON via Gemini, per-IP rate limited."""
    ip = request.client.host if request.client else "unknown"
    if not consume_token(ip):
        raise HTTPException(
            status_code=status.HTTP_429_TOO_MANY_REQUESTS,
            detail=f"Rate limit exceeded. Max {RATE_LIMIT_REQUESTS} requests per {RATE_LIMIT_WINDOW}s.",
        )

    # Compact prompt: system directive, topic line, then the required schema.
    prompt_text = (
        "[SYSTEM] You are a JSON API that returns exactly 3 hackathon ideas in valid JSON. "
        "Return only JSON and nothing else. "
        f"\nTopic: {req.custom_prompt.strip() or 'innovative technology'}"
        "\n\nReturn JSON with format:\n"
        "{ \"ideas\": [ { \"id\":1, \"title\":\"...\", \"elevator\":\"...\", \"overview\":\"...\", \"primary_tech_stack\":[], \"difficulty\":\"Medium\", \"time_estimate_hours\":24 }, ... ], \"best_pick_id\": 2, \"best_pick_reason\": \"...\" }"
    )

    raw = call_gemini(prompt_text, max_output_tokens=1024, temperature=0.7)
    text = extract_text_from_gemini(raw)
    parsed = extract_json_from_text(text)
    if not parsed:
        # Include the raw payload so parse failures are debuggable locally.
        raise HTTPException(status_code=502, detail={"error": "PARSE_ERROR", "raw": text[:1000], "gemini_raw": raw})
    return parsed
236
 
237
@app.post("/details")
async def details(req: DetailRequest, request: Request):
    """Produce a full 48-hour implementation plan (JSON) for one idea via Gemini."""
    ip = request.client.host if request.client else "unknown"
    if not consume_token(ip):
        raise HTTPException(
            status_code=status.HTTP_429_TOO_MANY_REQUESTS,
            detail=f"Rate limit exceeded. Max {RATE_LIMIT_REQUESTS} requests per {RATE_LIMIT_WINDOW}s.",
        )

    # System directive, project title, then the required plan schema.
    plan_prompt = (
        "[SYSTEM] You are a JSON API that returns a full 48-hour implementation plan in JSON. "
        "Return only JSON.\n"
        f"Project title: {req.idea_title}\n\n"
        "Return format: {\"id\": <id>, \"title\": \"...\", \"mermaid_architecture\":\"...\", \"phases\": [ {\"name\":\"MVP\",\"time_hours\":20,\"tasks\":[..],\"deliverables\":[..]} , ... ], \"critical_code_snippets\":[...], \"ui_components\": [...], \"risks_and_mitigations\":[...] }"
    )
    raw = call_gemini(plan_prompt, max_output_tokens=1400, temperature=0.7)
    text = extract_text_from_gemini(raw)
    plan = extract_json_from_text(text)
    if not plan:
        raise HTTPException(status_code=502, detail={"error": "PARSE_ERROR", "raw": text[:1000], "gemini_raw": raw})
    return plan
256
+ # ---------- Run with: uvicorn app:app --host 0.0.0.0 --port 8080
requirements.txt CHANGED
@@ -1,4 +1,5 @@
1
- fastapi==0.104.1
2
- uvicorn[standard]==0.24.0
3
- pydantic==2.5.0
4
- requests==2.31.0
 
 
1
+ fastapi
2
+ uvicorn
3
+ requests
4
+ pydantic
5
+ python-multipart