swayamshetkar commited on
Commit
ab7672a
·
1 Parent(s): 889453a
Files changed (5) hide show
  1. Dockerfile +4 -10
  2. app.py +398 -41
  3. model_loader.py +0 -9
  4. requirements.txt +1 -4
  5. test.py +122 -0
Dockerfile CHANGED
@@ -2,24 +2,18 @@ FROM python:3.10-slim
2
 
3
  WORKDIR /code
4
 
5
- # Install system dependencies
6
- RUN apt-get update && apt-get install -y \
7
- build-essential \
8
- curl \
9
- && rm -rf /var/lib/apt/lists/*
10
-
11
- # Copy requirements first for better caching
12
  COPY requirements.txt .
13
 
14
- # Install Python dependencies
15
  RUN pip install --no-cache-dir --upgrade pip && \
16
  pip install --no-cache-dir -r requirements.txt
17
 
18
  # Copy application code
19
- COPY . .
20
 
21
  # Expose port
22
  EXPOSE 7860
23
 
24
  # Run the application
25
- CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
 
2
 
3
  WORKDIR /code
4
 
5
+ # Copy requirements
 
 
 
 
 
 
6
  COPY requirements.txt .
7
 
8
+ # Install dependencies
9
  RUN pip install --no-cache-dir --upgrade pip && \
10
  pip install --no-cache-dir -r requirements.txt
11
 
12
  # Copy application code
13
+ COPY main.py .
14
 
15
  # Expose port
16
  EXPOSE 7860
17
 
18
  # Run the application
19
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
app.py CHANGED
@@ -1,13 +1,22 @@
1
- from fastapi import FastAPI
2
  from pydantic import BaseModel
3
- from model_loader import model, tokenizer
4
- from prompt_templates import MAIN_PROMPT_TEMPLATE, DETAIL_PROMPT_TEMPLATE
5
  import json
6
- import re
7
- import torch
8
 
9
  app = FastAPI()
10
 
 
 
 
 
 
 
 
 
 
 
11
  class GenerateRequest(BaseModel):
12
  custom_prompt: str = ""
13
 
@@ -15,53 +24,401 @@ class DetailRequest(BaseModel):
15
  idea_id: int
16
  idea_title: str
17
 
18
- def extract_json(text):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  try:
20
- return json.loads(text)
 
21
  except:
22
- pass
23
-
24
- m = re.search(r"\{[\s\S]*\}", text)
25
- if m:
26
- try:
27
- return json.loads(m.group())
28
- except:
29
- pass
30
-
31
- return {"error": "JSON parsing failed", "raw_output": text}
32
-
33
- def run_model(prompt: str):
34
- inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=512).to("cpu")
35
- with torch.no_grad():
36
- outputs = model.generate(
37
- **inputs,
38
- max_new_tokens=512,
39
- do_sample=False,
40
- temperature=1.0
41
- )
42
- text = tokenizer.decode(outputs[0], skip_special_tokens=True)
43
- return text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
  @app.get("/")
46
  def home():
47
  return {
48
- "status": "FLAN-T5 Backend Running",
49
  "endpoints": ["/generate", "/details"],
50
- "model": "google/flan-t5-base"
 
 
51
  }
52
 
53
  @app.post("/generate")
54
  def generate(req: GenerateRequest):
55
- prompt = MAIN_PROMPT_TEMPLATE.replace("{CUSTOM_PROMPT}", req.custom_prompt)
56
- raw = run_model(prompt)
57
- return extract_json(raw)
 
 
 
 
 
 
58
 
59
  @app.post("/details")
60
  def details(req: DetailRequest):
61
- prompt = (
62
- DETAIL_PROMPT_TEMPLATE
63
- .replace("{IDEA_ID}", str(req.idea_id))
64
- .replace("{IDEA_TITLE}", req.idea_title)
65
- )
66
- raw = run_model(prompt)
67
- return extract_json(raw)
 
1
+ from fastapi import FastAPI, HTTPException
2
  from pydantic import BaseModel
3
+ import requests
 
4
  import json
5
+ import os
6
+ from typing import Optional
7
 
8
  app = FastAPI()
9
 
10
# Use Hugging Face Inference API (FREE!)
# Get your token from: https://huggingface.co/settings/tokens
HF_TOKEN = os.environ.get("HF_TOKEN", "")  # Set this in Space secrets; empty string means anonymous access

# Best FREE models that work well
# NOTE(review): hosted-model availability changes over time — verify these
# model ids are still served by the HF Inference API.
MODELS = {
    "default": "microsoft/Phi-3-mini-4k-instruct",  # 3.8B - Good quality, fast
    "backup": "HuggingFaceH4/zephyr-7b-beta",  # 7B - Better quality, slower
}
19
+
20
class GenerateRequest(BaseModel):
    # Optional free-text theme/focus for idea generation; empty means generic.
    custom_prompt: str = ""
22
 
 
24
  idea_id: int
25
  idea_title: str
26
 
27
def call_hf_api(prompt: str, max_tokens: int = 800, model: str = "default") -> str:
    """Call the Hugging Face Inference API (free tier).

    Args:
        prompt: Text sent as the model input.
        max_tokens: Cap on generated tokens (``max_new_tokens``).
        model: Key into MODELS ("default" or "backup").

    Returns:
        The generated text; the sentinel strings "MODEL_LOADING" or
        "RATE_LIMIT" for the corresponding HTTP statuses; or "" on any
        other failure.
    """
    api_url = f"https://api-inference.huggingface.co/models/{MODELS[model]}"

    # FIX: only send an Authorization header when a token is configured.
    # The previous code sent "Authorization": "" with no token, which is an
    # invalid header value that some servers/proxies reject outright.
    headers = {}
    if HF_TOKEN:
        headers["Authorization"] = f"Bearer {HF_TOKEN}"

    payload = {
        "inputs": prompt,
        "parameters": {
            "max_new_tokens": max_tokens,
            "temperature": 0.7,
            "top_p": 0.9,
            "do_sample": True,
            "return_full_text": False
        }
    }

    try:
        response = requests.post(api_url, headers=headers, json=payload, timeout=30)

        if response.status_code == 503:
            # Model is cold-starting on HF's side; caller should retry later.
            return "MODEL_LOADING"
        elif response.status_code == 429:
            # Rate limited: retry once with the backup model, then give up.
            if model == "default":
                return call_hf_api(prompt, max_tokens, "backup")
            return "RATE_LIMIT"

        response.raise_for_status()
        result = response.json()

        # Successful responses are a list of {"generated_text": ...} dicts.
        if isinstance(result, list) and len(result) > 0:
            return result[0].get("generated_text", "")
        return ""

    except Exception as e:
        # Boundary catch: network/JSON errors degrade to "" so callers can
        # fall back to canned content instead of crashing the endpoint.
        print(f"API Error: {e}")
        return ""
68
+
69
def generate_ideas_with_ai(custom_prompt: str) -> dict:
    """Generate three hackathon project ideas via the HF text-generation API.

    Returns a dict with "ideas", "best_pick_id" and "best_pick_reason".
    When the model is loading or rate-limited, "error" is set and the
    idea list is empty.
    """

    prompt = f"""You are a hackathon project expert. Generate 3 unique hackathon project ideas.
Focus: {custom_prompt if custom_prompt else "innovative web applications"}

For each idea, provide:
1. Title (creative, concise)
2. Elevator pitch (one compelling sentence)
3. Overview (2-3 sentences explaining the project)
4. Tech stack (3-4 technologies)
5. Difficulty (Easy/Medium/Hard)
6. Time estimate in hours (12-48)

Format your response EXACTLY like this:

IDEA 1:
Title: [title]
Elevator: [pitch]
Overview: [description]
Tech: [tech1, tech2, tech3]
Difficulty: [Easy/Medium/Hard]
Hours: [number]

IDEA 2:
Title: [title]
Elevator: [pitch]
Overview: [description]
Tech: [tech1, tech2, tech3]
Difficulty: [Easy/Medium/Hard]
Hours: [number]

IDEA 3:
Title: [title]
Elevator: [pitch]
Overview: [description]
Tech: [tech1, tech2, tech3]
Difficulty: [Easy/Medium/Hard]
Hours: [number]

Generate now:"""

    response = call_hf_api(prompt, max_tokens=1000)

    # Map the API's sentinel return values to user-facing error payloads.
    sentinel_payloads = {
        "MODEL_LOADING": (
            "Model is loading. Please wait 30 seconds and try again.",
            "Model loading",
        ),
        "RATE_LIMIT": (
            "Rate limit reached. Please wait a minute and try again.",
            "Rate limited",
        ),
    }
    if response in sentinel_payloads:
        message, reason = sentinel_payloads[response]
        return {
            "error": message,
            "ideas": [],
            "best_pick_id": 1,
            "best_pick_reason": reason,
        }

    # Parse the free-form model output; if fewer than 3 ideas survive
    # parsing, substitute the canned fallback set.
    ideas = parse_ideas_from_text(response)
    if len(ideas) < 3:
        ideas = generate_simple_fallback(custom_prompt)

    return {
        "ideas": ideas,
        "best_pick_id": 2,  # middle-difficulty idea is usually the sweet spot
        "best_pick_reason": "Balanced scope with achievable goals and innovative features",
    }
140
+
141
def parse_ideas_from_text(text: str) -> list:
    """Parse AI-generated plain text into structured idea dicts.

    Expects the "Title:/Elevator:/Overview:/Tech:/Difficulty:/Hours:"
    line format requested in the generation prompt. Returns at most
    three formatted ideas; malformed blocks are dropped.
    """
    ideas = []
    current_idea = {}

    for line in text.split('\n'):
        line = line.strip()
        if not line:
            continue

        if line.startswith("Title:"):
            # A new "Title:" starts the next idea; flush the previous one
            # only if it collected enough fields (>= 5) to be usable.
            if current_idea and len(current_idea) >= 5:
                ideas.append(format_idea(current_idea, len(ideas) + 1))
            current_idea = {"title": line.replace("Title:", "").strip()}
        elif line.startswith("Elevator:"):
            current_idea["elevator"] = line.replace("Elevator:", "").strip()
        elif line.startswith("Overview:"):
            current_idea["overview"] = line.replace("Overview:", "").strip()
        elif line.startswith("Tech:"):
            tech_str = line.replace("Tech:", "").strip()
            current_idea["tech"] = [t.strip() for t in tech_str.split(',')]
        elif line.startswith("Difficulty:"):
            current_idea["difficulty"] = line.replace("Difficulty:", "").strip()
        elif line.startswith("Hours:"):
            try:
                current_idea["hours"] = int(line.replace("Hours:", "").strip())
            except ValueError:
                # FIX: was a bare `except:` (catches SystemExit/KeyboardInterrupt
                # too); only a non-numeric hours value should fall back to 24.
                current_idea["hours"] = 24

    # Flush the last idea, which has no following "Title:" to trigger it.
    if current_idea and len(current_idea) >= 5:
        ideas.append(format_idea(current_idea, len(ideas) + 1))

    return ideas[:3]
176
+
177
def format_idea(data: dict, id: int) -> dict:
    """Normalize a parsed idea dict into the API response structure.

    Missing fields fall back to generic defaults; text fields are
    truncated to their maximum lengths and the tech stack to 4 entries.
    """
    title = data.get("title", f"Project {id}")
    elevator = data.get("elevator", "An innovative hackathon project")
    overview = data.get("overview", "A comprehensive solution for developers")
    tech = data.get("tech", ["React", "Node.js", "MongoDB"])

    return {
        "id": id,
        "title": title[:100],
        "elevator": elevator[:200],
        "overview": overview[:400],
        "primary_tech_stack": tech[:4],
        "difficulty": data.get("difficulty", "Medium"),
        "time_estimate_hours": data.get("hours", 24),
    }
188
+
189
def generate_simple_fallback(custom_prompt: str) -> list:
    """Return three canned ideas themed on *custom_prompt* when AI generation fails."""
    # Lower-cased theme word woven into every title/description.
    focus = custom_prompt.lower() if custom_prompt else "web"

    dashboard = {
        "id": 1,
        "title": f"Smart {focus.title()} Dashboard",
        "elevator": f"Real-time analytics and insights for {focus} applications",
        "overview": f"An intelligent dashboard that provides comprehensive analytics, monitoring, and actionable insights for {focus} applications with customizable widgets and alerts.",
        "primary_tech_stack": ["React", "Node.js", "MongoDB", "Chart.js"],
        "difficulty": "Medium",
        "time_estimate_hours": 24,
    }
    automation = {
        "id": 2,
        "title": f"{focus.title()} Automation Tool",
        "elevator": f"Automate repetitive {focus} tasks with intelligent workflows",
        "overview": f"A powerful automation platform that streamlines {focus} workflows, reduces manual work, and increases productivity through smart triggers and actions.",
        "primary_tech_stack": ["Python", "FastAPI", "PostgreSQL", "Redis"],
        "difficulty": "Easy",
        "time_estimate_hours": 18,
    }
    collaboration = {
        "id": 3,
        "title": f"Collaborative {focus.title()} Platform",
        "elevator": f"Team collaboration made easy for {focus} projects",
        "overview": f"A real-time collaborative workspace designed for {focus} teams, featuring live editing, version control, and integrated communication tools.",
        "primary_tech_stack": ["Vue.js", "WebSocket", "Express", "Firebase"],
        "difficulty": "Hard",
        "time_estimate_hours": 36,
    }

    return [dashboard, automation, collaboration]
224
+
225
def generate_details_with_ai(idea_id: int, idea_title: str) -> dict:
    """Generate a detailed 48-hour implementation plan for one idea.

    Asks the HF API for a structured plan; any sentinel/empty response or
    parsing failure falls back to the static template so the endpoint
    always returns a usable plan.
    """

    prompt = f"""Create a detailed 48-hour implementation plan for this hackathon project:
Project: {idea_title}

Provide:
1. Mermaid architecture diagram (simple graph syntax)
2. Three phases: MVP (20h), Polish (18h), Demo (10h)
   - Each phase needs: name, time_hours, 4-5 tasks, 3-4 deliverables
3. Two critical code snippets with title, language (javascript/python), and actual code
4. Four UI components with name and purpose
5. Four risks with mitigations

Format as:

ARCHITECTURE:
[simple mermaid graph]

MVP PHASE:
Tasks: [task1], [task2], [task3], [task4]
Deliverables: [del1], [del2], [del3]

POLISH PHASE:
Tasks: [task1], [task2], [task3], [task4]
Deliverables: [del1], [del2], [del3]

DEMO PHASE:
Tasks: [task1], [task2], [task3]
Deliverables: [del1], [del2], [del3]

CODE1:
Title: [title]
Language: javascript
Code: [actual code snippet]

CODE2:
Title: [title]
Language: javascript
Code: [actual code snippet]

UI: [component1: purpose1], [component2: purpose2], [component3: purpose3], [component4: purpose4]

RISKS: [risk1: mitigation1], [risk2: mitigation2], [risk3: mitigation3], [risk4: mitigation4]"""

    response = call_hf_api(prompt, max_tokens=1200)

    # Sentinel or empty response: skip parsing entirely.
    if response in ["MODEL_LOADING", "RATE_LIMIT", ""]:
        return generate_simple_details(idea_id, idea_title)

    # Try to parse AI response
    try:
        parsed = parse_details_from_text(response, idea_id, idea_title)
        return parsed
    except Exception:
        # FIX: was a bare `except:` (also traps SystemExit/KeyboardInterrupt);
        # any parsing failure should just degrade to the static template.
        return generate_simple_details(idea_id, idea_title)
281
+
282
def parse_details_from_text(text: str, idea_id: int, idea_title: str) -> dict:
    """Parse AI response into structured details.

    NOTE(review): full parsing of the free-form plan text is not
    implemented — *text* is currently ignored and the static template
    is returned instead. Implement real parsing or remove the parameter.
    """
    # This is complex, so we'll use a simpler fallback
    return generate_simple_details(idea_id, idea_title)
286
+
287
def generate_simple_details(idea_id: int, idea_title: str) -> dict:
    """Generate structured details.

    Static 48-hour plan template: three phases (MVP 20h / Polish 18h /
    Demo 10h), two example code snippets, four UI components and four
    risk/mitigation pairs. Only "id" and "title" vary with the input.
    """
    return {
        "id": idea_id,
        "title": idea_title,
        # Generic frontend -> API -> logic -> storage topology in Mermaid syntax.
        "mermaid_architecture": """graph TB
    A[Frontend] --> B[API]
    B --> C[Business Logic]
    C --> D[Database]
    B --> E[External Services]""",
        # Phase hours sum to 48 — the standard hackathon duration.
        "phases": [
            {
                "name": "MVP",
                "time_hours": 20,
                "tasks": [
                    "Set up project structure and dependencies",
                    "Implement core API endpoints",
                    "Build basic UI components",
                    "Connect frontend to backend",
                    "Test core functionality"
                ],
                "deliverables": [
                    "Working prototype",
                    "Core features functional",
                    "Basic UI completed"
                ]
            },
            {
                "name": "Polish",
                "time_hours": 18,
                "tasks": [
                    "Enhance UI/UX design",
                    "Add error handling",
                    "Optimize performance",
                    "Write tests",
                    "Add documentation"
                ],
                "deliverables": [
                    "Polished interface",
                    "Error handling complete",
                    "Tests passing"
                ]
            },
            {
                "name": "Demo",
                "time_hours": 10,
                "tasks": [
                    "Prepare presentation",
                    "Create demo data",
                    "Practice pitch",
                    "Final bug fixes"
                ],
                "deliverables": [
                    "Demo ready",
                    "Pitch deck complete",
                    "Video recorded"
                ]
            }
        ],
        # Illustrative snippets only — not tailored to the specific idea.
        "critical_code_snippets": [
            {
                "title": "API Setup",
                "language": "javascript",
                "code": """const express = require('express');
const app = express();

app.use(express.json());

app.post('/api/data', async (req, res) => {
    try {
        const result = await processData(req.body);
        res.json({ success: true, data: result });
    } catch (error) {
        res.status(500).json({ error: error.message });
    }
});"""
            },
            {
                "title": "React Component",
                "language": "javascript",
                "code": """function DataView() {
    const [data, setData] = useState([]);
    const [loading, setLoading] = useState(true);

    useEffect(() => {
        fetchData();
    }, []);

    return (
        <div>
            {loading ? <Spinner /> : <DataList data={data} />}
        </div>
    );
}"""
            }
        ],
        "ui_components": [
            {"name": "Dashboard", "purpose": "Main view for data visualization"},
            {"name": "Form", "purpose": "User input collection"},
            {"name": "List View", "purpose": "Display items with filtering"},
            {"name": "Settings", "purpose": "Configure preferences"}
        ],
        "risks_and_mitigations": [
            {"risk": "API rate limits", "mitigation": "Implement caching and request queuing"},
            {"risk": "Data validation errors", "mitigation": "Add comprehensive validation on frontend and backend"},
            {"risk": "Performance issues", "mitigation": "Optimize queries and implement pagination"},
            {"risk": "Time constraints", "mitigation": "Prioritize MVP features and use feature flags"}
        ]
    }
396
 
397
@app.get("/")
def home():
    # Health/info endpoint: reports service status, available routes,
    # the active model id, and setup hints for Space operators.
    return {
        "status": "AI Hackathon Generator (Free HF API)",
        "endpoints": ["/generate", "/details"],
        "model": MODELS["default"],
        "note": "Using Hugging Face free API - first request may be slow",
        "setup": "Add HF_TOKEN to Space secrets for better rate limits"
    }
406
 
407
@app.post("/generate")
def generate(req: GenerateRequest):
    """POST /generate — return three hackathon ideas.

    Never fails: on any unexpected error the canned fallback ideas are
    returned with the error message attached.
    """
    prompt = req.custom_prompt
    try:
        return generate_ideas_with_ai(prompt)
    except Exception as exc:
        fallback_payload = {
            "error": str(exc),
            "ideas": generate_simple_fallback(prompt),
            "best_pick_id": 2,
            "best_pick_reason": "Fallback due to error",
        }
        return fallback_payload
418
 
419
@app.post("/details")
def details(req: DetailRequest):
    """POST /details — return a detailed implementation plan for one idea.

    On any failure the static plan template is returned so the endpoint
    stays usable; the exception is intentionally not surfaced to clients.
    """
    try:
        return generate_details_with_ai(req.idea_id, req.idea_title)
    except Exception:
        # FIX: was `except Exception as e` with `e` never used — the unused
        # binding suggested the error was logged/returned when it is not.
        return generate_simple_details(req.idea_id, req.idea_title)
 
 
 
model_loader.py DELETED
@@ -1,9 +0,0 @@
1
- from transformers import GPT2Tokenizer, GPT2LMHeadModel
2
-
3
- def load_model():
4
- print("Loading GPT-2 model...")
5
- tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
6
- model = GPT2LMHeadModel.from_pretrained("gpt2")
7
- return tokenizer, model
8
-
9
- tokenizer, model = load_model()
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -1,7 +1,4 @@
1
  fastapi==0.104.1
2
  uvicorn[standard]==0.24.0
3
- transformers==4.35.2
4
- torch==2.1.0
5
  pydantic==2.5.0
6
- sentencepiece==0.1.99
7
- protobuf==4.25.1
 
1
  fastapi==0.104.1
2
  uvicorn[standard]==0.24.0
 
 
3
  pydantic==2.5.0
4
+ requests==2.31.0
 
test.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test script for Hackathon Idea Generator API
4
+ """
5
+
6
+ import requests
7
+ import json
8
+ import sys
9
+
10
+ # Your API URL
11
+ BASE_URL = "https://swayamshetkar-Hackathon-idea-Generator.hf.space"
12
+
13
def test_health():
    """Hit the root endpoint and report whether it returned HTTP 200."""
    banner = "=" * 60
    print(banner)
    print("TEST 1: Health Check")
    print(banner)
    try:
        resp = requests.get(f"{BASE_URL}/", timeout=10)
        print(f"Status Code: {resp.status_code}")
        print(f"Response: {json.dumps(resp.json(), indent=2)}")
        return resp.status_code == 200
    except Exception as err:
        print(f"❌ Error: {err}")
        return False
26
+
27
def test_generate(custom_prompt=""):
    """Call /generate and return the response dict, or None on failure."""
    banner = "=" * 60
    print("\n" + banner)
    print("TEST 2: Generate Ideas")
    print(banner)
    print(f"Custom Prompt: '{custom_prompt}'")

    try:
        resp = requests.post(
            f"{BASE_URL}/generate",
            json={"custom_prompt": custom_prompt},
            timeout=60,  # generation on the free tier can be slow
        )
        print(f"Status Code: {resp.status_code}")
        data = resp.json()
        print(f"\nResponse:")
        print(json.dumps(data, indent=2))

        # Non-empty "ideas" list means success.
        if data.get("ideas"):
            print(f"\n✓ Successfully generated {len(data['ideas'])} ideas!")
            return data
        print("\n❌ No ideas generated")
        return None

    except requests.Timeout:
        print("❌ Request timed out. The model might still be loading.")
        print("   Try again in 30 seconds.")
        return None
    except Exception as err:
        print(f"❌ Error: {err}")
        return None
60
+
61
def test_details(idea_id, idea_title):
    """Call /details for one idea and return True if a plan came back."""
    banner = "=" * 60
    print("\n" + banner)
    print("TEST 3: Get Idea Details")
    print(banner)
    print(f"Idea ID: {idea_id}")
    print(f"Idea Title: {idea_title}")

    try:
        resp = requests.post(
            f"{BASE_URL}/details",
            json={"idea_id": idea_id, "idea_title": idea_title},
            timeout=60,
        )
        print(f"Status Code: {resp.status_code}")
        data = resp.json()
        print(f"\nResponse:")
        print(json.dumps(data, indent=2))

        # A "phases" key marks a real implementation plan.
        if "phases" in data:
            print(f"\n✓ Successfully generated detailed plan!")
            return True
        print("\n❌ No detailed plan generated")
        return False

    except requests.Timeout:
        print("❌ Request timed out.")
        return False
    except Exception as err:
        print(f"❌ Error: {err}")
        return False
96
+
97
def main():
    """Run the health, generate, and details checks in sequence."""
    print("\n🚀 Starting API Tests...")
    print(f"API URL: {BASE_URL}\n")

    # Abort early if the service is not reachable at all.
    if not test_health():
        print("\n❌ Health check failed. Make sure the API is running.")
        sys.exit(1)

    ideas_data = test_generate("focus on AI and machine learning")

    # Only exercise /details when at least one idea came back.
    ideas = ideas_data.get("ideas") if ideas_data else None
    if ideas:
        first = ideas[0]
        test_details(first["id"], first["title"])
    else:
        print("\n⚠️ Skipping details test (no ideas generated)")

    banner = "=" * 60
    print("\n" + banner)
    print("Tests completed!")
    print(banner)

if __name__ == "__main__":
    main()