swayamshetkar commited on
Commit
327898f
·
1 Parent(s): 40a044d

Added backend files

Browse files
Files changed (5) hide show
  1. README.md +19 -10
  2. app.py +68 -0
  3. model_loader.py +9 -0
  4. prompt_templates.py +72 -0
  5. requirements.txt +5 -0
README.md CHANGED
@@ -1,10 +1,19 @@
1
- ---
2
- title: Hackathon Idea Generator
3
- emoji: πŸ”₯
4
- colorFrom: pink
5
- colorTo: gray
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
1
+ # Hackathon Idea Generator API (FastAPI + GPT-2)
2
+
3
+ This is a backend that runs GPT-2 locally (loaded via Hugging Face Transformers) to generate:
4
+
5
+ ✔ 3 hackathon ideas
6
+ ✔ Overview
7
+ ✔ Why it's useful
8
+ ✔ Difficulty
9
+ ✔ "How to build it" guide
10
+ ✔ Best pick
11
+
12
+ ## Endpoints
13
+
14
+ ### `/generate` (POST)
15
+ Request:
16
+ ```json
17
+ {
18
+ "topic": "AI in healthcare"
19
+ }
app.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI
2
+ from pydantic import BaseModel
3
+ from model_loader import tokenizer, model
4
+ from prompt_templates import MAIN_PROMPT_TEMPLATE, DETAIL_PROMPT_TEMPLATE
5
+ import torch
6
+ import json
7
+ import re
8
+
9
app = FastAPI()  # ASGI application object; served with `uvicorn app:app`
10
+
11
class GenerateRequest(BaseModel):
    """Request body for POST /generate: optional steering text for the prompt."""

    custom_prompt: str = ""
13
+
14
class DetailRequest(BaseModel):
    """Request body for POST /details: identifies which idea to expand."""

    idea_id: int
    idea_title: str
17
+
18
+
19
+ @app.get("/")
20
+ def home():
21
+ return {"status": "Local GPT-2 Backend Running", "endpoints": ["/generate", "/details"]}
22
+
23
+
24
def run_gpt2(prompt: str) -> str:
    """Run the local GPT-2 model on *prompt* and return the decoded text.

    Sampling is enabled (temperature/top-p), so output is nondeterministic.
    The returned string includes the prompt itself (GPT-2 is a pure
    continuation model and `skip_special_tokens` does not strip the input).
    """
    # Tokenizer call (vs .encode) also yields the attention mask.
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():  # inference only — skip autograd bookkeeping
        outputs = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],  # explicit mask, correct + silences HF warning
            max_new_tokens=500,
            temperature=0.8,
            top_p=0.9,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; use EOS
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
34
+
35
+
36
def extract_json(text: str) -> dict:
    """Extract the first {...} span from *text* and parse it as JSON.

    GPT-2 output usually wraps the JSON in extra prose, so grab everything
    from the first '{' to the last '}' (greedy) and attempt to parse it.
    On failure, return an error payload carrying the raw model output so the
    caller can still inspect it.
    """
    match = re.search(r"\{[\s\S]*\}", text)
    if match:
        try:
            return json.loads(match.group())
        except json.JSONDecodeError:
            # Narrow except (was a bare `except:`): only a parse failure
            # should fall through to the error payload.
            pass
    return {"error": "JSON parsing failed", "raw_output": text}
44
+
45
+
46
# -----------------------
# STEP 1 → GENERATE IDEAS
# -----------------------
@app.post("/generate")
def generate(req: GenerateRequest):
    """Produce hackathon ideas; returns parsed JSON (or an error payload)."""
    prompt = MAIN_PROMPT_TEMPLATE.format(CUSTOM_PROMPT=req.custom_prompt)
    return extract_json(run_gpt2(prompt))
55
+
56
+
57
# -----------------------
# STEP 2 → IDEA DETAILS
# -----------------------
@app.post("/details")
def details(req: DetailRequest):
    """Expand one idea into a build plan; returns parsed JSON (or an error payload)."""
    detail_prompt = DETAIL_PROMPT_TEMPLATE.format(
        IDEA_ID=req.idea_id,
        IDEA_TITLE=req.idea_title,
    )
    return extract_json(run_gpt2(detail_prompt))
model_loader.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
2
+
3
def load_model():
    """Download/load the GPT-2 tokenizer and model once at import time.

    Returns:
        (tokenizer, model) tuple. The model is switched to eval mode since
        this backend only performs inference (disables dropout, which would
        otherwise add extra randomness to generations).
    """
    print("Loading GPT-2 model...")
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    model.eval()  # inference-only service: deterministic layers
    return tokenizer, model

# Module-level singletons consumed by app.py via `from model_loader import tokenizer, model`.
tokenizer, model = load_model()
prompt_templates.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Prompt for POST /generate. Filled via str.format() in app.py, so every
# literal JSON brace MUST be doubled ({{ / }}) — otherwise .format() raises
# on the schema braces. Only {CUSTOM_PROMPT} is substituted.
MAIN_PROMPT_TEMPLATE = """
Generate 3 distinct client-side hackathon ideas. {CUSTOM_PROMPT}
For each idea, provide a title, a short elevator pitch, an overview, primary tech stack, difficulty, and estimated time.
Pick the best one and explain why.
Return ONLY valid JSON matching this exact schema:

{{
  "ideas": [
    {{
      "id": <int>,
      "title": "<string>",
      "elevator": "<string>",
      "overview": "<string>",
      "primary_tech_stack": ["<string>", "..."],
      "difficulty": "<Easy|Medium|Hard>",
      "time_estimate_hours": <number>
    }}
  ],
  "best_pick_id": <int>,
  "best_pick_reason": "<string>"
}}

Constraints:
- Return exactly 3 ideas.
- Keep all text concise for UI cards.
""".strip()
27
+
28
+
29
# Prompt for POST /details. Filled via str.format() in app.py, so literal
# JSON braces are doubled ({{ / }}); only {IDEA_ID} and {IDEA_TITLE} are
# substituted.
DETAIL_PROMPT_TEMPLATE = """
Expand on the hackathon idea "{IDEA_TITLE}" (ID: {IDEA_ID}).
Create a detailed, step-by-step build plan suitable for a 48-hour hackathon.
Return ONLY valid JSON matching this exact schema:

{{
  "id": {IDEA_ID},
  "title": "{IDEA_TITLE}",
  "mermaid_architecture": "<string starting with 'graph LR'>",
  "phases": [
    {{
      "name": "<MVP|Polish|Demo>",
      "time_hours": <number>,
      "tasks": ["<string>", "..."],
      "deliverables": ["<string>", "..."]
    }}
  ],
  "critical_code_snippets": [
    {{
      "title": "<string>",
      "language": "javascript|html|css",
      "code": "<multiline string of code>"
    }}
  ],
  "ui_components": [
    {{
      "name": "<string>",
      "purpose": "<string>"
    }}
  ],
  "risks_and_mitigations": [
    {{
      "risk": "<string>",
      "mitigation":"<string>"
    }}
  ]
}}

Constraints:
- The 'phases' array must cover the entire build process.
- Total time for all phases must not exceed 48 hours.
- Mermaid diagrams must be valid and represent the project architecture.
- Include at least one meaningful code snippet.
""".strip()
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ fastapi
2
+ uvicorn
3
+ transformers
4
+ torch
5
+ python-multipart