Lcmind committed on
Commit
e56a4bd
·
1 Parent(s): 5297538

fix: switch to Docker SDK with direct FastAPI, remove Gradio wrapper

Browse files
Files changed (6) hide show
  1. Dockerfile +3 -3
  2. README.md +2 -3
  3. app.py +0 -13
  4. main.py +0 -485
  5. pyproject.toml +0 -22
  6. requirements.txt +0 -3
Dockerfile CHANGED
@@ -59,7 +59,7 @@ EXPOSE 7860
59
 
60
  # Health check for container monitoring
61
  HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
62
- CMD python -c "import requests; requests.get('http://localhost:7860/health', timeout=5)"
63
 
64
- # Run Gradio wrapper (which includes FastAPI)
65
- CMD ["python", "app.py"]
 
59
 
60
  # Health check for container monitoring
61
  HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
62
+ CMD python -c "import requests; requests.get('http://localhost:7860/', timeout=5)"
63
 
64
+ # Run FastAPI directly on port 7860 (HF Spaces standard)
65
+ CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md CHANGED
@@ -3,9 +3,8 @@ title: VIBE_LINK Server
3
  emoji: 🎨
4
  colorFrom: gray
5
  colorTo: green
6
- sdk: gradio
7
- sdk_version: 6.3.0
8
- app_file: app.py
9
  pinned: false
10
  ---
11
 
 
3
  emoji: 🎨
4
  colorFrom: gray
5
  colorTo: green
6
+ sdk: docker
7
+ app_port: 7860
 
8
  pinned: false
9
  ---
10
 
app.py DELETED
@@ -1,13 +0,0 @@
1
- """
2
- Gradio wrapper for HF Spaces compatibility
3
- Mounts FastAPI app to Gradio for proper routing
4
- """
5
- import gradio as gr
6
- from app.main import app
7
-
8
- # Mount FastAPI to Gradio
9
- gradio_app = gr.mount_gradio_app(
10
- gr.Blocks(title="VIBE_LINK API"),
11
- app,
12
- path="/"
13
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
main.py DELETED
@@ -1,485 +0,0 @@
1
- """
2
- VIBE_LINK Backend API
3
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
4
- Serverless-style FastAPI backend for converting websites into Vibe Posters.
5
- Optimized for Hugging Face Spaces (Docker SDK) with 16GB RAM.
6
-
7
- Architecture:
8
- 1. Screenshot Capture → pyppeteer (Headless Chrome)
9
- 2. AI Analysis → Google Gemini 2.5 Flash
10
- 3. Image Generation → Hugging Face Flux.1-dev
11
- 4. Image Hosting → ImgBB API
12
-
13
- Author: S-Grade Developer | Production-Ready Code
14
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
15
- """
16
-
17
- import os
18
- import json
19
- import base64
20
- import asyncio
21
- import tempfile
22
- from pathlib import Path
23
- from typing import Optional, Dict
24
- from contextlib import asynccontextmanager
25
-
26
- import requests
27
- from fastapi import FastAPI, HTTPException, Request
28
- from fastapi.middleware.cors import CORSMiddleware
29
- from fastapi.responses import JSONResponse
30
- from pydantic import BaseModel, HttpUrl, Field
31
- from pyppeteer import launch
32
- from huggingface_hub import InferenceClient
33
- import google.generativeai as genai
34
- from PIL import Image
35
- from dotenv import load_dotenv
36
-
37
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
38
- # CONFIGURATION & ENVIRONMENT
39
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
40
-
41
- load_dotenv()
42
-
43
- # API Keys (Secured via Environment Variables)
44
- HF_TOKEN = os.getenv("HF_TOKEN")
45
- GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
46
- IMGBB_KEY = os.getenv("IMGBB_KEY")
47
-
48
- # Validate API Keys
49
- if not all([HF_TOKEN, GEMINI_API_KEY, IMGBB_KEY]):
50
- raise RuntimeError(
51
- "Missing API Keys! Set HF_TOKEN, GEMINI_API_KEY, IMGBB_KEY in environment."
52
- )
53
-
54
- # Initialize AI Services
55
- genai.configure(api_key=GEMINI_API_KEY)
56
- hf_client = InferenceClient(token=HF_TOKEN)
57
-
58
- # Prevent pyppeteer from downloading Chromium (use system-installed one)
59
- os.environ['PUPPETEER_SKIP_CHROMIUM_DOWNLOAD'] = 'true'
60
- os.environ['PUPPETEER_EXECUTABLE_PATH'] = '/usr/bin/chromium'
61
-
62
- # Constants
63
- SCREENSHOT_TIMEOUT = 20000 # 20 seconds
64
- VIEWPORT_WIDTH = 1280
65
- VIEWPORT_HEIGHT = 1200
66
- FLUX_MODEL = "black-forest-labs/FLUX.1-schnell" # Faster model, same quality (4 steps vs 30)
67
- TEMP_DIR = Path(tempfile.gettempdir()) / "vibe_link"
68
- TEMP_DIR.mkdir(exist_ok=True)
69
-
70
-
71
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
72
- # PYDANTIC MODELS
73
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
74
-
75
- class CreateRequest(BaseModel):
76
- url: HttpUrl = Field(..., description="Target website URL to analyze")
77
-
78
-
79
- class CreateResponse(BaseModel):
80
- status: str
81
- poster_url: str
82
- vibe: Optional[str] = None
83
- summary: Optional[str] = None
84
-
85
-
86
- class ErrorResponse(BaseModel):
87
- status: str = "error"
88
- message: str
89
- detail: Optional[str] = None
90
-
91
-
92
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
93
- # FASTAPI LIFECYCLE MANAGEMENT
94
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
95
-
96
- @asynccontextmanager
97
- async def lifespan(app: FastAPI):
98
- """Manage browser lifecycle to minimize resource usage"""
99
- print("🚀 VIBE_LINK Backend Starting...")
100
- yield
101
- print("🛑 Cleaning up resources...")
102
- # Cleanup temp files on shutdown
103
- for file in TEMP_DIR.glob("*"):
104
- try:
105
- file.unlink()
106
- except Exception:
107
- pass
108
-
109
-
110
- app = FastAPI(
111
- title="VIBE_LINK API",
112
- description="AI-powered Website to Vibe Poster Generator",
113
- version="1.0.0",
114
- lifespan=lifespan,
115
- )
116
-
117
- # CORS Middleware (Allow all origins for public API)
118
- app.add_middleware(
119
- CORSMiddleware,
120
- allow_origins=["*"],
121
- allow_credentials=True,
122
- allow_methods=["*"],
123
- allow_headers=["*"],
124
- )
125
-
126
-
127
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
128
- # CORE PIPELINE FUNCTIONS
129
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
130
-
131
- async def capture_screenshot(url: str) -> Path:
132
- """
133
- STEP 1: Screenshot Capture
134
- Uses Headless Chrome to capture website screenshot.
135
- Optimized for Docker environment with sandbox disabled.
136
- """
137
- screenshot_path = TEMP_DIR / f"screenshot_{id(url)}.jpg"
138
-
139
- browser = None
140
- try:
141
- # Launch Chromium with Docker-safe arguments
142
- browser = await launch(
143
- headless=True,
144
- executablePath="/usr/bin/chromium",
145
- args=[
146
- "--no-sandbox",
147
- "--disable-setuid-sandbox",
148
- "--disable-dev-shm-usage",
149
- "--disable-gpu",
150
- "--disable-software-rasterizer",
151
- "--disable-extensions",
152
- "--single-process", # Memory optimization
153
- ],
154
- )
155
-
156
- page = await browser.newPage()
157
- await page.setViewport({"width": VIEWPORT_WIDTH, "height": VIEWPORT_HEIGHT})
158
-
159
- # Navigate with timeout
160
- await page.goto(
161
- url,
162
- {"waitUntil": "domcontentloaded", "timeout": SCREENSHOT_TIMEOUT}
163
- )
164
-
165
- # Capture screenshot
166
- await page.screenshot({"path": str(screenshot_path), "type": "jpeg", "quality": 85})
167
-
168
- return screenshot_path
169
-
170
- except Exception as e:
171
- raise HTTPException(
172
- status_code=500,
173
- detail=f"Screenshot capture failed: {str(e)}"
174
- )
175
- finally:
176
- if browser:
177
- await browser.close()
178
-
179
-
180
- async def analyze_with_gemini(screenshot_path: Path) -> Dict[str, str]:
181
- """
182
- STEP 2: AI Analysis
183
- Uses Google Gemini 2.5 Flash to analyze screenshot and generate
184
- vibe description + image generation prompt.
185
- """
186
- try:
187
- # Upload image to Gemini
188
- uploaded_file = genai.upload_file(str(screenshot_path))
189
-
190
- # AI Analysis Prompt
191
- analysis_prompt = """
192
- You are a world-class Creative Director and Prompt Engineer.
193
- Analyze this website screenshot and generate a high-end vertical brand poster concept for Flux.1.
194
-
195
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
196
- STEP 1: EXTRACT BRAND IDENTITY
197
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
198
- 1. Find the main brand name/logo in the screenshot.
199
- 2. **CRITICAL: Convert Korean to English romanization.**
200
- Examples:
201
- - "무신사" → "MUSINSA"
202
- - "떡볶이 천국" → "TTEOKBOKKI HEAVEN"
203
- - "이창민" → "LEE CHANGMIN"
204
- - "카페 서울" → "CAFE SEOUL"
205
- 3. If no brand name exists, use the domain name.
206
-
207
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
208
- STEP 2: ANALYZE BUSINESS TYPE & VISUAL DNA
209
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
210
- Identify the business category and extract visual elements:
211
-
212
- **Business Examples:**
213
- - Fashion (무신사) → Clothes, fabrics, hangers, runway
214
- - Food (떡볶이) → Ingredients, steam, spices, bowls
215
- - Tech (코딩) → Code snippets, circuits, data streams
216
- - Fitness (헬스장) → Dumbbells, protein, energy
217
- - Corporate (법률) → Books, marble, gold, scales
218
-
219
- **Extract:**
220
- - Dominant Color Palette (2-3 colors)
221
- - Key Physical Objects (what fills the background)
222
- - Material/Texture (fabric, metal, food, digital)
223
- - Lighting Mood (neon, sunlight, studio, dark)
224
-
225
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
226
- STEP 3: CONSTRUCT FLUX.1 PROMPT
227
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
228
- Build a detailed scene description:
229
-
230
- **BACKGROUND (70% of prompt):**
231
- - Describe a 3D environment FILLED with objects related to the business.
232
- - Example (Fashion): "floating designer clothes, silk fabrics, leather jackets, sneakers, accessories scattered in mid-air"
233
- - Example (Food): "steaming bowls, fresh ingredients, spices exploding, sauce splash, vibrant colors"
234
- - Use the dominant colors extracted in Step 2.
235
-
236
- **CENTER TYPOGRAPHY (30% of prompt):**
237
- - Place the ENGLISH brand name in the CENTER.
238
- - Make it a 3D object matching the theme:
239
- * Fashion: "metallic chrome letters with fabric texture"
240
- * Food: "letters made of the food itself (cookies, cake, rice)"
241
- * Tech: "neon holographic glowing text"
242
- - Keywords: "big bold 3D typography", "magazine cover layout", "centered composition"
243
-
244
- **MANDATORY RULES:**
245
- ✅ NO Korean characters (Hangul) - Flux CANNOT render them
246
- ✅ Convert all Korean to English romanization
247
- ✅ Brand name must be in CENTER in big 3D letters
248
- ✅ Background must be FILLED with thematic 3D objects
249
- ✅ Use "vertical poster", "9:16 aspect ratio", "high quality 3D render"
250
-
251
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
252
- OUTPUT FORMAT (JSON)
253
- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
254
- Return ONLY valid JSON:
255
- {
256
- "vibe": "Business type in 1-2 English words (e.g., Fashion, Spicy Food, Tech)",
257
- "summary": "One sentence description in Korean",
258
- "flux_prompt": "Complete Flux.1 prompt in English following the rules above"
259
- }
260
-
261
- **Example flux_prompt structure:**
262
- "A vertical 3D poster (9:16), [BACKGROUND: detailed scene with thematic objects], centered big bold 3D typography text '[BRAND NAME IN ENGLISH]' made of [material matching theme], [lighting description], magazine cover layout, high quality render, cinematic composition"
263
- """
264
-
265
- # Call Gemini API
266
- model = genai.GenerativeModel("gemini-2.0-flash-exp")
267
- response = model.generate_content([uploaded_file, analysis_prompt])
268
-
269
- # Robust JSON parsing with multiple fallback strategies
270
- response_text = response.text.strip()
271
-
272
- # Strategy 1: Remove markdown code blocks
273
- if response_text.startswith("```json"):
274
- response_text = response_text[7:-3].strip()
275
- elif response_text.startswith("```"):
276
- response_text = response_text[3:-3].strip()
277
-
278
- # Strategy 2: Find JSON object between curly braces
279
- if not response_text.startswith("{"):
280
- start = response_text.find("{")
281
- end = response_text.rfind("}") + 1
282
- if start != -1 and end > start:
283
- response_text = response_text[start:end]
284
-
285
- try:
286
- analysis = json.loads(response_text)
287
- except json.JSONDecodeError as e:
288
- raise ValueError(f"Failed to parse Gemini JSON response: {e}. Response: {response_text[:200]}")
289
-
290
- # Validate required fields
291
- if not all(k in analysis for k in ["vibe", "summary", "flux_prompt"]):
292
- raise ValueError(f"Missing required fields in Gemini response. Got: {list(analysis.keys())}")
293
-
294
- return analysis
295
-
296
- except Exception as e:
297
- raise HTTPException(
298
- status_code=500,
299
- detail=f"AI analysis failed: {str(e)}"
300
- )
301
-
302
-
303
- async def generate_poster(flux_prompt: str) -> Path:
304
- """
305
- STEP 3: Image Generation
306
- Uses Hugging Face Inference API with Flux.1-dev model
307
- to generate vibe poster.
308
- """
309
- poster_path = TEMP_DIR / f"poster_{id(flux_prompt)}.webp"
310
-
311
- try:
312
- # Call Flux.1-schnell via HF Inference API
313
- # Schnell is 10x faster than dev with similar quality
314
- image = hf_client.text_to_image(
315
- flux_prompt,
316
- model=FLUX_MODEL,
317
- # Parameters optimized for vertical poster
318
- width=768,
319
- height=1344, # 9:16 aspect ratio
320
- num_inference_steps=4, # Schnell optimized for 1-4 steps
321
- )
322
-
323
- # Save as WebP for optimal compression
324
- image.save(str(poster_path), "WEBP", quality=90, method=6)
325
-
326
- return poster_path
327
-
328
- except Exception as e:
329
- raise HTTPException(
330
- status_code=500,
331
- detail=f"Image generation failed: {str(e)}"
332
- )
333
-
334
-
335
- async def upload_to_imgbb(image_path: Path) -> str:
336
- """
337
- STEP 4: Image Hosting
338
- Uploads generated poster to ImgBB for permanent hosting.
339
- Returns public URL.
340
- """
341
- try:
342
- # Read image and encode as base64
343
- with open(image_path, "rb") as f:
344
- image_data = base64.b64encode(f.read()).decode("utf-8")
345
-
346
- # Upload to ImgBB
347
- response = requests.post(
348
- "https://api.imgbb.com/1/upload",
349
- data={
350
- "key": IMGBB_KEY,
351
- "image": image_data,
352
- "expiration": 0, # Never expire
353
- },
354
- timeout=30,
355
- )
356
-
357
- response.raise_for_status()
358
- result = response.json()
359
-
360
- if not result.get("success"):
361
- raise ValueError("ImgBB upload failed")
362
-
363
- return result["data"]["url"]
364
-
365
- except Exception as e:
366
- raise HTTPException(
367
- status_code=500,
368
- detail=f"Image upload failed: {str(e)}"
369
- )
370
- finally:
371
- # Clean up local file immediately after upload
372
- try:
373
- image_path.unlink()
374
- except Exception:
375
- pass
376
-
377
-
378
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
379
- # API ENDPOINTS
380
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
381
-
382
- @app.get("/")
383
- async def root():
384
- """API Root - Health & Info"""
385
- return {
386
- "service": "VIBE_LINK API",
387
- "status": "operational",
388
- "version": "1.0.0",
389
- "endpoints": {
390
- "create": "POST /create - Generate vibe poster from URL",
391
- "health": "GET /health - Service health check",
392
- },
393
- }
394
-
395
-
396
- @app.get("/health")
397
- async def health_check():
398
- """Health Check Endpoint (for Docker HEALTHCHECK)"""
399
- return {"status": "healthy", "service": "vibe-link-backend"}
400
-
401
-
402
- @app.post("/create", response_model=CreateResponse, responses={500: {"model": ErrorResponse}})
403
- async def create_vibe_poster(request: CreateRequest):
404
- """
405
- 🎨 Main Endpoint: Generate Vibe Poster
406
-
407
- Pipeline:
408
- 1. Capture screenshot of website
409
- 2. Analyze with Gemini AI
410
- 3. Generate poster with Flux.1
411
- 4. Upload to ImgBB
412
-
413
- Returns:
414
- CreateResponse with poster URL and metadata
415
- """
416
- screenshot_path = None
417
-
418
- try:
419
- # STEP 1: Screenshot
420
- print(f"📸 Capturing screenshot: {request.url}")
421
- screenshot_path = await capture_screenshot(str(request.url))
422
-
423
- # STEP 2: AI Analysis
424
- print("🧠 Analyzing with Gemini...")
425
- analysis = await analyze_with_gemini(screenshot_path)
426
-
427
- # STEP 3: Generate Poster
428
- print("🎨 Generating poster with Flux.1...")
429
- poster_path = await generate_poster(analysis["flux_prompt"])
430
-
431
- # STEP 4: Upload to ImgBB
432
- print("☁️ Uploading to ImgBB...")
433
- poster_url = await upload_to_imgbb(poster_path)
434
-
435
- print(f"✅ Success! Poster URL: {poster_url}")
436
-
437
- return CreateResponse(
438
- status="success",
439
- poster_url=poster_url,
440
- vibe=analysis["vibe"],
441
- summary=analysis["summary"],
442
- )
443
-
444
- except HTTPException:
445
- raise
446
- except Exception as e:
447
- raise HTTPException(
448
- status_code=500,
449
- detail=f"Unexpected error: {str(e)}"
450
- )
451
- finally:
452
- # Cleanup screenshot file
453
- if screenshot_path and screenshot_path.exists():
454
- try:
455
- screenshot_path.unlink()
456
- except Exception:
457
- pass
458
-
459
-
460
- @app.exception_handler(Exception)
461
- async def global_exception_handler(request: Request, exc: Exception):
462
- """Global error handler for unhandled exceptions"""
463
- return JSONResponse(
464
- status_code=500,
465
- content={
466
- "status": "error",
467
- "message": "Internal server error",
468
- "detail": str(exc),
469
- },
470
- )
471
-
472
-
473
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
474
- # MAIN ENTRY POINT
475
- # ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
476
-
477
- if __name__ == "__main__":
478
- import uvicorn
479
-
480
- uvicorn.run(
481
- app,
482
- host="0.0.0.0",
483
- port=7860,
484
- log_level="info",
485
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
pyproject.toml DELETED
@@ -1,22 +0,0 @@
1
- [project]
2
- name = "vibe-link-backend"
3
- version = "1.0.0"
4
- description = "AI-powered Website to Vibe Poster Generator"
5
- requires-python = ">=3.9"
6
- dependencies = [
7
- "fastapi==0.109.0",
8
- "uvicorn[standard]==0.27.0",
9
- "python-multipart==0.0.6",
10
- "pyppeteer==1.0.2",
11
- "google-generativeai==0.3.2",
12
- "huggingface-hub==0.20.3",
13
- "Pillow==10.2.0",
14
- "requests==2.31.0",
15
- "httpx==0.26.0",
16
- "python-dotenv==1.0.1",
17
- "aiofiles==23.2.1",
18
- ]
19
-
20
- [build-system]
21
- requires = ["hatchling"]
22
- build-backend = "hatchling.build"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt CHANGED
@@ -6,9 +6,6 @@ fastapi==0.115.0
6
  uvicorn[standard]==0.32.0
7
  python-multipart==0.0.12
8
 
9
- # Gradio (HF Spaces requirement)
10
- gradio==6.3.0
11
-
12
  # Headless Browser (Screenshot)
13
  pyppeteer==1.0.2
14
 
 
6
  uvicorn[standard]==0.32.0
7
  python-multipart==0.0.12
8
 
 
 
 
9
  # Headless Browser (Screenshot)
10
  pyppeteer==1.0.2
11