Commit · 19a0fe5
portfolio backend to render
- __pycache__/main.cpython-312.pyc +0 -0
- main.py +59 -0
- render.ymal +8 -0
- requirements.txt +0 -0
__pycache__/main.cpython-312.pyc
ADDED
Binary file (2.56 kB).
main.py
ADDED
@@ -0,0 +1,59 @@
+from fastapi import FastAPI
+from pydantic import BaseModel
+from fastapi.middleware.cors import CORSMiddleware
+from diffusers import DiffusionPipeline
+import torch
+import uuid
+from PIL import Image
+import os
+from fastapi.staticfiles import StaticFiles
+
+app = FastAPI()
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["http://localhost:5173"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+
+# Remote Hugging Face model IDs
+hf_model_ids = {
+    "model1": "Pacicap/FineTuned_claude_StableDiffussion_2_1",
+    "model2": "Pacicap/FineTuned_gpt4o_StableDiffussion_2_1"
+}
+
+loaded_models = {}
+
+class PromptInput(BaseModel):
+    prompt: str
+    model: str  # should be "model1" or "model2"
+
+@app.post("/generate")
+def generate(data: PromptInput):
+    model_key = data.model
+
+    if model_key not in hf_model_ids:
+        return {"error": "Invalid model selected"}
+
+    model_id = hf_model_ids[model_key]
+
+    if model_key not in loaded_models:
+        device = "cuda" if torch.cuda.is_available() else "cpu"
+        pipe = DiffusionPipeline.from_pretrained(model_id).to(device)
+        loaded_models[model_key] = pipe
+    else:
+        pipe = loaded_models[model_key]
+
+    image = pipe(data.prompt).images[0]
+
+    os.makedirs("generated", exist_ok=True)
+    filename = f"{uuid.uuid4().hex}.png"
+    filepath = os.path.join("generated", filename)
+    image.save(filepath)
+
+    return {"url": f"http://localhost:8000/generated/{filename}"}
+
+# Serve images
+app.mount("/generated", StaticFiles(directory="generated"), name="generated")
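
A quick way to exercise the /generate endpoint added above is a small client script. This is a minimal sketch, assuming the backend is started locally with "uvicorn main:app --port 8000" and that the requests package is installed; the prompt text and model key are only examples.

import requests

# Ask the backend to generate an image with the first fine-tuned model.
resp = requests.post(
    "http://localhost:8000/generate",
    json={"prompt": "a watercolor lighthouse at dusk", "model": "model1"},
)

# On success the response carries the URL of the saved PNG,
# e.g. {"url": "http://localhost:8000/generated/<hex>.png"}.
print(resp.json())
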
render.ymal
ADDED
@@ -0,0 +1,8 @@
+services:
+  - type: web
+    name: fastapi-backend
+    env: python
+    buildCommand: ""
+    startCommand: uvicorn main:app --host 0.0.0.0 --port 10000
+    plan: free
+    autoDeploy: true
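
Note that the startCommand above binds the Render service to port 10000, while main.py returns image URLs hardcoded to http://localhost:8000. One way to make the returned URL follow whatever host the request actually reached is shown in the sketch below; the Request parameter and base_url attribute are standard FastAPI/Starlette, but this route and its use here are illustrative only, not part of this commit.

from fastapi import FastAPI, Request

app = FastAPI()

@app.get("/image-base")
def image_base(request: Request):
    # request.base_url carries the scheme, host, and port the client used,
    # e.g. "http://localhost:8000/" locally or the deployed URL in production,
    # so a URL built from it stays valid wherever the service runs.
    return {"generated_base": f"{request.base_url}generated/"}
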
requirements.txt
ADDED
Binary file (1.68 kB).