# app.py — Stable Diffusion interior-design service (txt2img + img2img)
# (header cleaned up: the original lines were Hugging Face web-UI paste
#  residue — "ebraam1's picture / Update app.py / 3d53cf9 verified" —
#  which is not valid Python and would break the module at import time)
from fastapi import FastAPI, UploadFile, File
from pydantic import BaseModel
from diffusers import StableDiffusionPipeline
from huggingface_hub import hf_hub_download
from fastapi.responses import StreamingResponse
from PIL import Image
import torch
import io
app = FastAPI()
# =========================
# تحميل LoRA من HF
# =========================
LORA_PATH = hf_hub_download(
repo_id="ebraam1/interior-sd-models",
filename="Interior_lora.safetensors"
)
print("Loading base model...")
# =========================
# ✔ FIX: استخدم pretrained model بدل single_file
# =========================
pipe = StableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5",
torch_dtype=torch.float32,
safety_checker=None
).to("cpu")
print("Loading LoRA...")
pipe.load_lora_weights(LORA_PATH)
pipe.fuse_lora(lora_scale=0.8)
# =========================
# ⚡ Speed optimizations
# =========================
pipe.enable_attention_slicing()
pipe.enable_vae_slicing()
print("Model ready 🔥")
# =========================
class Prompt(BaseModel):
prompt: str
def to_bytes(img):
buf = io.BytesIO()
img.save(buf, format="PNG")
buf.seek(0)
return buf
# =========================
# TXT2IMG
# =========================
@app.post("/txt2img")
def generate(data: Prompt):
image = pipe(
data.prompt,
num_inference_steps=6,
guidance_scale=5,
height=256,
width=256
).images[0]
return StreamingResponse(to_bytes(image), media_type="image/png")
# =========================
# IMG2IMG (correct way)
# =========================
@app.post("/img2img")
async def img2img_api(file: UploadFile = File(...), prompt: str = ""):
img = Image.open(io.BytesIO(await file.read())).convert("RGB")
img = img.resize((256, 256))
image = pipe(
prompt=prompt,
image=img,
strength=0.6,
num_inference_steps=6,
guidance_scale=5
).images[0]
return StreamingResponse(to_bytes(image), media_type="image/png")