import io

import torch
from diffusers import StableDiffusionImg2ImgPipeline, StableDiffusionPipeline
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import StreamingResponse
from huggingface_hub import hf_hub_download
from PIL import Image
from pydantic import BaseModel
|
|
# FastAPI application exposing CPU-based Stable Diffusion endpoints
# (text-to-image and image-to-image) with an interior-design LoRA fused in.
app = FastAPI()


# Fetch the LoRA weights from the Hugging Face Hub (cached locally after
# the first download).
LORA_PATH = hf_hub_download(
    repo_id="ebraam1/interior-sd-models",
    filename="Interior_lora.safetensors"
)


print("Loading base model...")


# Load the SD 1.5 base pipeline on CPU. float32 is the CPU-friendly dtype,
# and the safety checker is disabled to save memory and startup time.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float32,
    safety_checker=None
).to("cpu")


print("Loading LoRA...")


# Merge the LoRA into the base weights at 80% strength so per-call
# inference carries no adapter overhead.
pipe.load_lora_weights(LORA_PATH)
pipe.fuse_lora(lora_scale=0.8)


# Trade some speed for lower peak memory during attention and VAE decode —
# appropriate for CPU inference.
pipe.enable_attention_slicing()
pipe.enable_vae_slicing()


print("Model ready 🔥")
|
|
|
|
| |
class Prompt(BaseModel):
    """Request body for the /txt2img endpoint."""

    # Free-text description of the image to generate.
    prompt: str
|
|
|
|
def to_bytes(img):
    """Encode *img* (any object with a PIL-style ``save``) as PNG and
    return an in-memory stream rewound to the start, ready for streaming."""
    stream = io.BytesIO()
    img.save(stream, format="PNG")
    stream.seek(0)
    return stream
|
|
|
|
| |
| |
| |
@app.post("/txt2img")
def generate(data: Prompt):
    """Text-to-image endpoint: render ``data.prompt`` and stream back a PNG."""
    # Small resolution and few denoising steps keep CPU latency tolerable.
    settings = {
        "num_inference_steps": 6,
        "guidance_scale": 5,
        "height": 256,
        "width": 256,
    }
    result = pipe(data.prompt, **settings)
    return StreamingResponse(to_bytes(result.images[0]), media_type="image/png")
|
|
|
|
| |
| |
| |
@app.post("/img2img")
async def img2img_api(file: UploadFile = File(...), prompt: str = ""):
    """Image-to-image endpoint: redraw the uploaded image guided by *prompt*.

    Accepts a multipart image upload, resizes it to 256x256, runs the
    LoRA-fused Stable Diffusion img2img pipeline over it, and streams the
    result back as a PNG.
    """
    # BUG FIX: `pipe` is a text-to-image StableDiffusionPipeline whose
    # __call__ rejects the `image`/`strength` kwargs with a TypeError, so
    # this endpoint could never succeed. Build an img2img pipeline that
    # shares the already-loaded (and LoRA-fused) components — no extra
    # weights are loaded — and cache it on the function for reuse.
    i2i = getattr(img2img_api, "_i2i_pipe", None)
    if i2i is None:
        i2i = StableDiffusionImg2ImgPipeline(**pipe.components)
        img2img_api._i2i_pipe = i2i

    img = Image.open(io.BytesIO(await file.read())).convert("RGB")
    img = img.resize((256, 256))

    image = i2i(
        prompt=prompt,
        image=img,
        strength=0.6,           # keep 40% of the input image's structure
        num_inference_steps=6,  # few steps for CPU latency
        guidance_scale=5
    ).images[0]

    return StreamingResponse(to_bytes(image), media_type="image/png")