from fastapi import FastAPI, Request
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from diffusers import DiffusionPipeline
import torch
import uuid
import os
from PIL import Image
from fastapi.staticfiles import StaticFiles
# FastAPI application instance; CORS is wide open so any frontend origin can
# call the API while the Space is in development.
app = FastAPI()

app.add_middleware(
    CORSMiddleware,
    allow_credentials=True,
    allow_origins=["*"],  # Accept from all for now
    allow_headers=["*"],
    allow_methods=["*"],
)
# UI model keys -> Hugging Face Hub repository ids.
hf_model_ids = {
    "model1": "Pacicap/FineTuned_claude_StableDiffussion_2_1",
    "model2": "Pacicap/FineTuned_gpt4o_StableDiffussion_2_1",
}

# Cache of already-instantiated pipelines, keyed the same way as hf_model_ids.
loaded_models = {}
class PromptInput(BaseModel):
    """Request body for POST /generate."""
    prompt: str  # text prompt passed to the diffusion pipeline
    model: str   # key into hf_model_ids ("model1" or "model2")
@app.post("/generate")
def generate(data: PromptInput, request: Request):
    """Generate an image from a text prompt with the selected model.

    Lazily loads the requested diffusion pipeline on first use and caches it
    in ``loaded_models``. The image is written under ./generated and a URL
    into the /generated static mount is returned.

    Returns a dict with either an "error" key (unknown model, HTTP 200 —
    kept for backward compatibility) or a "url" key pointing at the image.
    """
    model_key = data.model
    if model_key not in hf_model_ids:
        return {"error": "Invalid model selected"}
    model_id = hf_model_ids[model_key]
    # Load-and-cache: only the first request per model pays the download cost.
    if model_key not in loaded_models:
        pipe = DiffusionPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float32
        ).to("cpu")  # CPU-safe for Spaces
        loaded_models[model_key] = pipe
    else:
        pipe = loaded_models[model_key]
    image = pipe(data.prompt).images[0]
    os.makedirs("generated", exist_ok=True)
    filename = f"{uuid.uuid4().hex}.png"
    filepath = os.path.join("generated", filename)
    image.save(filepath)
    # BUG FIX: the saved filename was dropped from the URL (a literal
    # "(unknown)" was returned), so clients got a dead link. Interpolate it.
    return {
        "url": f"{request.base_url}generated/{filename}"
    }
# Ensure the directory exists before mounting: StaticFiles verifies the
# directory at construction, so a fresh container (where nothing has been
# generated yet) would otherwise crash at import time.
os.makedirs("generated", exist_ok=True)
app.mount("/generated", StaticFiles(directory="generated"), name="generated")