from typing import Dict
import torch
from diffusers import DiffusionPipeline
from compel import Compel
class EndpointHandler:
    """Custom Inference Endpoint handler for FLUX.1-dev with LoRA weights.

    Expects a JSON payload containing a ``"prompt"`` (or the conventional
    ``"inputs"``) key and returns the generated PIL image under ``"image"``.
    """

    def __init__(self, path: str = ""):
        """Load the FLUX pipeline plus LoRA weights and place it on a device.

        Args:
            path: Repository path supplied by the endpoint runtime
                (currently unused; model id and LoRA file are hard-coded).
        """
        # Load the base FLUX pipeline.
        # NOTE(review): FLUX.1-dev is normally run in bfloat16; confirm the
        # repo actually publishes an "fp16" variant before relying on it.
        self.pipe = DiffusionPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            torch_dtype=torch.float16,
            variant="fp16",
        )
        # Load the LoRA weights hosted alongside this handler.
        self.pipe.load_lora_weights("./c1t3_v1.safetensors")
        if torch.cuda.is_available():
            # enable_model_cpu_offload() manages device placement itself,
            # moving sub-modules to the GPU on demand. Calling
            # pipe.to("cuda") first would defeat the offloading (and waste
            # VRAM), so do one OR the other — never both. It is also
            # CUDA-only, hence guarded by the availability check.
            self.pipe.enable_model_cpu_offload()
        else:
            self.pipe.to("cpu")

    def __call__(self, data: Dict[str, str]) -> Dict:
        """Generate one image from the request payload.

        Args:
            data: Request body; the prompt is read from ``"prompt"``,
                falling back to the standard HF ``"inputs"`` key.

        Returns:
            ``{"image": <PIL.Image>}`` on success, or
            ``{"error": <message>}`` when no prompt is supplied.
        """
        # Accept both the custom "prompt" key and the conventional
        # "inputs" key sent by standard HF endpoint clients.
        prompt = data.get("prompt") or data.get("inputs", "")
        if not prompt:
            return {"error": "No prompt provided."}
        # Pass the raw prompt straight to the pipeline. Compel supports
        # only the SD1/SD2/SDXL CLIP encoders; its embeddings do not match
        # FLUX's T5-based prompt_embeds, and the FLUX pipeline would also
        # require pooled_prompt_embeds — so prompt_embeds from Compel
        # would fail at runtime.
        image = self.pipe(prompt=prompt).images[0]
        # NOTE(review): a PIL image is not JSON-serializable as-is; the
        # endpoint serializer may need it base64-encoded — verify against
        # the deployed toolkit before shipping.
        return {"image": image}