| | from typing import Dict |
| | import torch |
| | from diffusers import DiffusionPipeline |
| | from compel import Compel |
| |
|
class EndpointHandler:
    """Inference handler wrapping a FLUX.1-dev diffusion pipeline with a
    LoRA adapter and Compel prompt weighting.

    ``__call__`` expects ``{"prompt": <str>}`` and returns either
    ``{"image": <PIL image>}`` or ``{"error": <message>}``.
    """

    def __init__(self, path: str = ""):
        # NOTE(review): FLUX.1-dev publishes bfloat16 weights; an "fp16"
        # variant may not exist for this repo — confirm against the model
        # card before deploying.
        self.pipe = DiffusionPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            torch_dtype=torch.float16,
            variant="fp16",
        )

        # Apply the fine-tuned LoRA weights shipped alongside the handler.
        self.pipe.load_lora_weights("./c1t3_v1.safetensors")

        # Bug fix: the original moved the pipeline onto the device with
        # .to("cuda") and THEN unconditionally called
        # enable_model_cpu_offload(). Per the diffusers docs the two are
        # mutually exclusive — offloading manages device placement itself
        # and must not be combined with a prior .to(device). It also
        # requires a CUDA accelerator, so the original crashed on the
        # CPU-only path. Choose exactly one placement strategy instead.
        if torch.cuda.is_available():
            # Sequentially offloads sub-models to CPU, keeping only the
            # active one on the GPU — lower VRAM at a small speed cost.
            self.pipe.enable_model_cpu_offload()
        else:
            self.pipe.to("cpu")

        # NOTE(review): Compel is wired only to the primary (CLIP)
        # tokenizer/text_encoder here; FLUX pipelines also carry a T5 pair
        # (tokenizer_2/text_encoder_2). Verify Compel supports this
        # pipeline and that prompt_embeds alone is sufficient.
        self.compel = Compel(
            tokenizer=self.pipe.tokenizer,
            text_encoder=self.pipe.text_encoder,
        )

    def __call__(self, data: Dict[str, str]) -> Dict:
        """Generate one image from ``data["prompt"]``.

        Returns:
            ``{"image": <PIL image>}`` on success, or
            ``{"error": "No prompt provided."}`` when the prompt is missing
            or empty.
        """
        prompt = data.get("prompt", "")
        if not prompt:
            return {"error": "No prompt provided."}

        # Build weighted prompt embeddings via Compel (supports ++/-- style
        # emphasis syntax in the prompt).
        conditioning = self.compel(prompt)

        image = self.pipe(prompt_embeds=conditioning).images[0]
        return {"image": image}
| |
|