from typing import Dict
import torch
from diffusers import DiffusionPipeline
from compel import Compel
from io import BytesIO
import base64
class EndpointHandler:
    """Inference endpoint handler for FLUX.1-dev with a Cityscape LoRA.

    Loads the gated base model plus LoRA weights once at startup, then serves
    text-to-image requests: ``{"inputs": {"prompt": ...}}`` in, base64 PNG out.
    """

    def __init__(self, path: str = ""):
        """Load the pipeline, apply LoRA weights, and set up prompt conditioning.

        Args:
            path: Model directory supplied by the endpoint runtime (unused here;
                the base model is always pulled from the Hub).
        """
        print(f"Initializing model from: {path}")
        self.pipe = DiffusionPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            torch_dtype=torch.float16,
            token=True,  # gated base model; `use_auth_token` is deprecated in diffusers
        )

        # Load LoRA weights from the Hugging Face repo.
        print("Loading LoRA weights from: Texttra/Cityscape_Studio")
        self.pipe.load_lora_weights(
            "Texttra/Cityscape_Studio", weight_name="c1t3_v1.safetensors"
        )

        # Send to GPU if available. NOTE: enable_model_cpu_offload() requires an
        # accelerator device — calling it on a CPU-only host raises, so it must
        # live in the CUDA branch (it replaces a plain .to("cuda") by paging
        # submodules to the GPU on demand, reducing peak VRAM).
        if torch.cuda.is_available():
            self.pipe.enable_model_cpu_offload()
        else:
            self.pipe.to("cpu")

        # Initialize Compel for prompt conditioning. Pooled embeddings are
        # requested via the constructor flag; Compel's __call__ then returns a
        # (conditioning, pooled) tuple — it has no `return_pooled` kwarg.
        # NOTE(review): FLUX uses two text encoders (CLIP + T5); wiring only
        # `tokenizer`/`text_encoder` covers the first — confirm this matches
        # how the pipeline consumes prompt_embeds/pooled_prompt_embeds.
        self.compel = Compel(
            tokenizer=self.pipe.tokenizer,
            text_encoder=self.pipe.text_encoder,
            requires_pooled=True,
        )
        print("Model initialized successfully.")

    def __call__(self, data: Dict) -> Dict:
        """Handle one inference request.

        Args:
            data: Request payload; the prompt is read from ``data["inputs"]["prompt"]``.

        Returns:
            ``{"image": <base64 PNG>}`` on success, ``{"error": ...}`` when no
            prompt was provided.
        """
        print("Received data:", data)
        inputs = data.get("inputs", {})
        prompt = inputs.get("prompt", "")
        print("Extracted prompt:", prompt)
        if not prompt:
            return {"error": "No prompt provided"}

        # Generate both prompt and pooled embeddings (tuple unpack — see __init__).
        conditioning, pooled = self.compel(prompt)
        print("Conditioning complete.")

        # Run the model.
        image = self.pipe(
            prompt_embeds=conditioning,
            pooled_prompt_embeds=pooled,
        ).images[0]
        print("Image generated.")

        # Encode image to base64 so it can travel in a JSON response.
        buffer = BytesIO()
        image.save(buffer, format="PNG")
        base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
        print("Returning image.")
        return {"image": base64_image}