from typing import Dict
import torch
from diffusers import DiffusionPipeline
from compel import Compel
from io import BytesIO
import base64
class EndpointHandler:
    """Inference-endpoint wrapper around FLUX.1-dev with a cityscape LoRA.

    Loads the diffusion pipeline once at startup and serves base64-encoded
    PNG images from text prompts via ``__call__``.
    """

    def __init__(self, path: str = ""):
        """Load the base pipeline, apply the LoRA weights, and pick a device.

        Args:
            path: Model directory supplied by the hosting platform. Unused
                here — the pipeline is always pulled from the Hugging Face Hub.
        """
        print(f"Initializing model from: {path}")
        self.pipe = DiffusionPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            torch_dtype=torch.float16,
            # `use_auth_token` is deprecated in recent diffusers releases;
            # `token=True` reads the cached Hub credentials the same way.
            token=True,
        )
        print("Loading LoRA weights from: Texttra/Cityscape_Studio")
        self.pipe.load_lora_weights(
            "Texttra/Cityscape_Studio", weight_name="c1t3_v1.safetensors"
        )
        if torch.cuda.is_available():
            self.pipe.to("cuda")
        else:
            # Fix: the original also called enable_model_cpu_offload() in this
            # branch, but accelerate's model offloading needs a CUDA execution
            # device and raises on CPU-only hosts. Plain CPU placement is the
            # correct fallback.
            self.pipe.to("cpu")
        # NOTE(review): FLUX.1 uses dual text encoders (CLIP + T5); Compel
        # wired to only `tokenizer`/`text_encoder` may not produce the
        # embeddings the pipeline expects — confirm against the deployed
        # diffusers/compel versions.
        self.compel = Compel(
            tokenizer=self.pipe.tokenizer,
            text_encoder=self.pipe.text_encoder,
        )
        print("Model initialized successfully.")

    def __call__(self, data: Dict) -> Dict:
        """Generate one image for the prompt carried in ``data``.

        Args:
            data: Request payload; ``data["inputs"]`` is either the prompt
                string itself or a dict with a ``"prompt"`` key.

        Returns:
            ``{"image": <base64 PNG>}`` on success, or ``{"error": <message>}``
            on failure (including a missing/empty prompt).
        """
        print("Received data:", data)
        try:
            inputs = data.get("inputs", {})
            if isinstance(inputs, str):
                # Raw string input (e.g., Postman tests) is the prompt itself.
                prompt = inputs
            else:
                prompt = inputs.get("prompt", "")
            print("Extracted prompt:", prompt)
            if not prompt:
                return {"error": "No prompt provided"}
            conditioning = self.compel(prompt)
            print("Conditioning complete.")
            image = self.pipe(prompt_embeds=conditioning).images[0]
            print("Image generated.")
            buffer = BytesIO()
            image.save(buffer, format="PNG")
            base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
            print("Returning image.")
            return {"image": base64_image}
        except Exception as e:
            # Boundary handler: surface the error to the caller rather than
            # crashing the endpoint worker.
            print(f"Error occurred: {str(e)}")
            return {"error": str(e)}