Texttra committed on
Commit
ad55b48
·
verified ·
1 Parent(s): 128e289

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +13 -7
handler.py CHANGED
@@ -2,6 +2,8 @@ from typing import Dict
2
  import torch
3
  from diffusers import DiffusionPipeline
4
  from compel import Compel
 
 
5
 
6
  class EndpointHandler:
7
  def __init__(self, path: str = ""):
@@ -12,7 +14,7 @@ class EndpointHandler:
12
  variant="fp16",
13
  )
14
 
15
- # Load your LoRA weights hosted in the same repo
16
  self.pipe.load_lora_weights("./c1t3_v1.safetensors")
17
 
18
  # Move to GPU if available
@@ -24,20 +26,24 @@ class EndpointHandler:
24
  # Optional: enable memory optimization
25
  self.pipe.enable_model_cpu_offload()
26
 
27
- # Initialize Compel (prompt parser)
28
  self.compel = Compel(tokenizer=self.pipe.tokenizer, text_encoder=self.pipe.text_encoder)
29
 
30
  def __call__(self, data: Dict[str, str]) -> Dict:
31
- # Get the prompt from request
32
  prompt = data.get("prompt", "")
33
  if not prompt:
34
  return {"error": "No prompt provided."}
35
 
36
- # Process the prompt with Compel (recommended for FLUX)
37
  conditioning = self.compel(prompt)
38
 
39
- # Generate the image
40
  image = self.pipe(prompt_embeds=conditioning).images[0]
41
 
42
- # Return the result
43
- return {"image": image}
 
 
 
 
 
2
  import torch
3
  from diffusers import DiffusionPipeline
4
  from compel import Compel
5
+ from io import BytesIO
6
+ import base64
7
 
8
  class EndpointHandler:
9
  def __init__(self, path: str = ""):
 
14
  variant="fp16",
15
  )
16
 
17
+ # Load your LoRA weights from the repo
18
  self.pipe.load_lora_weights("./c1t3_v1.safetensors")
19
 
20
  # Move to GPU if available
 
26
  # Optional: enable memory optimization
27
  self.pipe.enable_model_cpu_offload()
28
 
29
+ # Initialize Compel (prompt parser for FLUX)
30
  self.compel = Compel(tokenizer=self.pipe.tokenizer, text_encoder=self.pipe.text_encoder)
31
 
32
  def __call__(self, data: Dict[str, str]) -> Dict:
33
+ # Get prompt from request data
34
  prompt = data.get("prompt", "")
35
  if not prompt:
36
  return {"error": "No prompt provided."}
37
 
38
+ # Generate prompt conditioning using Compel
39
  conditioning = self.compel(prompt)
40
 
41
+ # Generate image using FLUX + LoRA
42
  image = self.pipe(prompt_embeds=conditioning).images[0]
43
 
44
+ # Convert image to base64 string for API response
45
+ buffer = BytesIO()
46
+ image.save(buffer, format="PNG")
47
+ base64_image = base64.b64encode(buffer.getvalue()).decode("utf-8")
48
+
49
+ return {"image": base64_image}