Texttra committed on
Commit
128e289
·
verified ·
1 Parent(s): bc76d24

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +40 -19
handler.py CHANGED
@@ -1,22 +1,43 @@
1
- from diffusers import DiffusionPipeline
2
  import torch
 
 
3
 
4
class EndpointHandler:
    """Inference handler wrapping a pretrained ``DiffusionPipeline``.

    Loads the ``Texttra/Cityscape_Studio`` checkpoint at startup and
    generates one image per request.
    """

    def __init__(self, path="Texttra/Cityscape_Studio"):
        """Load the pipeline once; raise if loading fails so the endpoint
        reports unhealthy instead of serving a broken handler."""
        print("Loading pipeline...")
        try:
            # NOTE(review): revision="fp16" is the legacy fp16-branch
            # convention; modern checkpoints use variant="fp16" instead —
            # confirm which one this repo actually publishes.
            self.pipe = DiffusionPipeline.from_pretrained(
                path,
                torch_dtype=torch.float16,
                revision="fp16",
                use_safetensors=True,
            )
            # Fix: the original unconditionally called .to("cuda"), which
            # crashes on CPU-only hosts. Guard on availability.
            # (float16 on CPU is slow and may be unsupported by some ops.)
            device = "cuda" if torch.cuda.is_available() else "cpu"
            self.pipe = self.pipe.to(device)
            print("Pipeline loaded successfully.")
        except Exception as e:
            # Log for endpoint diagnostics, then re-raise so startup fails loudly.
            print(f"Error loading model: {e}")
            raise

    def __call__(self, data):
        """Generate an image for the request payload.

        Accepts either ``{"inputs": <prompt>}`` or a bare prompt payload;
        returns the first generated PIL image.
        """
        # Fix: use .get() instead of .pop() so the caller's dict is not mutated.
        inputs = data.get("inputs", data)
        # A list payload uses its first element as the prompt.
        prompt = inputs if isinstance(inputs, str) else inputs[0]
        return self.pipe(prompt).images[0]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict
2
  import torch
3
+ from diffusers import DiffusionPipeline
4
+ from compel import Compel
5
 
6
class EndpointHandler:
    """Custom Inference Endpoints handler for FLUX.1-dev with LoRA weights.

    Loads the base FLUX pipeline, applies the repo-local LoRA file, and
    generates one image per request.
    """

    def __init__(self, path: str = ""):
        # Load the base FLUX pipeline.
        # Fix: FLUX.1-dev publishes no "fp16" variant, so variant="fp16"
        # fails at load time — load the default weights in float16 instead.
        self.pipe = DiffusionPipeline.from_pretrained(
            "black-forest-labs/FLUX.1-dev",
            torch_dtype=torch.float16,
        )

        # Load the LoRA weights hosted in the same repo.
        self.pipe.load_lora_weights("./c1t3_v1.safetensors")

        if torch.cuda.is_available():
            # Fix: the original called .to("cuda") and then
            # enable_model_cpu_offload(); the two conflict — offload manages
            # device placement itself and the diffusers docs warn against
            # moving the pipeline to GPU first. Use offload alone on GPU hosts.
            self.pipe.enable_model_cpu_offload()
        else:
            self.pipe.to("cpu")

    def __call__(self, data: Dict[str, str]) -> Dict:
        """Generate an image for the request payload.

        Accepts ``{"prompt": ...}`` (original contract) or ``{"inputs": ...}``
        (HF Inference Endpoints convention). Returns ``{"image": <PIL image>}``
        or ``{"error": ...}`` when no prompt is supplied.
        """
        prompt = data.get("prompt") or data.get("inputs", "")
        if not prompt:
            return {"error": "No prompt provided."}

        # Fix: the original encoded the prompt with Compel and passed only
        # prompt_embeds. FLUX uses dual text encoders (CLIP + T5) and
        # FluxPipeline requires pooled_prompt_embeds alongside prompt_embeds,
        # while Compel wraps only the CLIP tokenizer/encoder — so that path
        # raised at inference time. Pass the raw prompt and let the pipeline
        # run its own encoders.
        image = self.pipe(prompt).images[0]

        return {"image": image}