Texttra committed on
Commit
514124d
·
verified ·
1 Parent(s): 9cd9948

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +9 -4
handler.py CHANGED
@@ -11,13 +11,14 @@ class EndpointHandler:
11
  self.pipe = DiffusionPipeline.from_pretrained(
12
  "black-forest-labs/FLUX.1-dev",
13
  torch_dtype=torch.float16,
14
- use_auth_token=True # Safe since base model is gated
15
  )
16
 
17
- # ✅ Load LoRA directly from Hugging Face Hub
18
  print("Loading LoRA weights from: Texttra/Cityscape_Studio")
19
  self.pipe.load_lora_weights("Texttra/Cityscape_Studio", weight_name="c1t3_v1.safetensors")
20
 
 
21
  if torch.cuda.is_available():
22
  self.pipe.to("cuda")
23
  else:
@@ -41,10 +42,14 @@ class EndpointHandler:
41
  if not prompt:
42
  return {"error": "No prompt provided."}
43
 
44
- conditioning = self.compel(prompt)
 
45
  print("Conditioning complete.")
46
 
47
- image = self.pipe(prompt_embeds=conditioning).images[0]
 
 
 
48
  print("Image generated.")
49
 
50
  buffer = BytesIO()
 
11
  self.pipe = DiffusionPipeline.from_pretrained(
12
  "black-forest-labs/FLUX.1-dev",
13
  torch_dtype=torch.float16,
14
+ use_auth_token=True # Required for gated base model
15
  )
16
 
17
+ # ✅ Load LoRA from Hugging Face Hub (or local if path is set)
18
  print("Loading LoRA weights from: Texttra/Cityscape_Studio")
19
  self.pipe.load_lora_weights("Texttra/Cityscape_Studio", weight_name="c1t3_v1.safetensors")
20
 
21
+ # Move to GPU if available
22
  if torch.cuda.is_available():
23
  self.pipe.to("cuda")
24
  else:
 
42
  if not prompt:
43
  return {"error": "No prompt provided."}
44
 
45
+ # FLUX requires both prompt_embeds and pooled_prompt_embeds
46
+ conditioning, pooled = self.compel(prompt, return_pooled=True)
47
  print("Conditioning complete.")
48
 
49
+ image = self.pipe(
50
+ prompt_embeds=conditioning,
51
+ pooled_prompt_embeds=pooled
52
+ ).images[0]
53
  print("Image generated.")
54
 
55
  buffer = BytesIO()