colt12 committed on
Commit
50456b3
·
verified ·
1 Parent(s): 85d79f8

Update handler.py

Browse files
Files changed (1) hide show
  1. handler.py +9 -12
handler.py CHANGED
@@ -1,46 +1,43 @@
1
  import torch
2
- from diffusers import StableDiffusionXLPipeline
3
  import base64
4
  from io import BytesIO
5
  import os
6
 
7
class InferenceHandler:
    """Serve text-to-image generation with the colt12/maxcushion SDXL pipeline.

    The pipeline is loaded once at construction; calling the instance with
    ``{"prompt": ..., "negative_prompt": ...}`` returns a base64-encoded PNG.
    """

    def __init__(self):
        # Run on GPU when one is present, otherwise fall back to CPU.
        use_cuda = torch.cuda.is_available()
        self.device = "cuda" if use_cuda else "cpu"
        model_name = "colt12/maxcushion"

        # Authenticate against the Hub with the HUGGINGFACE_TOKEN env var.
        pipeline = StableDiffusionXLPipeline.from_pretrained(
            model_name,
            torch_dtype=torch.float16,
            use_safetensors=True,
            use_auth_token=os.getenv("HUGGINGFACE_TOKEN"),
        )
        self.pipe = pipeline.to(self.device)

    def __call__(self, inputs):
        """Generate one image from ``inputs`` and return it base64-encoded.

        Raises:
            ValueError: if ``inputs`` has no non-empty "prompt".
        """
        prompt = inputs.get("prompt", "")
        if not prompt:
            raise ValueError("A prompt must be provided")
        negative_prompt = inputs.get("negative_prompt", "")

        result = self.pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=30,
            guidance_scale=7.5,
        )
        image = result.images[0]

        # Serialize the PIL image to PNG bytes, then to a base64 string.
        buffer = BytesIO()
        image.save(buffer, format="PNG")
        encoded = base64.b64encode(buffer.getvalue()).decode("utf-8")
        return {"image_base64": encoded}
44
 
45
- # Instantiate the handler
46
# Instantiate the handler at import time.
handler = InferenceHandler()
 
1
  import torch
2
+ from diffusers import StableDiffusionXLPipeline, DDIMScheduler # Import your desired scheduler
3
  import base64
4
  from io import BytesIO
5
  import os
6
 
7
class InferenceHandler:
    """Serve text-to-image generation with the colt12/maxcushion SDXL pipeline.

    The pipeline is loaded once at construction (with a DDIM scheduler);
    calling the instance with ``{"prompt": ..., "negative_prompt": ...}``
    returns a base64-encoded PNG.
    """

    def __init__(self):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        model_name = "colt12/maxcushion"

        # float16 is only reliably supported on CUDA; use float32 on CPU
        # so the pipeline does not hit unimplemented half-precision ops.
        dtype = torch.float16 if self.device == "cuda" else torch.float32

        # Load the pipeline with authentication. `token` replaces the
        # deprecated `use_auth_token` kwarg (removed in newer diffusers).
        self.pipe = StableDiffusionXLPipeline.from_pretrained(
            model_name,
            torch_dtype=dtype,
            use_safetensors=True,
            token=os.getenv("HUGGINGFACE_TOKEN"),
        ).to(self.device)

        # Set the scheduler programmatically, reusing the loaded config.
        self.pipe.scheduler = DDIMScheduler.from_config(self.pipe.scheduler.config)

    def __call__(self, inputs):
        """Generate one image and return it base64-encoded.

        Args:
            inputs: mapping with a required non-empty "prompt" and an
                optional "negative_prompt".

        Returns:
            ``{"image_base64": <base64-encoded PNG string>}``

        Raises:
            ValueError: if ``inputs`` has no non-empty "prompt".
        """
        prompt = inputs.get("prompt", "")
        if not prompt:
            raise ValueError("A prompt must be provided")

        negative_prompt = inputs.get("negative_prompt", "")

        # inference_mode() skips autograd bookkeeping during generation.
        with torch.inference_mode():
            image = self.pipe(
                prompt=prompt,
                negative_prompt=negative_prompt,
                num_inference_steps=30,
                guidance_scale=7.5,
            ).images[0]

        # Serialize the PIL image to PNG bytes, then to a base64 string.
        buffered = BytesIO()
        image.save(buffered, format="PNG")
        image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")

        return {"image_base64": image_base64}
42
 
 
43
# Instantiate the handler at import time.
handler = InferenceHandler()