akhaliq HF Staff committed on
Commit
f88bd25
·
verified ·
1 Parent(s): 0d20f19

Update app.py from anycoder

Browse files
Files changed (1) hide show
  1. app.py +10 -16
app.py CHANGED
@@ -11,23 +11,18 @@ import gradio as gr
11
  import torch
12
  from diffusers.pipelines.glm_image import GlmImagePipeline
13
  from PIL import Image
14
- from gradio import spaces
15
  import time
16
  import random
17
 
18
- # Global pipeline variable
19
- pipe = None
20
-
21
- def load_model():
22
- """Load the GLM-Image model with bfloat16 precision."""
23
- global pipe
24
- if pipe is None:
25
- pipe = GlmImagePipeline.from_pretrained(
26
- "zai-org/GLM-Image",
27
- torch_dtype=torch.bfloat16,
28
- device_map="auto"
29
- )
30
- return pipe
31
 
32
  def calculate_duration(num_inference_steps: int) -> int:
33
  """
@@ -135,7 +130,6 @@ def process_image(
135
  height, width = adjusted_height, adjusted_width
136
 
137
  progress(0.1, desc="Loading model...")
138
- pipeline = load_model()
139
 
140
  progress(0.2, desc="Preparing image...")
141
  input_image = image.convert("RGB")
@@ -143,7 +137,7 @@ def process_image(
143
  generator = torch.Generator(device="cuda").manual_seed(seed)
144
 
145
  progress(0.4, desc="Generating image...", visible=True)
146
- result = pipeline(
147
  prompt=prompt,
148
  image=[input_image],
149
  height=height,
 
11
  import torch
12
  from diffusers.pipelines.glm_image import GlmImagePipeline
13
  from PIL import Image
14
+ import spaces
15
  import time
16
  import random
17
 
18
+ # Load the GLM-Image model directly with bfloat16 precision
19
+ print("Loading GLM-Image model... This may take a few minutes.")
20
+ pipe = GlmImagePipeline.from_pretrained(
21
+ "zai-org/GLM-Image",
22
+ torch_dtype=torch.bfloat16,
23
+ device_map="auto"
24
+ )
25
+ print("Model loaded successfully!")
 
 
 
 
 
26
 
27
  def calculate_duration(num_inference_steps: int) -> int:
28
  """
 
130
  height, width = adjusted_height, adjusted_width
131
 
132
  progress(0.1, desc="Loading model...")
 
133
 
134
  progress(0.2, desc="Preparing image...")
135
  input_image = image.convert("RGB")
 
137
  generator = torch.Generator(device="cuda").manual_seed(seed)
138
 
139
  progress(0.4, desc="Generating image...", visible=True)
140
+ result = pipe(
141
  prompt=prompt,
142
  image=[input_image],
143
  height=height,