smartdigitalnetworks committed on
Commit
2f94dc3
·
verified ·
1 Parent(s): cdda2d8

Update app_diffusers.py

Browse files
Files changed (1) hide show
  1. app_diffusers.py +5 -15
app_diffusers.py CHANGED
@@ -13,7 +13,6 @@ from diffusers import OvisImagePipeline
13
 
14
  logging.set_verbosity_error()
15
 
16
- # DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
17
  MAX_SEED = np.iinfo(np.int32).max
18
 
19
  device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -28,17 +27,16 @@ pipe = OvisImagePipeline.from_pretrained(
28
  pipe.to(device)
29
 
30
  # @spaces.GPU(duration=75)
31
- def generate(prompt, img_height=1024, img_width=1024, seed=42, steps=50, guidance_scale=5.0):
32
- print(f'inference with prompt : {prompt}, size: {img_height}x{img_width}, seed : {seed}, step : {steps}, cfg : {guidance_scale}')
33
  generator = torch.Generator().manual_seed(seed)
34
  image = pipe(
35
  prompt,
36
  negative_prompt="",
37
  height=img_height,
38
  width=img_width,
39
- num_inference_steps=steps,
40
- true_cfg_scale=guidance_scale,
41
- generator=generator
42
  ).images[0]
43
  return image
44
 
@@ -100,14 +98,6 @@ Built upon [Ovis-U1](https://huggingface.co/spaces/AIDC-AI/Ovis-U1-3B), Ovis-Ima
100
 
101
  with gr.Row():
102
 
103
- guidance_scale = gr.Slider(
104
- label="Guidance Scale",
105
- minimum=1,
106
- maximum=14,
107
- step=0.1,
108
- value=5.0,
109
- )
110
-
111
  num_inference_steps = gr.Slider(
112
  label="Number of inference steps",
113
  minimum=1,
@@ -134,7 +124,7 @@ Built upon [Ovis-U1](https://huggingface.co/spaces/AIDC-AI/Ovis-U1-3B), Ovis-Ima
134
  gr.on(
135
  triggers=[run_button.click, prompt.submit],
136
  fn = generate,
137
- inputs = [prompt, img_height, img_width, seed, num_inference_steps, guidance_scale],
138
  outputs = [result]
139
  )
140
 
 
13
 
14
  logging.set_verbosity_error()
15
 
 
16
  MAX_SEED = np.iinfo(np.int32).max
17
 
18
  device = "cuda" if torch.cuda.is_available() else "cpu"
 
27
  pipe.to(device)
28
 
29
  # @spaces.GPU(duration=75)
30
def generate(prompt, img_height=1024, img_width=1024, seed=42, steps=50):
    """Generate a single image from a text prompt with the global Ovis pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        img_height: Output image height in pixels (default 1024).
        img_width: Output image width in pixels (default 1024).
        seed: Integer seed for the torch RNG, making results reproducible.
        steps: Number of diffusion denoising steps (default 50).

    Returns:
        The first generated PIL image from the pipeline output.
    """
    print(f'inference with prompt : {prompt}, size: {img_height}x{img_width}, seed : {seed}, step : {steps}')
    # CPU generator seeded explicitly so the same seed reproduces the same image.
    generator = torch.Generator().manual_seed(seed)
    # `pipe` is the module-level OvisImagePipeline instance created at startup.
    image = pipe(
        prompt,
        negative_prompt="",
        height=img_height,
        width=img_width,
        num_inference_steps=steps,
        generator=generator,
    ).images[0]
    return image
42
 
 
98
 
99
  with gr.Row():
100
 
 
 
 
 
 
 
 
 
101
  num_inference_steps = gr.Slider(
102
  label="Number of inference steps",
103
  minimum=1,
 
124
  gr.on(
125
  triggers=[run_button.click, prompt.submit],
126
  fn = generate,
127
+ inputs = [prompt, img_height, img_width, seed, num_inference_steps],
128
  outputs = [result]
129
  )
130