shumpei2525 committed on
Commit
30e32ff
·
verified ·
1 Parent(s): c41c3ed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -24
app.py CHANGED
@@ -58,30 +58,24 @@ pipe.load_lora_weights("purotan_1750.safetensors")
58
  MAX_SEED = np.iinfo(np.int32).max
59
  MAX_IMAGE_SIZE = 1024
60
 
61
- # Define the image generation function with progress
62
  @spaces.GPU # [uncomment to use ZeroGPU]
63
- def generate_image(place, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, progress: gr.Progress):
64
- with progress:
65
- progress(10, desc="シードの設定中...")
66
- if randomize_seed:
67
- seed = random.randint(0, MAX_SEED)
68
 
69
- progress(30, desc="プロンプトの生成中...")
70
- prompt = make_prompt(place)
71
-
72
- progress(50, desc="画像生成中...")
73
- generator = torch.Generator().manual_seed(seed)
74
-
75
- image = pipe(
76
- prompt=prompt,
77
- guidance_scale=guidance_scale,
78
- num_inference_steps=num_inference_steps,
79
- width=width,
80
- height=height,
81
- generator=generator
82
- ).images[0]
83
-
84
- progress(100, desc="画像生成完了!")
85
 
86
  return image, seed
87
 
@@ -98,7 +92,7 @@ with gr.Blocks(css=css) as demo:
98
 
99
  with gr.Column(elem_id="col-container"):
100
  gr.Markdown("""
101
- # ใทใ‚ใŸใ‚“ใ‚ฌใƒใƒฃใ‚ฌใƒใƒฃ็”ปๅƒ็”Ÿๆˆ๏ผ
102
  ボタンを押して場所を選択し、画像を生成してください！
103
  """)
104
 
@@ -196,9 +190,9 @@ with gr.Blocks(css=css) as demo:
196
  height,
197
  guidance_scale,
198
  num_inference_steps
199
- # progress parameter is handled automatically by Gradio
200
  ],
201
  outputs=[result, seed]
202
  )
203
 
204
  demo.queue().launch()
 
 
58
  MAX_SEED = np.iinfo(np.int32).max
59
  MAX_IMAGE_SIZE = 1024
60
 
61
+ # Define the image generation function
62
  @spaces.GPU # [uncomment to use ZeroGPU]
63
+ def generate_image(place, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
64
+ if randomize_seed:
65
+ seed = random.randint(0, MAX_SEED)
 
 
66
 
67
+ prompt = make_prompt(place)
68
+
69
+ generator = torch.Generator().manual_seed(seed)
70
+
71
+ image = pipe(
72
+ prompt=prompt,
73
+ guidance_scale=guidance_scale,
74
+ num_inference_steps=num_inference_steps,
75
+ width=width,
76
+ height=height,
77
+ generator=generator
78
+ ).images[0]
 
 
 
 
79
 
80
  return image, seed
81
 
 
92
 
93
  with gr.Column(elem_id="col-container"):
94
  gr.Markdown("""
95
+ # Text-to-Image Gradio Template
96
  ボタンを押して場所を選択し、画像を生成してください！
97
  """)
98
 
 
190
  height,
191
  guidance_scale,
192
  num_inference_steps
 
193
  ],
194
  outputs=[result, seed]
195
  )
196
 
197
  demo.queue().launch()
198
+