stefania11 committed on
Commit
4f18cef
·
1 Parent(s): a6e3fde

update app file

Browse files
Files changed (1) hide show
  1. app.py +5 -50
app.py CHANGED
@@ -12,18 +12,12 @@ import os
12
  stable_diffusion = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion")
13
  ### β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
14
 
15
- title="Whisper to Stable Diffusion"
16
 
17
  ### β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
18
 
19
  whisper_model = whisper.load_model("small")
20
 
21
- #device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
22
-
23
- #pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=MY_SECRET_TOKEN)
24
- #pipe.to(device)
25
-
26
- ### β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
27
 
28
  def get_images(prompt):
29
  gallery_dir = stable_diffusion(prompt, fn_index=2)
@@ -38,52 +32,13 @@ def magic_whisper_to_sd(audio, guidance_scale, nb_iterations, seed):
38
 
39
  return whisper_results[0], whisper_results[1], whisper_results[2], images
40
 
41
- #def diffuse(prompt, guidance_scale, nb_iterations, seed):
42
- #
43
- # generator = torch.Generator(device=device).manual_seed(int(seed))
44
- #
45
- # print("""
46
- # β€”
47
- # Sending prompt to Stable Diffusion ...
48
- # β€”
49
- # """)
50
- # print("prompt: " + prompt)
51
- # print("guidance scale: " + str(guidance_scale))
52
- # print("inference steps: " + str(nb_iterations))
53
- # print("seed: " + str(seed))
54
- #
55
- # images_list = pipe(
56
- # [prompt] * 2,
57
- # guidance_scale=guidance_scale,
58
- # num_inference_steps=nb_iterations,
59
- # generator=generator
60
- # )
61
- #
62
- # images = []
63
- #
64
- # safe_image = Image.open(r"unsafe.png")
65
- #
66
- # for i, image in enumerate(images_list["sample"]):
67
- # if(images_list["nsfw_content_detected"][i]):
68
- # images.append(safe_image)
69
- # else:
70
- # images.append(image)
71
- #
72
- #
73
- # print("Stable Diffusion has finished")
74
- # print("β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”")
75
- #
76
- # return images
77
-
78
  def translate(audio):
79
  print("""
80
  β€”
81
  Sending audio to Whisper ...
82
  β€”
83
  """)
84
- # current dateTime
85
  now = datetime.now()
86
- # convert to string
87
  date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
88
  print('DateTime String:', date_time_str)
89
 
@@ -274,14 +229,14 @@ with gr.Blocks(css=css) as demo:
274
  with gr.Column():
275
  gr.HTML('''
276
  <h1>
277
- Whisper to Stable Diffusion
278
  </h1>
279
  <p style='text-align: center;'>
280
- Ask stable diffusion for images by speaking (or singing 🤗) in your native language ! Try it in French 😉
281
  </p>
282
 
283
  <p style='text-align: center;'>
284
- This demo is wired to the official SD Space • Offered by Sylvain <a href='https://twitter.com/fffiloni' target='_blank'>@fffiloni</a> • <img id='visitor-badge' alt='visitor badge' src='https://visitor-badge.glitch.me/badge?page_id=gradio-blocks.whisper-to-stable-diffusion' style='display: inline-block' /><br />
285
  β€”
286
  </p>
287
 
@@ -318,7 +273,7 @@ with gr.Blocks(css=css) as demo:
318
  )
319
  with gr.Row():
320
  audio_u_translate = gr.Button("Check Whisper first ? 👍", elem_id="check_btn_2")
321
- audio_u_direct_sd = gr.Button("Magic Whisper › SD right now!", elem_id="magic_btn_2")
322
 
323
  with gr.Accordion(label="Stable Diffusion Settings", elem_id="sd_settings", visible=False):
324
  with gr.Row():
 
12
  stable_diffusion = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion")
13
  ### β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
14
 
15
+ title="DataTeller"
16
 
17
  ### β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”
18
 
19
  whisper_model = whisper.load_model("small")
20
 
 
 
 
 
 
 
21
 
22
  def get_images(prompt):
23
  gallery_dir = stable_diffusion(prompt, fn_index=2)
 
32
 
33
  return whisper_results[0], whisper_results[1], whisper_results[2], images
34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  def translate(audio):
36
  print("""
37
  β€”
38
  Sending audio to Whisper ...
39
  β€”
40
  """)
 
41
  now = datetime.now()
 
42
  date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
43
  print('DateTime String:', date_time_str)
44
 
 
229
  with gr.Column():
230
  gr.HTML('''
231
  <h1>
232
+ DataTeller
233
  </h1>
234
  <p style='text-align: center;'>
235
+ Generate data visualizations by speaking in your native language ! Try it in Romanian 😉
236
  </p>
237
 
238
  <p style='text-align: center;'>
239
+ This demo is wired to the official SD Space and using the Whisper model
240
  β€”
241
  </p>
242
 
 
273
  )
274
  with gr.Row():
275
  audio_u_translate = gr.Button("Check Whisper first ? 👍", elem_id="check_btn_2")
276
+ # audio_u_direct_sd = gr.Button("Magic Whisper › SD right now!", elem_id="magic_btn_2")
277
 
278
  with gr.Accordion(label="Stable Diffusion Settings", elem_id="sd_settings", visible=False):
279
  with gr.Row():