RP-Azul committed on
Commit
5c13f3d
·
verified ·
1 Parent(s): f2d73f4

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -15
app.py CHANGED
@@ -11,17 +11,8 @@ pipe3 = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusio
11
  pipe3.to("cuda" if torch.cuda.is_available() else "cpu")
12
 
13
  def audio_to_image(audio):
14
- try:
15
- # code sample from online
16
- if isinstance(audio, tuple):
17
- # If Gradio provides (sample rate, numpy array), save it as a temporary file
18
- sr, audio_data = audio
19
- with tempfile.NamedTemporaryFile(suffix=".wav") as temp_audio_file:
20
- librosa.output.write_wav(temp_audio_file.name, audio_data, sr)
21
- transcription = pipe1(temp_audio_file.name)
22
- else:
23
- # If Gradio provides a file path, use it directly
24
- transcription = pipe1(audio)
25
 
26
  transcription_text = transcription['text']
27
 
@@ -32,9 +23,6 @@ def audio_to_image(audio):
32
  image = pipe3(prompt).images[0]
33
 
34
  return image
35
- except Exception as e:
36
- print(f"Error during processing: {e}")
37
- return None
38
-
39
  demo = gr.Interface(fn=audio_to_image, inputs=gr.Audio(), outputs="image")
40
  demo.launch(share=True)
 
11
  pipe3.to("cuda" if torch.cuda.is_available() else "cpu")
12
 
13
  def audio_to_image(audio):
14
+
15
+ transcription = pipe1(audio)
 
 
 
 
 
 
 
 
 
16
 
17
  transcription_text = transcription['text']
18
 
 
23
  image = pipe3(prompt).images[0]
24
 
25
  return image
26
+
 
 
 
27
  demo = gr.Interface(fn=audio_to_image, inputs=gr.Audio(), outputs="image")
28
  demo.launch(share=True)