hellokawei committed
Commit 76362b7 · verified · 1 Parent(s): 8e43b34

Update app.py

Files changed (1):
  app.py (+11 -5)
app.py CHANGED

@@ -2,6 +2,8 @@ import gradio as gr
 from transformers import pipeline, WhisperProcessor, WhisperForConditionalGeneration
 from diffusers import StableDiffusionPipeline
 import torch
+import numpy as np
+import soundfile as sf

 # Step 1: Prompt-to-Prompt Generation using BART (or any LLM except GPT or DeepSeek)
 prompt_generator = pipeline("text2text-generation", model="facebook/bart-large-cnn")
@@ -34,9 +36,12 @@ def generate_image(prompt: str, creativity: float, include_background: bool):
 processor = WhisperProcessor.from_pretrained("openai/whisper-large")
 model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")

-def transcribe_audio(audio):
+def transcribe_audio(audio: np.ndarray, sampling_rate: int) -> str:
+    # Save the audio as a temporary WAV file
+    sf.write("temp_audio.wav", audio, sampling_rate)
+
     # Convert audio to text using Whisper
-    audio_input = processor(audio, return_tensors="pt").input_features
+    audio_input = processor("temp_audio.wav", return_tensors="pt").input_features
     predicted_ids = model.generate(audio_input)
     transcription = processor.decode(predicted_ids[0], skip_special_tokens=True)
     return transcription
@@ -51,9 +56,9 @@ def process_input(description: str, creativity: float, include_background: bool)

     return prompt, image

-def process_audio_input(audio):
+def process_audio_input(audio, sampling_rate):
     # Convert audio to text
-    description = transcribe_audio(audio)
+    description = transcribe_audio(audio, sampling_rate)
     # Generate a prompt and image based on transcribed text
     prompt = generate_prompt(description)
     image = generate_image(prompt, creativity=0.7, include_background=True)
@@ -64,7 +69,7 @@ text_input = gr.Textbox(label="Enter Description", placeholder="E.g., A magical
 creativity_slider = gr.Slider(minimum=0, maximum=1, step=0.1, label="Creativity (0 to 1)", value=0.7)
 background_checkbox = gr.Checkbox(label="Include Background", value=True)

-audio_input = gr.Audio(type="numpy", label="Speak your Description")
+audio_input = gr.Audio(type="numpy", label="Speak your Description", source="microphone")

 # Create Gradio interface for text input
 interface = gr.Interface(
@@ -94,3 +99,4 @@ interface_with_audio = gr.Interface(

 # Launch the interface with multiple tabs for text and voice input
 gr.TabbedInterface([interface, interface_with_audio]).launch()
+
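
A note on the rewritten transcribe_audio: WhisperProcessor does not take a file path, so passing "temp_audio.wav" to it will not transcribe the file. Its feature extractor expects the raw waveform array together with its sampling rate, and it expects 16 kHz input without resampling for you, which makes the temp_audio.wav round-trip unnecessary as well. A minimal sketch of a version that works directly on the numpy audio, assuming torchaudio is available for resampling and that processor and model are the globals defined above:

    import numpy as np
    import torch
    import torchaudio

    def transcribe_audio(audio: np.ndarray, sampling_rate: int) -> str:
        # gr.Audio(type="numpy") delivers int16 PCM; convert to float32 in [-1, 1].
        if audio.dtype == np.int16:
            audio = audio.astype(np.float32) / 32768.0
        # Collapse stereo to mono if needed.
        if audio.ndim > 1:
            audio = audio.mean(axis=1)
        # Whisper's feature extractor expects 16 kHz and does not resample.
        if sampling_rate != 16000:
            audio = torchaudio.functional.resample(
                torch.from_numpy(audio), sampling_rate, 16000
            ).numpy()
        # Pass the raw waveform and its sampling rate, not a file path.
        input_features = processor(
            audio, sampling_rate=16000, return_tensors="pt"
        ).input_features
        predicted_ids = model.generate(input_features)
        return processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]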
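Separately, gr.Audio(type="numpy") hands the callback a single (sampling_rate, data) tuple rather than two arguments, so the two-parameter process_audio_input(audio, sampling_rate) will never receive the sampling rate from the one audio component. A sketch of the unpacking, assuming audio_input is the sole input of interface_with_audio:

    def process_audio_input(audio):
        # Gradio passes one value per input component: here a (sampling_rate, data) tuple.
        sampling_rate, data = audio
        description = transcribe_audio(data, sampling_rate)
        # Generate a prompt and image from the transcribed text.
        prompt = generate_prompt(description)
        image = generate_image(prompt, creativity=0.7, include_background=True)
        return prompt, image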
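Finally, source="microphone" is the Gradio 3.x spelling; Gradio 4.x renamed the parameter to sources, which takes a list. If the Space runs on a Gradio 4 runtime, the equivalent component would be:

    # Gradio 4.x spelling; on Gradio 3.x keep source="microphone".
    audio_input = gr.Audio(type="numpy", label="Speak your Description", sources=["microphone"])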