import gradio as gr
import numpy as np
import torch
from transformers import pipeline
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load speech-to-text model (Whisper)
transcriber = pipeline(
    "automatic-speech-recognition", model="openai/whisper-base", device=device
)

# Load image generation model (Stable Diffusion).
# Note: the original "runwayml/stable-diffusion-v1-5" repo was removed from the
# Hugging Face Hub; this community mirror hosts the same weights.
pipe = DiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
pipe = pipe.to(device)


# Speech-to-text function
def transcribe_audio(audio):
    """Convert audio to text using Whisper."""
    if audio is None:
        return ""
    try:
        # Gradio Audio with type="numpy" returns a (sample_rate, audio_data) tuple
        if isinstance(audio, tuple):
            sample_rate, audio_data = audio
            # Whisper expects mono float audio in [-1, 1]; Gradio records
            # integer PCM (often stereo), so convert before transcribing
            if np.issubdtype(audio_data.dtype, np.integer):
                audio_data = audio_data.astype(np.float32) / np.iinfo(audio_data.dtype).max
            if audio_data.ndim > 1:
                audio_data = audio_data.mean(axis=1)  # downmix stereo to mono
            result = transcriber(
                {"array": audio_data.astype(np.float32), "sampling_rate": sample_rate}
            )
        else:
            # A file path can be passed straight to the pipeline
            result = transcriber(audio)
        text = result.get("text", "").strip()
        return text if text else "No speech detected"
    except Exception as e:
        return f"Error transcribing audio: {e}"


# Image generation function
def generate_image_from_text(prompt):
    """Generate an image from a text prompt using Stable Diffusion."""
    if not prompt or not prompt.strip():
        return None, "Please provide a text prompt"
    try:
        with torch.no_grad():
            image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
        return image, f"✓ Generated image from prompt: '{prompt}'"
    except Exception as e:
        return None, f"Error generating image: {e}"


# Combined function: speech -> text -> image
def speech_to_image(audio):
    """Convert speech to text, then generate an image from the text."""
    # Step 1: convert speech to text
    text_prompt = transcribe_audio(audio)
    # Bail out if transcription failed or found no usable speech, so we never
    # feed an error message to the image model as a prompt
    if not text_prompt or text_prompt.startswith("Error") or text_prompt == "No speech detected":
        return None, text_prompt or "No audio received"
    # Step 2: generate an image from the transcript
    image, status = generate_image_from_text(text_prompt)
    return image, f"Transcript: '{text_prompt}'\n\n{status}"


# Gradio interface with tabs
with gr.Blocks(title="AI Image Generation from Speech") as demo:
    gr.Markdown("# 🎨 AI Image Generation from Speech")
    gr.Markdown("Speak your image description, and the AI will generate an image based on your words!")

    with gr.Tab("🎤 Speech to Image"):
        gr.Markdown("Record or upload audio with your image description")
        audio_input = gr.Audio(label="Record Audio", type="numpy")
        generate_btn = gr.Button("Generate Image from Speech", variant="primary")
        output_image = gr.Image(label="Generated Image")
        output_text = gr.Textbox(label="Status", interactive=False)
        generate_btn.click(
            fn=speech_to_image,
            inputs=audio_input,
            outputs=[output_image, output_text],
        )

    with gr.Tab("⌨️ Text to Image"):
        gr.Markdown("Or type a description directly")
        text_input = gr.Textbox(
            label="Enter Image Description",
            placeholder="e.g., a beautiful sunset over mountains",
            lines=3,
        )
        text_generate_btn = gr.Button("Generate Image", variant="primary")
        text_output_image = gr.Image(label="Generated Image")
        text_output_status = gr.Textbox(label="Status", interactive=False)
        text_generate_btn.click(
            fn=generate_image_from_text,
            inputs=text_input,
            outputs=[text_output_image, text_output_status],
        )

# Launch the interface
if __name__ == "__main__":
    demo.launch()
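# Setup (assumed dependencies; this script does not pin versions):
#   pip install gradio transformers diffusers torch torchaudio accelerate
# torchaudio is assumed here because the ASR pipeline uses it to resample
# microphone audio (typically 44.1/48 kHz) down to Whisper's 16 kHz.
# Run `python app.py` and open the local URL Gradio prints. On CPU,
# 50-step Stable Diffusion inference can take several minutes per image;
# lowering num_inference_steps trades quality for speed.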