import gradio as gr
from transformers import pipeline, set_seed
import torch
import tempfile
import os
from gtts import gTTS
import time
import requests
from PIL import Image
import io
import base64

# Use smaller, faster models to avoid download issues and improve speed.
try:
    # Text generation with a smaller model (distilgpt2 downloads quickly).
    text_generator = pipeline('text-generation', model='distilgpt2', truncation=True)
    set_seed(42)  # deterministic sampling for reproducible demos
    print("Text model loaded successfully")
except Exception as e:
    # Keep the app usable even if the model download fails; generate_text()
    # falls back to canned text when text_generator is None.
    print(f"Error loading text model: {e}")
    text_generator = None


def generate_image(prompt):
    """Generate an image for *prompt* via the Hugging Face inference API.

    Falls back to a locally drawn placeholder image on any API or network
    failure, so the caller always receives a PIL Image.
    """
    try:
        # Hosted inference is far lighter than downloading SD weights locally.
        API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
        # NOTE(review): placeholder token — supply a real token from
        # huggingface.co for actual image generation. Do not commit real
        # tokens to source control.
        headers = {"Authorization": "Bearer hf_your_token_here"}

        response = requests.post(API_URL, headers=headers, json={"inputs": prompt})

        if response.status_code == 200:
            # Successful responses carry raw image bytes.
            return Image.open(io.BytesIO(response.content))

        # Any non-200 (401 for the placeholder token, 503 while the model
        # warms up, etc.) degrades gracefully to a placeholder.
        print(f"API request failed: {response.status_code}")
        return create_placeholder_image(prompt)
    except Exception as e:
        print(f"Error in image generation: {e}")
        return create_placeholder_image(prompt)


def create_placeholder_image(prompt):
    """Create a simple 512x512 placeholder image labeled with *prompt*."""
    from PIL import Image, ImageDraw, ImageFont

    # Solid-color canvas.
    img = Image.new('RGB', (512, 512), color=(73, 109, 137))
    d = ImageDraw.Draw(img)

    # Prefer a real font; fall back to PIL's built-in bitmap font when the
    # TTF file is not present on this system.
    try:
        font = ImageFont.truetype("arial.ttf", 20)
    except OSError:
        font = ImageFont.load_default()

    # Truncate long prompts so the label stays readable.
    text = f"Image for:\n{prompt[:50]}..."
    d.text((10, 256), text, fill=(255, 255, 255), font=font)
    return img


def generate_text(prompt, max_length=150):
    """Generate text content based on *prompt*.

    Returns fallback text when the model failed to load, and an error
    message string (rather than raising) when generation itself fails.
    """
    if text_generator is None:
        # Fallback text generation when the model could not be loaded.
        return (
            f"Here is a sample article about {prompt}. This content was "
            "generated as a fallback because the AI model could not be "
            "loaded. In a real implementation, this would be proper "
            "AI-generated content based on your prompt."
        )
    try:
        generated = text_generator(
            prompt,
            max_length=max_length,
            num_return_sequences=1,
            temperature=0.7,
            do_sample=True,
            pad_token_id=50256,  # GPT-2 end-of-text token; silences the pad warning
        )
        return generated[0]['generated_text']
    except Exception as e:
        return f"Error generating text: {str(e)}"


def generate_audio(text):
    """Generate an MP3 file from *text* using gTTS; return the file path.

    On failure, returns the path of an empty placeholder file so the
    caller's download list stays consistent.
    """
    try:
        # Limit text length for audio to avoid long requests / TTS errors.
        short_text = text[:300]
        tts = gTTS(text=short_text, lang='en', slow=False)
        # Close the handle before gTTS writes to the path: on Windows an
        # open NamedTemporaryFile cannot be reopened by name.
        audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
        audio_file.close()
        tts.save(audio_file.name)
        return audio_file.name
    except Exception as e:
        print(f"Error generating audio: {e}")
        # Empty file as a stand-in for silent audio.
        silent_audio = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
        silent_audio.close()
        with open(silent_audio.name, 'wb') as f:
            f.write(b'')
        return silent_audio.name


def generate_content(prompt, content_type, length=150):
    """Generate text/image/audio for *prompt* per the *content_type* list.

    Returns (html_summary, image_or_None, audio_path_or_None, file_paths)
    matching the Gradio outputs wired below.
    """
    start_time = time.time()

    # Text is always generated (it also feeds the audio track).
    text_content = generate_text(prompt, max_length=length)

    image_output = generate_image(prompt) if "image" in content_type else None
    audio_output = generate_audio(text_content) if "audio" in content_type else None

    download_files = []

    # Save text to a downloadable file.
    text_file = tempfile.NamedTemporaryFile(
        delete=False, suffix=".txt", mode="w", encoding='utf-8'
    )
    text_file.write(text_content)
    text_file.close()
    download_files.append(text_file.name)

    # Save the image if one was generated (close handle before PIL writes
    # to the path — required on Windows).
    if image_output:
        image_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        image_file.close()
        image_output.save(image_file.name, "PNG")
        download_files.append(image_file.name)

    # Audio file is already saved on disk by generate_audio().
    if audio_output:
        download_files.append(audio_output)

    generation_time = time.time() - start_time

    # Hoisted out of the f-string: backslash escapes inside f-string
    # expressions are a SyntaxError before Python 3.12.
    formatted_text = text_content.replace('\n', '<br>')

    output_html = f"""
    <div style="padding: 10px;">
        <h2>Generated Content</h2>
        <p><strong>Prompt:</strong> {prompt}</p>
        <p><strong>Generation Time:</strong> {generation_time:.2f} seconds</p>
        <hr>
        <h3>Text Content:</h3>
        <p>{formatted_text}</p>
    </div>
    """
    return output_html, image_output, audio_output, download_files


# Create Gradio interface
with gr.Blocks(title="Content Generation App", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🚀 AI Content Generator")
    gr.Markdown("Generate articles, blog posts, images, and audio from a single prompt using AI models.")

    with gr.Row():
        with gr.Column(scale=1):
            prompt_input = gr.Textbox(
                label="Enter your content prompt",
                lines=3,
                placeholder="e.g., 'Write a blog post about the benefits of renewable energy'"
            )
            content_type = gr.CheckboxGroup(
                choices=["text", "image", "audio"],
                value=["text", "image"],
                label="Content types to generate"
            )
            length_slider = gr.Slider(
                minimum=50,
                maximum=500,
                value=150,
                step=50,
                label="Text Length (words)"
            )
            generate_btn = gr.Button("Generate Content", variant="primary")

        with gr.Column(scale=2):
            output_html = gr.HTML(label="Generated Content")
            output_image = gr.Image(label="Generated Image", visible=True)
            output_audio = gr.Audio(label="Generated Audio", visible=True)
            download_files = gr.Files(label="Download Generated Content")

    # Set up event handling
    generate_btn.click(
        fn=generate_content,
        inputs=[prompt_input, content_type, length_slider],
        outputs=[output_html, output_image, output_audio, download_files]
    )

    # Examples
    gr.Examples(
        examples=[
            ["Write a short article about the future of artificial intelligence", ["text", "image"], 200],
            ["Create a blog post about healthy eating habits", ["text", "image"], 250],
            ["Describe a beautiful sunset over the ocean", ["text", "image"], 150]
        ],
        inputs=[prompt_input, content_type, length_slider],
        label="Click on any example below to get started:"
    )

# Launch the app
if __name__ == "__main__":
    demo.launch(share=True)