File size: 7,719 Bytes
f4c8cd4
2d02726
 
f4c8cd4
 
2d02726
 
367721b
 
 
 
2d02726
367721b
2d02726
367721b
 
2d02726
367721b
2d02726
367721b
2d02726
367721b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2d02726
 
 
 
367721b
 
2d02726
f4c8cd4
2d02726
 
 
 
 
 
 
367721b
2d02726
 
f4c8cd4
2d02726
f4c8cd4
2d02726
367721b
f4c8cd4
367721b
 
 
2d02726
 
 
f4c8cd4
2d02726
367721b
 
 
 
 
 
f4c8cd4
2d02726
 
 
f4c8cd4
2d02726
 
f4c8cd4
2d02726
 
f4c8cd4
2d02726
 
f4c8cd4
2d02726
367721b
b316ae2
2d02726
367721b
2d02726
 
367721b
f4c8cd4
2d02726
 
 
 
 
367721b
f4c8cd4
2d02726
 
367721b
2d02726
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
367721b
f4c8cd4
2d02726
 
 
367721b
2d02726
 
 
 
 
 
 
b316ae2
2d02726
 
 
 
b316ae2
2d02726
 
 
 
 
 
 
 
b316ae2
2d02726
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
367721b
2d02726
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
import gradio as gr
from transformers import pipeline, set_seed
import torch
import tempfile
import os
from gtts import gTTS
import time
import requests
from PIL import Image
import io
import base64

# Use smaller, faster models to avoid download issues and improve speed
# Module-level side effect: loading the pipeline downloads distilgpt2 on first
# run. On failure, text_generator is set to None and generate_text() falls
# back to canned text instead of crashing the app.
try:
    # Text generation with a smaller model
    text_generator = pipeline('text-generation', model='distilgpt2', truncation=True)
    set_seed(42)
    print("Text model loaded successfully")
except Exception as e:
    print(f"Error loading text model: {e}")
    text_generator = None

# Image generation function using API as fallback (more reliable than downloading large models)
def generate_image(prompt):
    """Generate an image for *prompt* via the Hugging Face inference API.

    Falls back to a locally drawn placeholder image whenever no API token is
    configured, the API returns a non-200 status, or the request raises.

    Args:
        prompt: Text description of the desired image.

    Returns:
        A PIL image — either the API result or a placeholder.
    """
    API_URL = "https://api-inference.huggingface.co/models/runwayml/stable-diffusion-v1-5"
    # Read the token from the environment instead of hard-coding a dummy
    # value: with the old placeholder token every request was guaranteed to
    # fail with 401, so without a token we skip the network call entirely.
    token = os.environ.get("HF_API_TOKEN", "")
    if not token:
        print("No HF_API_TOKEN set; using placeholder image")
        return create_placeholder_image(prompt)
    try:
        headers = {"Authorization": f"Bearer {token}"}
        # Explicit timeout so a hung inference API cannot block the UI forever.
        response = requests.post(API_URL, headers=headers, json={"inputs": prompt}, timeout=60)
        if response.status_code == 200:
            return Image.open(io.BytesIO(response.content))
        # Fallback to a placeholder image
        print(f"API request failed: {response.status_code}")
        return create_placeholder_image(prompt)
    except Exception as e:
        print(f"Error in image generation: {e}")
        return create_placeholder_image(prompt)

def create_placeholder_image(prompt):
    """Create a simple 512x512 placeholder image containing the prompt text.

    Used as a fallback whenever real image generation fails.

    Args:
        prompt: The prompt; only the first 50 characters are drawn.

    Returns:
        A PIL RGB image with the text overlaid on a solid background.
    """
    from PIL import Image, ImageDraw, ImageFont
    # Create a blank image
    img = Image.new('RGB', (512, 512), color=(73, 109, 137))
    d = ImageDraw.Draw(img)

    # Prefer a TrueType font; fall back to PIL's built-in bitmap font when
    # arial.ttf is not installed (e.g. on Linux servers). OSError is what
    # ImageFont.truetype raises for a missing font — the previous bare
    # `except:` also swallowed KeyboardInterrupt/SystemExit.
    try:
        font = ImageFont.truetype("arial.ttf", 20)
    except OSError:
        font = ImageFont.load_default()

    # Add text to the image
    text = f"Image for:\n{prompt[:50]}..."
    d.text((10, 256), text, fill=(255, 255, 255), font=font)
    return img

def generate_text(prompt, max_length=150):
    """Generate text content based on prompt"""
    # Guard clause: the model failed to load at import time, so serve the
    # canned fallback copy instead of crashing.
    if text_generator is None:
        # Fallback text generation
        return f"Here is a sample article about {prompt}. This content was generated as a fallback because the AI model could not be loaded. In a real implementation, this would be proper AI-generated content based on your prompt."

    # Run the pipeline; surface any failure as a readable message rather
    # than an exception so the UI keeps working.
    try:
        outputs = text_generator(
            prompt,
            max_length=max_length,
            num_return_sequences=1,
            temperature=0.7,
            do_sample=True,
            pad_token_id=50256  # GPT2 end of text token
        )
        result = outputs[0]['generated_text']
    except Exception as e:
        return f"Error generating text: {str(e)}"
    return result

def generate_audio(text):
    """Generate an MP3 narration of *text* using gTTS.

    Args:
        text: Source text; only the first 300 characters are narrated to
            keep the TTS request small.

    Returns:
        Path to the saved MP3 file, or None when TTS failed (e.g. no
        network). Callers treat a falsy value as "no audio available".
    """
    try:
        # Create audio file from text (limit length)
        short_text = text[:300]  # Limit text length for audio to avoid issues
        tts = gTTS(text=short_text, lang='en', slow=False)
        audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
        # Close the handle before gTTS writes to the path: writing by name
        # while the handle is open fails on Windows and leaks a descriptor.
        audio_file.close()
        tts.save(audio_file.name)
        return audio_file.name
    except Exception as e:
        print(f"Error generating audio: {e}")
        # Return None instead of a zero-byte ".mp3": an empty file is not
        # valid MP3 data and breaks audio players / the Gradio Audio widget,
        # whereas generate_content() already skips falsy audio outputs.
        return None

def generate_content(prompt, content_type, length=150):
    """Generate text (always) plus optional image/audio for a user prompt.

    Args:
        prompt: Free-text content prompt from the UI.
        content_type: Iterable of selected kinds; "image"/"audio" membership
            toggles those outputs. Text is always generated.
        length: Maximum generation length forwarded to generate_text().

    Returns:
        Tuple of (HTML summary, PIL image or None, audio path or None,
        list of file paths offered for download).
    """
    import html  # stdlib; used to escape text before HTML interpolation

    start_time = time.time()

    # Generate text content
    text_content = generate_text(prompt, max_length=length)

    # Generate image/audio only when requested in the checkbox group.
    image_output = generate_image(prompt) if "image" in content_type else None
    audio_output = generate_audio(text_content) if "audio" in content_type else None

    download_files = []

    # Save text to file; the context manager guarantees the handle is closed
    # (delete=False keeps the file on disk for the download widget).
    text_file = tempfile.NamedTemporaryFile(delete=False, suffix=".txt", mode="w", encoding='utf-8')
    with text_file:
        text_file.write(text_content)
    download_files.append(text_file.name)

    # Save image if generated
    if image_output:
        image_file = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
        image_file.close()  # close before PIL re-opens the path (Windows-safe)
        image_output.save(image_file.name, "PNG")
        download_files.append(image_file.name)

    # Audio file is already saved
    if audio_output:
        download_files.append(audio_output)

    generation_time = time.time() - start_time

    # Escape user/model text before interpolating into HTML so markup in the
    # prompt or generated text cannot inject elements, then convert newlines.
    # Doing the replace() outside the f-string also avoids the
    # backslash-in-expression SyntaxError on Python < 3.12.
    safe_prompt = html.escape(prompt)
    safe_text = html.escape(text_content).replace("\n", "<br>")
    output_html = f"""
    <div style='font-family: Arial, sans-serif; max-width: 800px; margin: 0 auto;'>
        <h2>Generated Content</h2>
        <p><strong>Prompt:</strong> {safe_prompt}</p>
        <p><strong>Generation Time:</strong> {generation_time:.2f} seconds</p>
        <hr>
        <h3>Text Content:</h3>
        <div style='background-color: #f5f5f5; padding: 15px; border-radius: 5px;'>
            {safe_text}
        </div>
    </div>
    """

    return output_html, image_output, audio_output, download_files

# Create Gradio interface
# Declarative UI layout: inputs in the left column, outputs in the right,
# wired to generate_content() via the button's click event.
with gr.Blocks(title="Content Generation App", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🚀 AI Content Generator")
    gr.Markdown("Generate articles, blog posts, images, and audio from a single prompt using AI models.")
    
    with gr.Row():
        # Left column (narrower): prompt, content-type checkboxes, length, button.
        with gr.Column(scale=1):
            prompt_input = gr.Textbox(
                label="Enter your content prompt", 
                lines=3,
                placeholder="e.g., 'Write a blog post about the benefits of renewable energy'"
            )
            content_type = gr.CheckboxGroup(
                choices=["text", "image", "audio"],
                value=["text", "image"],
                label="Content types to generate"
            )
            # NOTE(review): the label says "words" but this value is passed to
            # generate_text() as the model's max_length, which counts tokens —
            # confirm which is intended.
            length_slider = gr.Slider(
                minimum=50, 
                maximum=500, 
                value=150, 
                step=50,
                label="Text Length (words)"
            )
            generate_btn = gr.Button("Generate Content", variant="primary")
            
        # Right column (wider): rendered HTML, image, audio, and download list.
        with gr.Column(scale=2):
            output_html = gr.HTML(label="Generated Content")
            output_image = gr.Image(label="Generated Image", visible=True)
            output_audio = gr.Audio(label="Generated Audio", visible=True)
            download_files = gr.Files(label="Download Generated Content")
    
    # Set up event handling: outputs map 1:1 to generate_content's return tuple
    # (html, image, audio, files).
    generate_btn.click(
        fn=generate_content,
        inputs=[prompt_input, content_type, length_slider],
        outputs=[output_html, output_image, output_audio, download_files]
    )
    
    # Examples
    # Each example row matches the click handler's inputs: [prompt, types, length].
    gr.Examples(
        examples=[
            ["Write a short article about the future of artificial intelligence", ["text", "image"], 200],
            ["Create a blog post about healthy eating habits", ["text", "image"], 250],
            ["Describe a beautiful sunset over the ocean", ["text", "image"], 150]
        ],
        inputs=[prompt_input, content_type, length_slider],
        label="Click on any example below to get started:"
    )

# Launch the app
if __name__ == "__main__":
    # NOTE(review): share=True publishes a temporary public Gradio URL for
    # this app — confirm that exposing the demo publicly is intended.
    demo.launch(share=True)