shaheerawan3 commited on
Commit
f24f171
·
verified ·
1 Parent(s): 4c33be9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +435 -151
app.py CHANGED
@@ -1,94 +1,188 @@
1
  import streamlit as st
2
- import numpy as np
 
 
3
  from PIL import Image, ImageDraw, ImageFont
4
  import tempfile
5
  import os
6
- from pathlib import Path
7
- import textwrap
8
  from gtts import gTTS
9
- from moviepy.editor import VideoFileClip, ImageSequenceClip, AudioFileClip, CompositeVideoClip
10
- import logging
11
- from typing import Optional, Tuple
12
  import io
 
 
 
 
 
 
13
 
14
- class SimpleVideoGenerator:
15
  def __init__(self):
16
- """Initialize the video generator with basic components"""
17
- self.setup_logging()
18
- self.setup_workspace()
19
- self.setup_themes()
20
- self.load_font()
 
 
 
 
 
 
21
 
22
  def setup_logging(self):
23
- """Configure basic logging"""
24
  logging.basicConfig(
25
  level=logging.INFO,
26
- format='%(asctime)s - %(levelname)s - %(message)s'
 
 
 
 
27
  )
28
  self.logger = logging.getLogger(__name__)
29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  def setup_workspace(self):
31
- """Set up temporary directory for working files"""
32
  self.temp_dir = Path(tempfile.mkdtemp())
 
 
33
 
34
  def setup_themes(self):
35
- """Define color themes for videos"""
36
  self.themes = {
37
  'Professional': {
38
- 'bg': (245, 245, 245),
39
- 'text': (33, 33, 33),
40
- 'accent': (0, 102, 204)
41
  },
42
  'Creative': {
43
- 'bg': (255, 240, 245),
44
- 'text': (51, 51, 51),
45
- 'accent': (255, 64, 129)
46
  },
47
  'Educational': {
48
- 'bg': (240, 249, 255),
49
- 'text': (25, 25, 25),
50
- 'accent': (0, 151, 167)
51
  }
52
  }
53
 
54
- def load_font(self):
55
- """Load system font or fall back to default"""
56
  try:
57
- # Try to load Arial font
58
- self.font = ImageFont.truetype("arial.ttf", 40)
59
- except OSError:
60
- try:
61
- # Try system font on Linux
62
- self.font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 40)
63
- except OSError:
64
- # Fall back to default font
 
 
 
 
 
 
65
  self.font = ImageFont.load_default()
66
  self.logger.warning("Using default font - custom font loading failed")
67
 
68
- def create_frame(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  self,
70
  text: str,
71
  theme: dict,
72
  frame_number: int,
73
  total_frames: int,
74
- size: Tuple[int, int] = (1280, 720)
 
75
  ) -> np.ndarray:
76
- """Create a single video frame with text overlay"""
77
  try:
78
  # Create base frame
79
- frame = np.full((size[1], size[0], 3), theme['bg'], dtype=np.uint8)
 
 
 
 
 
 
 
80
  img = Image.fromarray(frame)
81
- draw = ImageDraw.Draw(img)
 
 
 
 
 
 
 
 
 
82
 
83
- # Wrap text for better presentation
 
84
  wrapped_text = textwrap.fill(text, width=50)
85
-
86
  # Calculate text position
87
  text_bbox = draw.textbbox((0, 0), wrapped_text, font=self.font)
88
  text_width = text_bbox[2] - text_bbox[0]
89
  text_height = text_bbox[3] - text_bbox[1]
90
  text_x = (size[0] - text_width) // 2
91
- text_y = (size[1] - text_height) // 2 - 50 # Slightly above center
92
 
93
  # Draw text background
94
  padding = 20
@@ -99,176 +193,366 @@ class SimpleVideoGenerator:
99
  text_x + text_width + padding,
100
  text_y + text_height + padding
101
  ],
102
- fill=theme['bg'],
103
- outline=theme['accent']
104
  )
105
 
106
  # Draw text
107
  draw.text(
108
  (text_x, text_y),
109
  wrapped_text,
110
- fill=theme['text'],
111
  font=self.font
112
  )
113
 
114
- # Draw progress bar
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
115
  progress = frame_number / total_frames
116
  bar_width = int(size[0] * 0.8) # 80% of screen width
117
  bar_height = 6
118
  x_offset = (size[0] - bar_width) // 2
119
  y_position = size[1] - 40
120
 
121
- # Background bar
122
  draw.rectangle(
123
  [x_offset, y_position, x_offset + bar_width, y_position + bar_height],
124
- fill=(200, 200, 200)
125
  )
126
 
127
- # Progress bar
128
  progress_width = int(bar_width * progress)
129
- draw.rectangle(
130
- [x_offset, y_position, x_offset + progress_width, y_position + bar_height],
131
- fill=theme['accent']
132
- )
 
 
133
 
134
- return np.array(img)
 
 
 
 
 
 
135
 
136
  except Exception as e:
137
- self.logger.error(f"Frame creation failed: {str(e)}")
138
- return np.zeros((size[1], size[0], 3), dtype=np.uint8)
139
 
140
- def generate_audio(self, text: str) -> str:
141
- """Generate audio from text using gTTS"""
142
  try:
143
- audio_path = str(self.temp_dir / "audio.mp3")
144
- tts = gTTS(text=text, lang='en', slow=False)
145
- tts.save(audio_path)
146
- return audio_path
 
 
 
 
 
147
  except Exception as e:
148
- self.logger.error(f"Audio generation failed: {str(e)}")
149
- return None
150
 
151
  def create_video(
152
  self,
153
- text: str,
154
  style: str,
155
  duration: int,
156
  output_path: str
157
- ) -> Optional[str]:
158
- """Create a video with text overlay and audio"""
159
  try:
160
- # Generate audio first to get timing
161
- audio_path = self.generate_audio(text)
162
- if not audio_path:
163
- raise Exception("Audio generation failed")
164
-
165
- # Create frames
 
166
  frames = []
167
  fps = 30
168
  total_frames = int(duration * fps)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
169
 
170
- for i in range(total_frames):
171
- # Calculate current text segment
172
- progress = i / total_frames
173
- text_index = int(progress * len(text.split()))
174
- current_text = " ".join(text.split()[:text_index + 1])
175
-
176
- # Create frame
177
- frame = self.create_frame(
178
- current_text,
179
- self.themes[style],
180
- i,
181
- total_frames
182
- )
183
- frames.append(frame)
184
-
185
- # Create video
186
  video = ImageSequenceClip(frames, fps=fps)
187
-
188
- # Add audio
189
- audio = AudioFileClip(audio_path)
190
  video = video.set_audio(audio)
 
 
 
 
 
 
 
 
191
 
192
- # Write video file
193
  video.write_videofile(
194
  output_path,
195
  fps=fps,
196
  codec='libx264',
197
- audio_codec='aac'
 
 
198
  )
199
 
200
  return output_path
201
 
202
  except Exception as e:
203
  self.logger.error(f"Video creation failed: {str(e)}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  return None
205
 
206
  def cleanup(self):
207
- """Clean up temporary files"""
208
  try:
209
- import shutil
210
- shutil.rmtree(self.temp_dir)
 
 
 
 
 
 
 
 
 
 
211
  except Exception as e:
212
  self.logger.error(f"Cleanup failed: {str(e)}")
213
 
214
- # Streamlit UI
215
- def main():
216
- st.title("Simple Video Generator")
217
- st.write("Create videos with text-to-speech and animations")
218
-
219
- # Input fields
220
- text = st.text_area("Enter your text", height=100)
221
-
222
- col1, col2 = st.columns(2)
223
- with col1:
224
- style = st.selectbox(
225
- "Choose style",
226
- options=['Professional', 'Creative', 'Educational']
227
- )
228
-
229
- with col2:
230
- duration = st.slider(
231
- "Video duration (seconds)",
232
- min_value=5,
233
- max_value=60,
234
- value=30
235
- )
236
 
237
- if st.button("Generate Video"):
238
- if not text:
239
- st.error("Please enter some text.")
240
- return
241
 
242
- try:
243
- with st.spinner("Generating video..."):
244
- generator = SimpleVideoGenerator()
245
- output_path = "generated_video.mp4"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
246
 
247
- result = generator.create_video(
248
- text,
249
- style,
250
- duration,
251
- output_path
252
  )
253
 
254
- if result:
255
- st.success("Video generated successfully!")
256
-
257
- # Provide download button
258
- with open(output_path, 'rb') as f:
259
- st.download_button(
260
- label="Download Video",
261
- data=f.read(),
262
- file_name=output_path,
263
- mime="video/mp4"
 
 
 
 
 
 
 
 
 
 
264
  )
265
- else:
266
- st.error("Failed to generate video. Please try again.")
267
-
268
- generator.cleanup()
269
-
270
- except Exception as e:
271
- st.error(f"An error occurred: {str(e)}")
 
 
 
 
 
 
 
 
272
 
273
  if __name__ == "__main__":
274
- main()
 
1
import io
import logging
import os
import random
import re
import tempfile
import textwrap
import time
import unicodedata
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Optional, List, Dict, Tuple

import numpy as np
import requests
import streamlit as st
import torch
from gtts import gTTS
from moviepy.editor import *
from PIL import Image, ImageDraw, ImageFont
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
20
 
21
+ class EnhancedVideoGenerator:
22
  def __init__(self):
23
+ """Initialize the video generator with all required components"""
24
+ try:
25
+ self.setup_logging()
26
+ self.setup_device()
27
+ self.initialize_models()
28
+ self.setup_workspace()
29
+ self.load_assets()
30
+ self.setup_themes()
31
+ except Exception as e:
32
+ logging.error(f"Initialization failed: {str(e)}")
33
+ raise RuntimeError("Failed to initialize video generator")
34
 
35
  def setup_logging(self):
36
+ """Configure logging for the application"""
37
  logging.basicConfig(
38
  level=logging.INFO,
39
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
40
+ handlers=[
41
+ logging.FileHandler('video_generator.log'),
42
+ logging.StreamHandler()
43
+ ]
44
  )
45
  self.logger = logging.getLogger(__name__)
46
 
47
+ def setup_device(self):
48
+ """Set up computing device (CPU/GPU)"""
49
+ self.device = "cuda" if torch.cuda.is_available() else "cpu"
50
+ self.logger.info(f"Using device: {self.device}")
51
+
52
+ def initialize_models(self):
53
+ """Initialize all AI models"""
54
+ try:
55
+ # Text generation model
56
+ self.text_generator = pipeline(
57
+ 'text-generation',
58
+ model='gpt2',
59
+ device=0 if self.device == "cuda" else -1
60
+ )
61
+
62
+ # Initialize free image generation model
63
+ self.image_model = AutoModelForCausalLM.from_pretrained(
64
+ "CompVis/stable-diffusion-v1-4",
65
+ torch_dtype=torch.float16 if self.device == "cuda" else torch.float32
66
+ ).to(self.device)
67
+
68
+ except Exception as e:
69
+ self.logger.error(f"Model initialization failed: {str(e)}")
70
+ raise
71
+
72
  def setup_workspace(self):
73
+ """Set up working directory and resources"""
74
  self.temp_dir = Path(tempfile.mkdtemp())
75
+ self.asset_dir = self.temp_dir / "assets"
76
+ self.asset_dir.mkdir(exist_ok=True)
77
 
78
  def setup_themes(self):
79
+ """Set up visual themes"""
80
  self.themes = {
81
  'Professional': {
82
+ 'bg': (240, 240, 240),
83
+ 'accent': (0, 120, 212),
84
+ 'text': (33, 33, 33)
85
  },
86
  'Creative': {
87
+ 'bg': (255, 250, 240),
88
+ 'accent': (255, 123, 0),
89
+ 'text': (51, 51, 51)
90
  },
91
  'Educational': {
92
+ 'bg': (248, 249, 250),
93
+ 'accent': (40, 167, 69),
94
+ 'text': (33, 37, 41)
95
  }
96
  }
97
 
98
+ def load_assets(self):
99
+ """Load visual assets and fonts"""
100
  try:
101
+ # Try multiple font options
102
+ font_options = [
103
+ "arial.ttf",
104
+ "/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
105
+ "/System/Library/Fonts/Helvetica.ttc"
106
+ ]
107
+
108
+ for font_path in font_options:
109
+ try:
110
+ self.font = ImageFont.truetype(font_path, 40)
111
+ break
112
+ except OSError:
113
+ continue
114
+ else:
115
  self.font = ImageFont.load_default()
116
  self.logger.warning("Using default font - custom font loading failed")
117
 
118
+ except Exception as e:
119
+ self.logger.error(f"Asset loading failed: {str(e)}")
120
+
121
+ def generate_visual_assets(self, script: str, style: str) -> List[Dict]:
122
+ """Generate relevant visual assets based on script content"""
123
+ try:
124
+ # Extract key topics from script
125
+ topics = self.extract_key_topics(script)
126
+
127
+ assets = []
128
+ for topic in topics:
129
+ # Generate AI image
130
+ image = self.generate_ai_image(topic, style)
131
+ if image:
132
+ assets.append({
133
+ 'type': 'image',
134
+ 'data': image,
135
+ 'topic': topic
136
+ })
137
+
138
+ return assets
139
+
140
+ except Exception as e:
141
+ self.logger.error(f"Visual asset generation failed: {str(e)}")
142
+ return []
143
+
144
+ def create_enhanced_frame(
145
  self,
146
  text: str,
147
  theme: dict,
148
  frame_number: int,
149
  total_frames: int,
150
+ background_image: Optional[Image.Image] = None,
151
+ size: Tuple[int, int] = (1920, 1080) # Upgraded to 1080p
152
  ) -> np.ndarray:
153
+ """Create a visually enhanced frame with background, text, and effects"""
154
  try:
155
  # Create base frame
156
+ if background_image:
157
+ # Resize and crop background to fit
158
+ bg = background_image.resize(size, Image.LANCZOS)
159
+ frame = np.array(bg)
160
+ else:
161
+ frame = np.full((size[1], size[0], 3), theme['bg'], dtype=np.uint8)
162
+
163
+ # Convert to PIL Image for drawing
164
  img = Image.fromarray(frame)
165
+ draw = ImageDraw.Draw(img, 'RGBA')
166
+
167
+ # Add subtle gradient overlay
168
+ overlay = Image.new('RGBA', size, (0, 0, 0, 0))
169
+ overlay_draw = ImageDraw.Draw(overlay)
170
+ overlay_draw.rectangle(
171
+ [0, 0, size[0], size[1]],
172
+ fill=(255, 255, 255, 100) # Semi-transparent white
173
+ )
174
+ img = Image.alpha_composite(img.convert('RGBA'), overlay)
175
 
176
+ # Add text with improved styling
177
+ text = self.clean_text(text)
178
  wrapped_text = textwrap.fill(text, width=50)
179
+
180
  # Calculate text position
181
  text_bbox = draw.textbbox((0, 0), wrapped_text, font=self.font)
182
  text_width = text_bbox[2] - text_bbox[0]
183
  text_height = text_bbox[3] - text_bbox[1]
184
  text_x = (size[0] - text_width) // 2
185
+ text_y = size[1] - text_height - 100 # Position at bottom
186
 
187
  # Draw text background
188
  padding = 20
 
193
  text_x + text_width + padding,
194
  text_y + text_height + padding
195
  ],
196
+ fill=(0, 0, 0, 160) # Semi-transparent black
 
197
  )
198
 
199
  # Draw text
200
  draw.text(
201
  (text_x, text_y),
202
  wrapped_text,
203
+ fill=(255, 255, 255, 255),
204
  font=self.font
205
  )
206
 
207
+ # Add progress bar with animation
208
+ self.draw_animated_progress_bar(
209
+ draw,
210
+ frame_number,
211
+ total_frames,
212
+ size,
213
+ theme
214
+ )
215
+
216
+ return np.array(img)
217
+
218
+ except Exception as e:
219
+ self.logger.error(f"Frame creation failed: {str(e)}")
220
+ # Return fallback frame
221
+ return np.full((size[1], size[0], 3), theme['bg'], dtype=np.uint8)
222
+
223
+ def draw_animated_progress_bar(
224
+ self,
225
+ draw: ImageDraw.Draw,
226
+ frame_number: int,
227
+ total_frames: int,
228
+ size: Tuple[int, int],
229
+ theme: dict
230
+ ):
231
+ """Draw an animated progress bar with effects"""
232
+ try:
233
  progress = frame_number / total_frames
234
  bar_width = int(size[0] * 0.8) # 80% of screen width
235
  bar_height = 6
236
  x_offset = (size[0] - bar_width) // 2
237
  y_position = size[1] - 40
238
 
239
+ # Draw background bar
240
  draw.rectangle(
241
  [x_offset, y_position, x_offset + bar_width, y_position + bar_height],
242
+ fill=(200, 200, 200, 160)
243
  )
244
 
245
+ # Draw progress with gradient effect
246
  progress_width = int(bar_width * progress)
247
+ for x in range(progress_width):
248
+ alpha = int(255 * (x / bar_width)) # Gradient effect
249
+ draw.line(
250
+ [x_offset + x, y_position, x_offset + x, y_position + bar_height],
251
+ fill=(theme['accent'][0], theme['accent'][1], theme['accent'][2], alpha)
252
+ )
253
 
254
+ # Add animated highlight
255
+ highlight_pos = x_offset + progress_width
256
+ if highlight_pos < x_offset + bar_width:
257
+ draw.rectangle(
258
+ [highlight_pos-2, y_position-1, highlight_pos+2, y_position + bar_height+1],
259
+ fill=(255, 255, 255, 200)
260
+ )
261
 
262
  except Exception as e:
263
+ self.logger.error(f"Progress bar drawing failed: {str(e)}")
 
264
 
265
+ def generate_voice_over(self, script: str) -> AudioFileClip:
266
+ """Generate voice-over audio using gTTS"""
267
  try:
268
+ audio_path = self.temp_dir / "voice.mp3"
269
+ tts = gTTS(
270
+ text=script,
271
+ lang='en',
272
+ slow=False
273
+ )
274
+ tts.save(str(audio_path))
275
+ return AudioFileClip(str(audio_path))
276
+
277
  except Exception as e:
278
+ self.logger.error(f"Voice-over generation failed: {str(e)}")
279
+ return AudioFileClip(duration=len(script.split()) * 0.3)
280
 
281
  def create_video(
282
  self,
283
+ script: str,
284
  style: str,
285
  duration: int,
286
  output_path: str
287
+ ) -> str:
288
+ """Create full video with all enhanced features"""
289
  try:
290
+ # Generate visual assets
291
+ assets = self.generate_visual_assets(script, style)
292
+
293
+ # Generate voice-over
294
+ audio = self.generate_voice_over(script)
295
+
296
+ # Create frames with visual assets
297
  frames = []
298
  fps = 30
299
  total_frames = int(duration * fps)
300
+
301
+ with ThreadPoolExecutor() as executor:
302
+ frame_futures = []
303
+
304
+ for i in range(total_frames):
305
+ # Calculate current text segment
306
+ progress = i / total_frames
307
+ text_index = int(progress * len(script.split()))
308
+ current_text = " ".join(script.split()[:text_index + 1])
309
+
310
+ # Get appropriate background
311
+ asset_index = int(progress * len(assets))
312
+ current_asset = assets[asset_index] if assets else None
313
+
314
+ # Submit frame creation to thread pool
315
+ future = executor.submit(
316
+ self.create_enhanced_frame,
317
+ current_text,
318
+ self.themes[style],
319
+ i,
320
+ total_frames,
321
+ current_asset['data'] if current_asset and current_asset['type'] == 'image' else None
322
+ )
323
+ frame_futures.append(future)
324
+
325
+ # Collect frames
326
+ frames = [future.result() for future in frame_futures]
327
 
328
+ # Create video clip
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
329
  video = ImageSequenceClip(frames, fps=fps)
330
+
331
+ # Add voice-over
 
332
  video = video.set_audio(audio)
333
+
334
+ # Add background music (if available)
335
+ try:
336
+ music = AudioFileClip("assets/music/background.mp3")
337
+ music = music.volumex(0.1).loop(duration=video.duration)
338
+ video = video.set_audio(CompositeAudioClip([video.audio, music]))
339
+ except Exception as e:
340
+ self.logger.warning(f"Background music addition failed: {str(e)}")
341
 
342
+ # Write final video
343
  video.write_videofile(
344
  output_path,
345
  fps=fps,
346
  codec='libx264',
347
+ audio_codec='aac',
348
+ threads=4,
349
+ preset='medium'
350
  )
351
 
352
  return output_path
353
 
354
  except Exception as e:
355
  self.logger.error(f"Video creation failed: {str(e)}")
356
+ raise
357
+
358
+ @staticmethod
359
+ def clean_text(text: str) -> str:
360
+ """Clean and normalize text for display"""
361
+ if not isinstance(text, str):
362
+ text = str(text)
363
+
364
+ # Normalize unicode characters
365
+ text = unicodedata.normalize('NFKD', text)
366
+
367
+ # Remove non-ASCII characters
368
+ text = text.encode('ascii', 'ignore').decode('ascii')
369
+
370
+ # Replace problematic characters
371
+ replacements = {
372
+ '–': '-', # en dash
373
+ '—': '-', # em dash
374
+ '"': '"', # smart quotes
375
+ '"': '"', # smart quotes
376
+ ''': "'", # smart apostrophe
377
+ ''': "'", # smart apostrophe
378
+ '…': '...', # ellipsis
379
+ }
380
+ for old, new in replacements.items():
381
+ text = text.replace(old, new)
382
+
383
+ # Remove any remaining non-standard characters
384
+ text = re.sub(r'[^\x00-\x7F]+', '', text)
385
+
386
+ return text.strip()
387
+
388
+ def extract_key_topics(self, script: str) -> List[str]:
389
+ """Extract main topics from the script for visual asset generation"""
390
+ try:
391
+ # Simple keyword extraction based on noun phrases
392
+ # In a production environment, you might want to use a proper NLP library
393
+ sentences = script.split('.')
394
+ topics = []
395
+
396
+ for sentence in sentences:
397
+ words = sentence.strip().split()
398
+ if len(words) >= 2:
399
+ # Extract potential noun phrases (pairs of words)
400
+ topics.append(' '.join(words[:2]))
401
+
402
+ # Remove duplicates and limit to top 5 topics
403
+ return list(dict.fromkeys(topics))[:5]
404
+
405
+ except Exception as e:
406
+ self.logger.error(f"Topic extraction failed: {str(e)}")
407
+ return ["default topic"]
408
+
409
+ def generate_ai_image(self, prompt: str, style: str) -> Optional[Image.Image]:
410
+ """Generate an AI image using Stability AI"""
411
+ try:
412
+ if not self.stability_api:
413
+ return None
414
+
415
+ # Enhance prompt based on style
416
+ style_prompts = {
417
+ 'Professional': "professional, corporate, clean, modern",
418
+ 'Creative': "artistic, vibrant, innovative, dynamic",
419
+ 'Educational': "clear, informative, academic, detailed"
420
+ }
421
+
422
+ enhanced_prompt = f"{prompt}, {style_prompts.get(style, '')}, high quality, 4k"
423
+
424
+ # Generate image
425
+ response = self.stability_api.generate(
426
+ prompt=enhanced_prompt,
427
+ samples=1,
428
+ width=1920,
429
+ height=1080
430
+ )
431
+
432
+ if response and len(response) > 0:
433
+ image_data = response[0].image
434
+ return Image.open(io.BytesIO(image_data))
435
+
436
+ return None
437
+
438
+ except Exception as e:
439
+ self.logger.error(f"AI image generation failed: {str(e)}")
440
  return None
441
 
442
  def cleanup(self):
443
+ """Clean up temporary files and resources"""
444
  try:
445
+ for file in self.temp_dir.glob('*'):
446
+ try:
447
+ if file.is_file():
448
+ file.unlink()
449
+ elif file.is_dir():
450
+ import shutil
451
+ shutil.rmtree(file)
452
+ except Exception as e:
453
+ self.logger.warning(f"Failed to delete {file}: {str(e)}")
454
+
455
+ self.temp_dir.rmdir()
456
+
457
  except Exception as e:
458
  self.logger.error(f"Cleanup failed: {str(e)}")
459
 
460
+ def __enter__(self):
461
+ return self
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
462
 
463
+ def __exit__(self, exc_type, exc_val, exc_tb):
464
+ self.cleanup()
 
 
465
 
466
+ # Streamlit UI Class
467
+ class VideoGeneratorUI:
468
+ def __init__(self):
469
+ self.generator = EnhancedVideoGenerator()
470
+ self.setup_ui()
471
+
472
+ def setup_ui(self):
473
+ st.title("Enhanced Video Generator")
474
+ st.write("Create professional videos with AI-generated content")
475
+
476
+ with st.form("video_generator_form"):
477
+ # Input fields
478
+ prompt = st.text_area(
479
+ "Enter your video topic/prompt",
480
+ height=100,
481
+ help="Describe what you want your video to be about"
482
+ )
483
+
484
+ col1, col2 = st.columns(2)
485
+ with col1:
486
+ style = st.selectbox(
487
+ "Choose style",
488
+ options=list(self.generator.themes.keys())
489
+ )
490
+
491
+ with col2:
492
+ duration = st.slider(
493
+ "Video duration (seconds)",
494
+ min_value=10,
495
+ max_value=300,
496
+ value=60,
497
+ step=10
498
+ )
499
+
500
+ advanced_options = st.expander("Advanced Options")
501
+ with advanced_options:
502
+ use_premium_voice = st.checkbox(
503
+ "Use premium voice-over",
504
+ value=False,
505
+ help="Requires ElevenLabs API key"
506
+ )
507
+
508
+ include_music = st.checkbox(
509
+ "Include background music",
510
+ value=True
511
+ )
512
 
513
+ fps = st.slider(
514
+ "Frames per second",
515
+ min_value=24,
516
+ max_value=60,
517
+ value=30
518
  )
519
 
520
+ submit_button = st.form_submit_button("Generate Video")
521
+
522
+ if submit_button:
523
+ if not prompt:
524
+ st.error("Please enter a prompt for your video.")
525
+ return
526
+
527
+ try:
528
+ with st.spinner("Generating your video..."):
529
+ output_path = f"generated_video_{int(time.time())}.mp4"
530
+
531
+ # Update generator settings based on advanced options
532
+ self.generator.use_premium_voice = use_premium_voice
533
+
534
+ # Generate video
535
+ video_path = self.generator.create_video(
536
+ prompt,
537
+ style,
538
+ duration,
539
+ output_path
540
  )
541
+
542
+ # Show success message and download button
543
+ st.success("Video generated successfully!")
544
+
545
+ with open(video_path, 'rb') as f:
546
+ st.download_button(
547
+ label="Download Video",
548
+ data=f.read(),
549
+ file_name=output_path,
550
+ mime="video/mp4"
551
+ )
552
+
553
+ except Exception as e:
554
+ st.error(f"Failed to generate video: {str(e)}")
555
+ st.error("Please try again with different settings or contact support.")
556
 
557
  if __name__ == "__main__":
558
+ ui = VideoGeneratorUI()