shaheerawan3 committed on
Commit
203e15a
·
verified ·
1 Parent(s): bac2847

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -144
app.py CHANGED
@@ -6,7 +6,7 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
6
  from PIL import Image, ImageDraw, ImageFont
7
  import tempfile
8
  import os
9
- from moviepy.editor import *
10
  import numpy as np
11
  from gtts import gTTS
12
  import textwrap
@@ -955,152 +955,60 @@ class EnhancedVideoGenerator:
955
  msecs = int((seconds - int(seconds)) * 1000)
956
  return f"{hours:02d}:{minutes:02d}:{secs:02d},{msecs:03d}"
957
 
958
- def create_video(self, script: str, style: str, duration: int, output_path: str, selected_images: List[str],
959
- video_effects: dict = None, progress_callback: Callable[[float], None] = None) -> str:
960
- """Create video with selected images and effects"""
961
  try:
962
- # Initialize default effects if none provided
963
- if video_effects is None:
964
- video_effects = {
965
- 'zoom': 1.0,
966
- 'brightness': 1.0,
967
- 'contrast': 1.0,
968
- 'blur': False
969
- }
970
-
971
- # Process images with error handling
972
- processed_images = []
973
- total_images = len(selected_images)
974
-
975
- for idx, img_url in enumerate(selected_images):
976
- try:
977
- response = requests.get(img_url, timeout=10)
978
- response.raise_for_status()
979
- img = Image.open(BytesIO(response.content))
980
- img = img.convert('RGB')
981
- img = img.resize((1920, 1080), Image.LANCZOS)
982
- processed_images.append(img)
983
-
984
- # Update progress (20% of total progress is for image processing)
985
- if progress_callback:
986
- progress = (idx + 1) / total_images * 20
987
- progress_callback(progress)
988
-
989
- except Exception as e:
990
- self.logger.error(f"Error processing image {img_url}: {e}")
991
- continue
992
-
993
- if not processed_images:
994
- raise ValueError("No valid images to process")
995
-
996
- # Generate voice-over
997
- audio = self.generate_fallback_audio(script)
998
- if progress_callback:
999
- progress_callback(30) # 30% progress after audio generation
1000
-
1001
- # Calculate frames
1002
- fps = 30
1003
- total_frames = int(duration * fps)
1004
- frames_per_image = total_frames // len(processed_images)
1005
-
1006
- # Create frames with effects
1007
- frames = []
1008
- frame_count = 0
1009
-
1010
- # Apply zoom effect over time
1011
- zoom_range = np.linspace(1.0, video_effects['zoom'], frames_per_image)
1012
-
1013
- for idx, img in enumerate(processed_images):
1014
- img_array = np.array(img)
1015
-
1016
- # Calculate frames for this image
1017
- if idx == len(processed_images) - 1:
1018
- n_frames = total_frames - frame_count
1019
- else:
1020
- n_frames = min(frames_per_image, total_frames - frame_count)
1021
-
1022
- # Generate frames with effects
1023
- for frame_idx in range(n_frames):
1024
- current_effects = video_effects.copy()
1025
-
1026
- # Update zoom factor
1027
- if video_effects['zoom'] != 1.0:
1028
- current_effects['zoom'] = zoom_range[min(frame_idx, len(zoom_range)-1)]
1029
-
1030
- # Apply effects to frame
1031
- frame = self.apply_video_effects(img_array.copy(), current_effects)
1032
- frames.append(frame)
1033
- frame_count += 1
1034
-
1035
- # Update progress (30% to 70% is for frame generation)
1036
- if progress_callback and frame_count % 30 == 0:
1037
- progress = 30 + (frame_count / total_frames * 40)
1038
- progress_callback(progress)
1039
-
1040
- # Add transition to next image if enabled
1041
- if idx < len(processed_images) - 1 and video_effects.get('transition_style') != 'None':
1042
- next_img_array = np.array(processed_images[idx + 1])
1043
- transition_frames = 15
1044
-
1045
- for t in range(transition_frames):
1046
- if frame_count < total_frames:
1047
- alpha = t / transition_frames
1048
- transition_frame = cv2.addWeighted(
1049
- img_array, 1 - alpha,
1050
- next_img_array, alpha, 0
1051
- )
1052
- frames.append(transition_frame)
1053
- frame_count += 1
1054
-
1055
- # Create video clip
1056
- clip = ImageSequenceClip(frames, fps=fps)
1057
- if progress_callback:
1058
- progress_callback(80) # 80% progress after creating clip
1059
-
1060
- # Adjust audio duration
1061
- if audio.duration > clip.duration:
1062
- audio = audio.subclip(0, clip.duration)
1063
- elif audio.duration < clip.duration:
1064
- clip = clip.subclip(0, audio.duration)
1065
-
1066
- # Combine video and audio
1067
- final_clip = clip.set_audio(audio)
1068
- if progress_callback:
1069
- progress_callback(90) # 90% progress after combining audio
1070
-
1071
- # Ensure output directory exists
1072
- os.makedirs(os.path.dirname(output_path), exist_ok=True)
1073
-
1074
- # Write video file
1075
- final_clip.write_videofile(
1076
- output_path,
1077
- fps=fps,
1078
- codec='libx264',
1079
- audio_codec='aac',
1080
- ffmpeg_params=['-pix_fmt', 'yuv420p'],
1081
- verbose=False,
1082
- logger=None
1083
- )
1084
-
1085
- if progress_callback:
1086
- progress_callback(100) # 100% progress after writing file
1087
-
1088
  return output_path
1089
-
1090
  except Exception as e:
1091
- self.logger.error(f"Video creation failed: {str(e)}")
1092
- raise
1093
- finally:
1094
- # Cleanup
1095
- try:
1096
- if 'clip' in locals():
1097
- clip.close()
1098
- if 'final_clip' in locals():
1099
- final_clip.close()
1100
- if 'audio' in locals():
1101
- audio.close()
1102
- except Exception as e:
1103
- self.logger.error(f"Cleanup error: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1104
 
1105
 
1106
 
 
6
  from PIL import Image, ImageDraw, ImageFont
7
  import tempfile
8
  import os
9
+ from moviepy.editor import AudioFileClip, concatenate_videoclips, ImageClip
10
  import numpy as np
11
  from gtts import gTTS
12
  import textwrap
 
955
  msecs = int((seconds - int(seconds)) * 1000)
956
  return f"{hours:02d}:{minutes:02d}:{secs:02d},{msecs:03d}"
957
 
958
+ def create_video(self, images, audio_file, duration, output_path):
959
+ """Creates a video from images and audio."""
 
960
  try:
961
+ if not images or not audio_file:
962
+ raise ValueError("Both images and audio are required.")
963
+
964
+ # Load and adjust the audio file
965
+ audio = AudioFileClip(audio_file)
966
+ audio = audio.subclip(0, min(duration, audio.duration)) # Adjust duration if needed
967
+
968
+ # Create video clips from images
969
+ video_clips = []
970
+ for img_path in images:
971
+ img = Image.open(img_path)
972
+ clip = ImageClip(img).set_duration(duration / len(images))
973
+ video_clips.append(clip)
974
+
975
+ # Combine all clips and set audio
976
+ final_clip = concatenate_videoclips(video_clips)
977
+ final_clip = final_clip.set_audio(audio)
978
+
979
+ # Write to output file
980
+ final_clip.write_videofile(output_path, fps=24, codec="libx264")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
981
  return output_path
 
982
  except Exception as e:
983
+ raise RuntimeError(f"Error generating video: {str(e)}")
984
+
985
# Streamlit interface
st.title("Enhanced Video Generator")

# Upload files
images = st.file_uploader("Upload Images", type=["jpg", "png"], accept_multiple_files=True)
audio_file = st.file_uploader("Upload Audio", type=["mp3", "wav"])
duration = st.number_input("Video Duration (seconds)", min_value=10, max_value=300, value=30)
output_path = "output_video.mp4"

# Generate video button
if st.button("Generate Video"):
    if not images or not audio_file:
        st.error("Please upload both images and audio.")
    else:
        try:
            generator = EnhancedVideoGenerator()

            # Streamlit uploads are in-memory buffers, not files on disk.
            # Persist them into a fresh temp directory: avoids polluting the
            # CWD and avoids name collisions between concurrent sessions.
            upload_dir = tempfile.mkdtemp(prefix="video_gen_")

            image_paths = []
            for img in images:
                img_path = os.path.join(upload_dir, os.path.basename(img.name))
                with open(img_path, "wb") as f:
                    f.write(img.getbuffer())
                image_paths.append(img_path)

            # BUG FIX: the audio upload must also be written to disk —
            # previously only audio_file.name was passed to create_video,
            # a path that pointed at a file that was never saved.
            audio_path = os.path.join(upload_dir, os.path.basename(audio_file.name))
            with open(audio_path, "wb") as f:
                f.write(audio_file.getbuffer())

            # Generate video
            output = generator.create_video(image_paths, audio_path, duration, output_path)
            st.success(f"Video generated: {output}")
            st.video(output)
        except Exception as e:
            st.error(f"Error: {e}")
1012
 
1013
 
1014