amongusrickroll68 committed on
Commit
91d1244
·
1 Parent(s): 97716d7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -10
app.py CHANGED
@@ -1,19 +1,42 @@
1
- import moviepy.editor as mp
 
 
2
  import subprocess
3
 
4
  text = input("Enter text to convert to video: ")
5
 
6
- # create a text clip using MoviePy
7
- txt_clip = mp.TextClip(text, fontsize=70, color='white', size=(1920, 1080)).set_duration(10)
 
 
8
 
9
- # create a video clip using a solid color background
10
- color_clip = mp.ColorClip(size=(1920, 1080), color=(0, 0, 0)).set_duration(10)
11
 
12
- # overlay the text clip on top of the color clip
13
- video_clip = mp.CompositeVideoClip([color_clip, txt_clip])
 
14
 
15
- # write the video to a file
16
- video_clip.write_videofile("output.mp4", fps=25)
 
 
 
 
 
 
 
 
 
 
 
17
 
18
- # use FFmpeg to add audio to the video
 
 
 
 
 
 
 
19
  subprocess.call(['ffmpeg', '-i', 'output.mp4', '-i', 'audio.mp3', '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental', '-map', '0:v:0', '-map', '1:a:0', '-shortest', 'final_output.mp4'])
 
1
+ import cv2
2
+ import numpy as np
3
+ import torch
4
  import subprocess
5
 
6
  text = input("Enter text to convert to video: ")
7
 
8
+ # Load pre-trained GPT-2 model
9
+ model = torch.hub.load('huggingface/transformers', 'gpt2', tokenizer='gpt2-medium')
10
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
11
+ model.to(device)
12
 
13
+ # Generate text tokens from the input text
14
+ input_ids = torch.tensor(model.tokenizer.encode(text)).unsqueeze(0).to(device)
15
 
16
+ # Generate text sequences from the model
17
+ with torch.no_grad():
18
+ output_sequences = model.generate(input_ids=input_ids, max_length=1024, temperature=1.0)
19
 
20
+ # Convert text sequences to video frames
21
+ frames = []
22
+ for sequence in output_sequences:
23
+ sequence = sequence.cpu().numpy().tolist()
24
+ frame = np.zeros((1080, 1920, 3), dtype=np.uint8)
25
+ for i in range(len(sequence)):
26
+ color = (255, 255, 255)
27
+ if sequence[i] == 0:
28
+ break
29
+ if sequence[i] == 50256: # <eos> token
30
+ continue
31
+ cv2.putText(frame, model.tokenizer.decode(sequence[i]), (50, (i+1)*70), cv2.FONT_HERSHEY_SIMPLEX, 2, color, 3)
32
+ frames.append(frame)
33
 
34
+ # Save frames as video
35
+ fourcc = cv2.VideoWriter_fourcc(*'mp4v')
36
+ video_writer = cv2.VideoWriter("output.mp4", fourcc, 25.0, (1920, 1080))
37
+ for frame in frames:
38
+ video_writer.write(frame)
39
+ video_writer.release()
40
+
41
+ # Use FFmpeg to add audio to the video
42
  subprocess.call(['ffmpeg', '-i', 'output.mp4', '-i', 'audio.mp3', '-c:v', 'copy', '-c:a', 'aac', '-strict', 'experimental', '-map', '0:v:0', '-map', '1:a:0', '-shortest', 'final_output.mp4'])