amongusrickroll68 committed on
Commit
3f293be
·
1 Parent(s): 9c2a880

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -38
app.py CHANGED
@@ -1,41 +1,11 @@
1
- import subprocess
2
- import ffmpeg
3
- import numpy as np
4
 
5
- # Upload the video file
6
- video_file = input("Enter the path to the video file: ")
7
 
8
- # Trim the video to 5 seconds
9
- ffmpeg.input(video_file).trim(start=0, duration=5).output("preview2.mp4").run()
 
10
 
11
- # Load the audio from the trimmed video
12
- audio, _ = (ffmpeg
13
- .input("preview2.mp4")
14
- .filter("aextract")
15
- .output("-", format="s16le", acodec="pcm_s16le")
16
- .run(capture_stdout=True, quiet=True)
17
- )
18
- audio = np.frombuffer(audio, dtype=np.int16)
19
-
20
- # Define the pitch progression
21
- pitch_progression = [0, 0, 1, 1, -2, -2, 1, 1]
22
-
23
- # Create the final audio by applying the pitch progression to the video audio
24
- final_audio = np.empty(0, dtype=np.int16)
25
- for i in range(len(pitch_progression)):
26
- # Set the pitch shift
27
- pitch_shift = pitch_progression[i]
28
-
29
- # Apply the pitch shift to the audio
30
- audio_shifted = np.roll(audio, pitch_shift)
31
-
32
- # Add the shifted audio to the final audio
33
- final_audio = np.concatenate([final_audio, audio_shifted])
34
-
35
- # Export the final audio as an mp3 file
36
- ffmpeg.input('pipe:', format='s16le', acodec='pcm_s16le', ar='48000', channels=1, seekable='0', bufsize='512k', stdin_final=True).output("preview2.mp3", format="mp3").run(input=final_audio.tobytes())
37
-
38
- # Delete the temporary video file
39
- subprocess.run(['rm', 'preview2.mp4'])
40
-
41
- print("Preview 2 generated successfully!")
 
1
from transformers import GPTNeoForCausalLM, GPT2Tokenizer

# Load GPT-Neo 2.7B and its (GPT-2 style) tokenizer from the Hugging Face hub.
model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")

prompt = "The quick brown fox"
input_ids = tokenizer(prompt, return_tensors="pt").input_ids

# GPT-Neo defines no pad token; passing eos_token_id as pad_token_id avoids
# the "Setting pad_token_id to eos_token_id" open-end-generation warning and
# makes padding behavior explicit.
generated_text = model.generate(
    input_ids=input_ids,
    # max_length counts the prompt tokens too; NOTE(review): if a fixed
    # number of *new* tokens is intended, max_new_tokens would be the knob.
    max_length=50,
    do_sample=True,
    pad_token_id=tokenizer.eos_token_id,
)

# Decode the first (only) returned sequence, dropping special tokens.
decoded_text = tokenizer.decode(generated_text[0], skip_special_tokens=True)
print(decoded_text)