| """ |
| π¬ VIRAL HORROR SHORTS GENERATOR - GRADIO APP |
| Deploy this on Hugging Face Spaces for a one-click horror video generator! |
| |
| SETUP INSTRUCTIONS FOR HUGGING FACE SPACES: |
| 1. Create new Space at https://huggingface.co/new-space |
| 2. Choose: Gradio SDK |
| 3. Choose: CPU Basic (free tier works!) |
| 4. Upload this file as app.py |
| 5. Create requirements.txt with dependencies (see bottom of file) |
| 6. Your app will be live at: https://huggingface.co/spaces/YOUR_USERNAME/horror-shorts |
| |
| Click "Generate Horror Short" and wait ~2-3 minutes for your video! |
| """ |
|
|
import math
import os
import random
import shutil
import subprocess
from pathlib import Path

import cv2
import gradio as gr
import numpy as np
import torch
from diffusers import StableDiffusionPipeline
from gtts import gTTS
from PIL import Image, ImageDraw, ImageFont
from pydub import AudioSegment
from pydub.effects import low_pass_filter
from pydub.generators import Sine, WhiteNoise
|
|
| |
| |
| |
|
|
| |
# Story pool for the narration.  Each entry has three beats -- a "hook" to
# stop the scroll, a "build" that escalates, and a "twist" ending --
# which generate_script() joins in that order into one narration string.
HORROR_SCRIPTS = [
    {
        "hook": "I found a door in my apartment that wasn't there yesterday.",
        "build": "Behind it was my living room. But I was already standing in my living room.",
        "twist": "I heard my own voice from inside, asking who's at the door."
    },
    {
        "hook": "The security camera records one extra person leaving than entering.",
        "build": "Every single day. I checked the footage.",
        "twist": "The extra person always leaves at 3:33 AM. It's me."
    },
    {
        "hook": "My phone's camera turned on at 3 AM.",
        "build": "In the photo, someone was standing behind me in the dark.",
        "twist": "I live alone. I checked my apartment. I'm still alone."
    },
    {
        "hook": "I work night shifts at an empty mall.",
        "build": "Last night, all the mannequins were facing the camera room.",
        "twist": "This morning, they're facing the exit. I'm the only one here."
    },
    {
        "hook": "There's a staircase in the woods that leads nowhere.",
        "build": "Every night at midnight, I hear footsteps going up.",
        "twist": "Tonight, I heard them coming back down."
    },
    {
        "hook": "I found VHS tapes in my attic labeled with tomorrow's date.",
        "build": "I played one. It was security footage of my house.",
        "twist": "In the video, I'm not alone."
    },
    {
        "hook": "The elevator has a button for floor 13. We only have 12 floors.",
        "build": "Curiosity got the better of me. I pressed it.",
        "twist": "The doors opened to floor 1. But everyone was gone."
    },
    {
        "hook": "Every mirror here shows the room 30 seconds ago.",
        "build": "I stood still and watched myself in the mirror.",
        "twist": "After 30 seconds, my reflection didn't move with me."
    },
]
|
|
| |
# Prompt pool for Stable Diffusion -- liminal-space / analog-horror scenes.
# One prompt is chosen at random for each generated image.
VISUAL_PROMPTS = [
    "empty fluorescent lit hallway, liminal space, eerie, backrooms aesthetic, unsettling, film grain",
    "abandoned mall at night, security camera POV, grainy, dark, ominous shadows",
    "empty parking garage, concrete, flickering lights, analog horror, CCTV footage",
    "dimly lit stairwell, institutional walls, exit sign glowing, uncomfortable perspective",
    "long hotel corridor, identical doors, carpet pattern, yellow lighting, liminal space",
    "empty subway platform at 3am, fluorescent lights, abandoned, eerie",
    "abandoned office space, cubicles in darkness, emergency lighting, liminal",
    "dark basement, old furniture, single hanging light bulb, grainy photo, creepy",
]


# Applied to every generation to keep the images dark, empty of people,
# and free of text/watermarks.
NEGATIVE_PROMPT = "people, faces, bright, colorful, cartoon, cheerful, sunlight, text, watermark"
|
|
| |
| |
| |
|
|
def setup_directories():
    """Recreate the 'output' and 'temp' working folders from scratch.

    Any leftovers from a previous run are deleted first.
    """
    for name in ('output', 'temp'):
        if os.path.exists(name):
            shutil.rmtree(name)
        os.makedirs(name)
|
|
def generate_script():
    """Assemble a narration by joining the three beats of a random story."""
    beats = random.choice(HORROR_SCRIPTS)
    return " ".join(beats[part] for part in ("hook", "build", "twist"))
|
|
def create_voiceover(script, output_path="temp/voice.mp3"):
    """Render *script* as a slowed-down TTS track with a muffled, eerie tone.

    Returns a tuple of (output_path, duration_in_seconds).
    """
    # slow=True makes the narration read as more ominous.
    gTTS(text=script, lang='en', slow=True).save("temp/voice_raw.mp3")

    # Post-process: drop 3 dB, cut highs above 2.8 kHz, soft fades.
    voice = AudioSegment.from_mp3("temp/voice_raw.mp3")
    voice = low_pass_filter(voice - 3, 2800)
    voice = voice.fade_in(100).fade_out(100)

    voice.export(output_path, format='mp3')

    # pydub measures length in milliseconds.
    return output_path, len(voice) / 1000.0
|
|
def create_ambient_sound(duration_sec, output_path="temp/ambient.mp3"):
    """Synthesize a creepy ambient bed lasting *duration_sec* seconds.

    Three layers: a deep 60 Hz drone, a faint 8 kHz electrical whine,
    and quiet white-noise hiss, mixed together with long fades.
    """
    length_ms = int(duration_sec * 1000)  # pydub works in milliseconds

    layers = [
        Sine(60).to_audio_segment(duration=length_ms) - 20,     # deep rumble
        Sine(8000).to_audio_segment(duration=length_ms) - 30,   # high whine
        WhiteNoise().to_audio_segment(duration=length_ms) - 35, # tape hiss
    ]

    bed = layers[0]
    for extra in layers[1:]:
        bed = bed.overlay(extra)

    bed = bed.fade_in(2000).fade_out(2000)
    bed.export(output_path, format='mp3')

    return output_path
|
|
def generate_horror_image(prompt, pipe):
    """Run one Stable Diffusion pass and return the resulting PIL image.

    Portrait 512x768 so the frame upscales cleanly to the 1080x1920
    Shorts canvas later in the pipeline.
    """
    result = pipe(
        prompt=prompt,
        negative_prompt=NEGATIVE_PROMPT,
        height=768,
        width=512,
        num_inference_steps=20,
        guidance_scale=7.5,
    )
    return result.images[0]
|
|
def create_zoom_animation(image, duration_sec=3.75, fps=30):
    """Turn a still PIL image into a list of BGR frames with a slow push-in.

    The zoom factor grows linearly from 1.0 toward 1.15 over the clip;
    each frame is center-cropped back to the source size so dimensions
    stay constant.
    """
    base = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w = base.shape[:2]

    n_frames = int(duration_sec * fps)
    frames = []
    for idx in range(n_frames):
        factor = 1.0 + 0.15 * (idx / n_frames)
        zw = int(w * factor)
        zh = int(h * factor)
        enlarged = cv2.resize(base, (zw, zh))

        # Crop symmetrically so the zoom stays centered.
        ox = (zw - w) // 2
        oy = (zh - h) // 2
        frames.append(enlarged[oy:oy + h, ox:ox + w])

    return frames
|
|
def resize_to_shorts(frame):
    """Scale-and-center-crop a BGR frame to the 1080x1920 YouTube Shorts canvas.

    The frame is enlarged just enough to cover the target on both axes
    (aspect ratio preserved), then the overflow is cropped symmetrically.
    Returns a frame of exactly (1920, 1080) pixels.
    """
    target_h, target_w = 1920, 1080
    current_h, current_w = frame.shape[:2]

    # "Cover" scaling: the larger of the two ratios guarantees both axes
    # reach the target.
    scale = max(target_w / current_w, target_h / current_h)
    # Round up (and clamp) so float truncation can never leave the resized
    # frame a pixel short of the canvas -- int() truncation here previously
    # risked producing a crop smaller than 1080x1920.
    new_w = max(math.ceil(current_w * scale), target_w)
    new_h = max(math.ceil(current_h * scale), target_h)

    resized = cv2.resize(frame, (new_w, new_h))

    # Center-crop the excess on the oversized axis.
    start_x = (new_w - target_w) // 2
    start_y = (new_h - target_h) // 2
    return resized[start_y:start_y + target_h, start_x:start_x + target_w]
|
|
def add_subtitle_to_frame(frame, text):
    """Burn *text* as a centered, black-outlined white subtitle onto a BGR frame.

    Rendering is done with PIL so no ImageMagick dependency is needed.
    Returns a new BGR frame array.
    """
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    pil_img = Image.fromarray(frame_rgb)
    draw = ImageDraw.Draw(pil_img)

    # DejaVu ships with most Linux images (including HF Spaces); fall back
    # to PIL's built-in bitmap font anywhere else.  OSError is what
    # ImageFont.truetype raises for a missing/unreadable font file --
    # the old bare `except:` also swallowed KeyboardInterrupt etc.
    try:
        font = ImageFont.truetype(
            "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 50)
    except OSError:
        font = ImageFont.load_default()

    bbox = draw.textbbox((0, 0), text, font=font)
    text_width = bbox[2] - bbox[0]

    # Center horizontally on the actual frame width (was hard-coded 1080);
    # y=1600 sits near the bottom of the 1920px-tall Shorts canvas.
    x = (pil_img.width - text_width) // 2
    y = 1600

    # Pillow's native stroke renders the black outline in one call,
    # replacing the old O(stroke^2) manual offset-draw loop.
    draw.text((x, y), text, font=font, fill='white',
              stroke_width=3, stroke_fill='black')

    return cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
|
|
def create_video_with_subtitles(frames, script, fps=30, output_path="output/final_short.mp4"):
    """Overlay rolling 3-word subtitle chunks onto *frames* and return the new list.

    Despite the name, this only burns subtitles in; audio muxing and
    encoding happen in combine_audio_video().  The fps and output_path
    parameters are unused and kept only for interface compatibility.
    """
    # Split the narration into 3-word captions.
    words = script.split()
    chunks = [' '.join(words[i:i + 3]) for i in range(0, len(words), 3)]

    # Guard: an empty script previously caused a ZeroDivisionError below.
    if not chunks:
        return list(frames)

    # At least 1 frame per caption -- if captions outnumber frames, the
    # old integer division produced 0 and crashed the index math.
    frames_per_subtitle = max(1, len(frames) // len(chunks))

    frames_with_subs = []
    for i, frame in enumerate(frames):
        subtitle_idx = min(i // frames_per_subtitle, len(chunks) - 1)
        frames_with_subs.append(add_subtitle_to_frame(frame, chunks[subtitle_idx]))

    return frames_with_subs
|
|
def combine_audio_video(video_frames, voice_path, ambient_path, fps=30, output_path="output/final_short.mp4"):
    """Encode *video_frames* to mp4 and mux in the voice + ambient audio mix.

    Steps: write a silent video with OpenCV, mix the two audio tracks
    with pydub, then call ffmpeg to combine them.  Returns output_path.
    Raises subprocess.CalledProcessError if ffmpeg fails.
    """
    # 1) Silent video track at the Shorts resolution.
    temp_video = "temp/video_no_audio.mp4"
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    writer = cv2.VideoWriter(temp_video, fourcc, fps, (1080, 1920))
    for frame in video_frames:
        writer.write(frame)
    writer.release()

    # 2) Audio mix: narration on top, ambient bed ducked by 12 dB.
    voice = AudioSegment.from_mp3(voice_path)
    ambient = AudioSegment.from_mp3(ambient_path)
    mixed = voice.overlay(ambient - 12)
    mixed.export("temp/mixed_audio.mp3", format='mp3')

    # 3) Mux with ffmpeg.  An argument list (shell=False) keeps paths with
    # spaces or shell metacharacters safe -- the old os.system() call
    # interpolated the paths into an unquoted shell string.  check=True
    # surfaces encoding failures instead of silently returning a bad file.
    subprocess.run(
        ['ffmpeg', '-y', '-loglevel', 'quiet',
         '-i', temp_video, '-i', 'temp/mixed_audio.mp3',
         '-c:v', 'libx264', '-c:a', 'aac', '-shortest',
         output_path],
        check=True,
    )

    return output_path
|
|
| |
| |
| |
|
|
def generate_horror_short(progress=gr.Progress()):
    """Run the full pipeline: script -> voice -> ambient -> images -> final video.

    Reports status through the Gradio progress bar and returns
    (path_to_final_mp4, script_text) for the two UI outputs.
    """
    setup_directories()

    progress(0.1, desc="Writing creepy script...")
    script = generate_script()

    progress(0.2, desc="Creating eerie voiceover...")
    voice_path, duration = create_voiceover(script)

    progress(0.25, desc="Generating ambient horror sound...")
    ambient_path = create_ambient_sound(duration)

    progress(0.3, desc="Loading image generator...")
    # fp16 halves VRAM on GPU; CPU needs fp32.  The safety checker is
    # disabled -- dark horror imagery tends to trip false positives.
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        safety_checker=None,
    )
    if torch.cuda.is_available():
        pipe = pipe.to("cuda")

    # One generated image per quarter of the narration, each slow-zoomed
    # so the whole animation spans the voiceover's duration.
    num_frames = 4
    all_animated_frames = []
    for i in range(num_frames):
        progress(0.3 + (i * 0.15), desc=f"Generating horror image {i + 1}/{num_frames}...")
        prompt = random.choice(VISUAL_PROMPTS)
        image = generate_horror_image(prompt, pipe)

        progress(0.3 + (i * 0.15) + 0.05, desc=f"Animating image {i + 1}/{num_frames}...")
        frames = create_zoom_animation(image, duration_sec=duration / num_frames)
        all_animated_frames.extend(resize_to_shorts(f) for f in frames)

    progress(0.85, desc="Burning in subtitles...")
    frames_with_subs = create_video_with_subtitles(all_animated_frames, script)

    progress(0.95, desc="Rendering final video...")
    output_video = combine_audio_video(frames_with_subs, voice_path, ambient_path)

    # NOTE(review): the original final desc string was corrupted (a garbled
    # character split across a line break, which is a syntax error);
    # reconstructed here as plain text.
    progress(1.0, desc="Horror short complete!")

    return output_video, script
|
|
| |
| |
| |
|
|
def create_interface():
    """Build and return the Gradio Blocks UI for the generator.

    The garbled emoji/arrow characters in the original Markdown strings
    (mojibake) have been reconstructed as readable text.
    """
    with gr.Blocks(theme=gr.themes.Base(primary_hue="red", secondary_hue="gray")) as demo:
        gr.Markdown("""
        # 🎬 Viral Horror Shorts Generator
        ### Create Faceless YouTube Shorts in Creepypasta Style

        **One-click pipeline:** Script → Voice → Images → Animation → Subtitles → Final Video

        Click the button below and wait ~2-3 minutes for your horror short!
        """)

        with gr.Row():
            with gr.Column():
                generate_btn = gr.Button("🎬 Generate Horror Short", variant="primary", size="lg")

                gr.Markdown("""
                ### 📝 What This Creates:
                - ✅ Creepy creepypasta-style script
                - ✅ Eerie AI voiceover
                - ✅ Liminal space visuals (Stable Diffusion)
                - ✅ Slow zoom animations
                - ✅ Auto subtitles
                - ✅ Ambient horror soundtrack
                - ✅ 1080x1920 YouTube Shorts format

                ### ⏱️ Generation Time:
                - **With GPU:** ~2-3 minutes
                - **CPU only:** ~8-10 minutes

                ### 🎨 Style:
                Liminal spaces, backrooms, analog horror, CCTV aesthetic
                """)

            with gr.Column():
                video_output = gr.Video(label="Your Horror Short", height=600)
                script_output = gr.Textbox(label="Generated Script", lines=4)

        # Single action: run the whole pipeline and fill both outputs.
        generate_btn.click(
            fn=generate_horror_short,
            inputs=[],
            outputs=[video_output, script_output],
        )

        gr.Markdown("""
        ---
        ### 💡 Tips:
        - Run multiple times for different stories and visuals
        - Download the video and upload to YouTube Shorts
        - Best results with GPU (use Hugging Face Spaces with GPU)

        ### 🚀 Deploy Your Own:
        1. Fork this Space
        2. Upgrade to GPU for faster generation
        3. Customize HORROR_SCRIPTS and VISUAL_PROMPTS in the code
        """)

    return demo
|
|
| |
| |
| |
|
|
| if __name__ == "__main__": |
| demo = create_interface() |
| demo.launch() |
|
|
| """ |
| βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ |
| π¦ REQUIREMENTS.TXT (Create this file in your Hugging Face Space) |
| βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ |
| |
| gradio |
| torch |
| torchvision |
| diffusers |
| transformers |
| accelerate |
| gtts |
| pydub |
| opencv-python-headless |
| pillow |
| numpy |
| |
| βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ |
| π DEPLOYMENT INSTRUCTIONS |
| βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ |
| |
| 1. Go to https://huggingface.co/new-space |
| 2. Name: "horror-shorts-generator" |
| 3. License: MIT |
| 4. Select SDK: Gradio |
| 5. Select Hardware: CPU Basic (free) or GPU (faster) |
| 6. Create Space |
| |
| 7. Click "Files" β "Add file" β "Create new file" |
| 8. Name it: app.py |
| 9. Paste this entire code |
| 10. Save |
| |
| 11. Create another file: requirements.txt |
| 12. Paste the dependencies listed above |
| 13. Save |
| |
| 14. Your app will build automatically (2-3 minutes) |
| 15. Once ready, click "Generate Horror Short"! |
| |
| βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ |
| """ |