# generator / app.py
# NOTE: the following lines are Hugging Face web-page chrome captured when the
# file was scraped; kept as comments so the module remains importable.
#   ash12321's picture — Create app.py — ecf25dd verified — raw / history / blame — 18.2 kB
"""
🎬 VIRAL HORROR SHORTS GENERATOR - GRADIO APP
Deploy this on Hugging Face Spaces for a one-click horror video generator!
SETUP INSTRUCTIONS FOR HUGGING FACE SPACES:
1. Create new Space at https://huggingface.co/new-space
2. Choose: Gradio SDK
3. Choose: CPU Basic (free tier works!)
4. Upload this file as app.py
5. Create requirements.txt with dependencies (see bottom of file)
6. Your app will be live at: https://huggingface.co/spaces/YOUR_USERNAME/horror-shorts
Click "Generate Horror Short" and wait ~2-3 minutes for your video!
"""
import gradio as gr
import torch
import random
import numpy as np
import cv2
from PIL import Image, ImageDraw, ImageFont
import os
import shutil
from pathlib import Path
# Core libraries
from diffusers import StableDiffusionPipeline
from gtts import gTTS
from pydub import AudioSegment
from pydub.generators import Sine, WhiteNoise
from pydub.effects import low_pass_filter
# ═══════════════════════════════════════════════════════════════════
# CONFIGURATION
# ═══════════════════════════════════════════════════════════════════
# Horror story templates (optimized for viral engagement).
# Each story is a three-beat micro-narrative that generate_script() joins
# into a single narration string:
#   hook  - opening line designed to stop the scroll
#   build - escalation that deepens the scenario
#   twist - final reveal
HORROR_SCRIPTS = [
    {
        "hook": "I found a door in my apartment that wasn't there yesterday.",
        "build": "Behind it was my living room. But I was already standing in my living room.",
        "twist": "I heard my own voice from inside, asking who's at the door."
    },
    {
        "hook": "The security camera records one extra person leaving than entering.",
        "build": "Every single day. I checked the footage.",
        "twist": "The extra person always leaves at 3:33 AM. It's me."
    },
    {
        "hook": "My phone's camera turned on at 3 AM.",
        "build": "In the photo, someone was standing behind me in the dark.",
        "twist": "I live alone. I checked my apartment. I'm still alone."
    },
    {
        "hook": "I work night shifts at an empty mall.",
        "build": "Last night, all the mannequins were facing the camera room.",
        "twist": "This morning, they're facing the exit. I'm the only one here."
    },
    {
        "hook": "There's a staircase in the woods that leads nowhere.",
        "build": "Every night at midnight, I hear footsteps going up.",
        "twist": "Tonight, I heard them coming back down."
    },
    {
        "hook": "I found VHS tapes in my attic labeled with tomorrow's date.",
        "build": "I played one. It was security footage of my house.",
        "twist": "In the video, I'm not alone."
    },
    {
        "hook": "The elevator has a button for floor 13. We only have 12 floors.",
        "build": "Curiosity got the better of me. I pressed it.",
        "twist": "The doors opened to floor 1. But everyone was gone."
    },
    {
        "hook": "Every mirror here shows the room 30 seconds ago.",
        "build": "I stood still and watched myself in the mirror.",
        "twist": "After 30 seconds, my reflection didn't move with me."
    },
]
# Text-to-image prompts steering Stable Diffusion toward a liminal-space /
# analog-horror aesthetic; one is chosen at random per generated image.
VISUAL_PROMPTS = [
    "empty fluorescent lit hallway, liminal space, eerie, backrooms aesthetic, unsettling, film grain",
    "abandoned mall at night, security camera POV, grainy, dark, ominous shadows",
    "empty parking garage, concrete, flickering lights, analog horror, CCTV footage",
    "dimly lit stairwell, institutional walls, exit sign glowing, uncomfortable perspective",
    "long hotel corridor, identical doors, carpet pattern, yellow lighting, liminal space",
    "empty subway platform at 3am, fluorescent lights, abandoned, eerie",
    "abandoned office space, cubicles in darkness, emergency lighting, liminal",
    "dark basement, old furniture, single hanging light bulb, grainy photo, creepy",
]
# Negative prompt applied to every generation to keep images dark, empty,
# and free of people, text, and watermarks.
NEGATIVE_PROMPT = "people, faces, bright, colorful, cartoon, cheerful, sunlight, text, watermark"
# ═══════════════════════════════════════════════════════════════════
# UTILITY FUNCTIONS
# ═══════════════════════════════════════════════════════════════════
def setup_directories():
    """Reset the working folders so each run starts from a clean slate.

    Deletes and recreates ``output/`` and ``temp/`` in the current
    working directory.
    """
    for name in ('output', 'temp'):
        folder = Path(name)
        if folder.exists():
            shutil.rmtree(folder)
        folder.mkdir()
def generate_script():
    """Pick a random story template and return it as one narration string."""
    tale = random.choice(HORROR_SCRIPTS)
    return " ".join((tale['hook'], tale['build'], tale['twist']))
def create_voiceover(script, output_path="temp/voice.mp3"):
    """Generate a slowed, low-passed TTS voiceover for *script*.

    Parameters
    ----------
    script : str
        Text to narrate.
    output_path : str
        Destination mp3 path; its directory must already exist.

    Returns
    -------
    tuple[str, float]
        ``(output_path, duration_in_seconds)``.
    """
    # Fix: derive the intermediate file from output_path's directory instead
    # of hardcoding "temp/", and delete it once processing is done.
    raw_path = os.path.join(os.path.dirname(output_path) or ".", "voice_raw.mp3")
    tts = gTTS(text=script, lang='en', slow=True)
    tts.save(raw_path)
    # Post-process for a creepier read: slightly quieter, muffled highs,
    # soft fade at both ends.
    audio = AudioSegment.from_mp3(raw_path)
    audio = audio - 3                     # lower volume slightly
    audio = low_pass_filter(audio, 2800)  # removing highs = more ominous
    audio = audio.fade_in(100).fade_out(100)
    audio.export(output_path, format='mp3')
    os.remove(raw_path)                   # clean up the intermediate file
    return output_path, len(audio) / 1000.0  # pydub length is in ms
def create_ambient_sound(duration_sec, output_path="temp/ambient.mp3"):
    """Render an unsettling ambient bed of the given length.

    Layers a deep 60 Hz drone, a faint 8 kHz whine, and very quiet white
    noise, then fades the mix in and out over two seconds.
    Returns the path of the exported mp3.
    """
    length_ms = int(duration_sec * 1000)  # pydub works in milliseconds
    # Base layer: deep drone at an uncomfortable frequency, -20 dB.
    mix = Sine(60).to_audio_segment(duration=length_ms) - 20
    # Overlay the tension whine (-30 dB) and subtle static (-35 dB).
    for generator, attenuation in ((Sine(8000), 30), (WhiteNoise(), 35)):
        layer = generator.to_audio_segment(duration=length_ms) - attenuation
        mix = mix.overlay(layer)
    mix = mix.fade_in(2000).fade_out(2000)
    mix.export(output_path, format='mp3')
    return output_path
def generate_horror_image(prompt, pipe):
    """Run one Stable Diffusion inference and return the resulting image.

    ``pipe`` is a loaded StableDiffusionPipeline; output is portrait
    512x768 so the later Shorts crop loses little of the frame.
    """
    result = pipe(
        prompt=prompt,
        negative_prompt=NEGATIVE_PROMPT,
        height=768,
        width=512,
        num_inference_steps=20,  # low step count trades quality for speed
        guidance_scale=7.5,
    )
    return result.images[0]
def create_zoom_animation(image, duration_sec=3.75, fps=30):
    """Turn a still PIL image into a slow push-in (zoom to ~115%).

    Returns a list of BGR numpy frames, ``int(duration_sec * fps)`` long,
    each the same size as the source image.
    """
    base = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    h, w = base.shape[:2]
    frame_count = int(duration_sec * fps)
    frames = []
    for idx in range(frame_count):
        # Linear zoom from 100% up to (just under) 115%.
        factor = 1.0 + (idx / frame_count) * 0.15
        zw, zh = int(w * factor), int(h * factor)
        enlarged = cv2.resize(base, (zw, zh))
        # Center-crop back to the original dimensions.
        x0 = (zw - w) // 2
        y0 = (zh - h) // 2
        frames.append(enlarged[y0:y0 + h, x0:x0 + w])
    return frames
def resize_to_shorts(frame):
    """Scale-and-crop a BGR frame onto the 1080x1920 vertical Shorts canvas.

    The frame is scaled so it fully covers the target, then the overflow
    is cropped symmetrically ("cover" fit).
    """
    target_w, target_h = 1080, 1920
    src_h, src_w = frame.shape[:2]
    # Pick the larger scale factor so both dimensions reach the target.
    factor = max(target_w / src_w, target_h / src_h)
    scaled = cv2.resize(frame, (int(src_w * factor), int(src_h * factor)))
    new_h, new_w = scaled.shape[:2]
    x0 = (new_w - target_w) // 2
    y0 = (new_h - target_h) // 2
    return scaled[y0:y0 + target_h, x0:x0 + target_w]
def add_subtitle_to_frame(frame, text):
    """Burn *text* onto a BGR frame as a white, black-outlined subtitle.

    Uses PIL drawing only, so no ImageMagick install is needed. Returns a
    new BGR frame; the input array is not modified.
    """
    pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    draw = ImageDraw.Draw(pil_img)
    # Prefer DejaVu Bold; fall back to PIL's built-in bitmap font.
    # Fix: catch OSError (what truetype raises) instead of a bare except.
    try:
        font = ImageFont.truetype(
            "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", 50)
    except OSError:
        font = ImageFont.load_default()
    bbox = draw.textbbox((0, 0), text, font=font)
    text_width = bbox[2] - bbox[0]
    # Center horizontally on the actual frame width (was hardcoded 1080) and
    # keep the text 320 px above the bottom edge (y=1600 on a 1920-tall frame),
    # so the function now works for any frame size.
    x = (pil_img.width - text_width) // 2
    y = pil_img.height - 320
    stroke_width = 3
    # Poor man's stroke: stamp black text at every offset in a
    # (2*stroke_width+1)^2 grid, then draw the white text on top.
    for adj_x in range(-stroke_width, stroke_width + 1):
        for adj_y in range(-stroke_width, stroke_width + 1):
            draw.text((x + adj_x, y + adj_y), text, font=font, fill='black')
    draw.text((x, y), text, font=font, fill='white')
    return cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)
def create_video_with_subtitles(frames, script, fps=30, output_path="output/final_short.mp4"):
    """Burn word-chunk subtitles onto *frames* and return the new frames.

    The script is split into 3-word captions that each cover an equal slice
    of the frame list. Despite the name, nothing is written to disk here;
    ``fps`` and ``output_path`` are kept only for interface compatibility —
    encoding happens later in combine_audio_video().
    """
    words = script.split()
    chunks = [' '.join(words[i:i + 3]) for i in range(0, len(words), 3)]
    # Fix: an empty script would otherwise divide by zero below.
    if not chunks:
        return list(frames)
    # Fix: max(1, ...) avoids ZeroDivisionError when there are fewer
    # frames than subtitle chunks.
    frames_per_subtitle = max(1, len(frames) // len(chunks))
    frames_with_subs = []
    for i, frame in enumerate(frames):
        # Clamp to the last chunk so trailing frames keep the final caption.
        subtitle_idx = min(i // frames_per_subtitle, len(chunks) - 1)
        frames_with_subs.append(add_subtitle_to_frame(frame, chunks[subtitle_idx]))
    return frames_with_subs
def combine_audio_video(video_frames, voice_path, ambient_path, fps=30, output_path="output/final_short.mp4"):
    """Mux frames + voiceover + ambient bed into the final mp4.

    Frames are written to a silent intermediate video with OpenCV, the two
    audio tracks are mixed with pydub, and ffmpeg performs the final encode
    (libx264 video, AAC audio, trimmed to the shorter stream).

    Raises
    ------
    RuntimeError
        If ffmpeg exits with a non-zero status.
    """
    import subprocess  # local import: only needed for the final encode

    # 1) Dump frames to a silent intermediate video (1080x1920 expected).
    temp_video = "temp/video_no_audio.mp4"
    writer = cv2.VideoWriter(temp_video, cv2.VideoWriter_fourcc(*'mp4v'),
                             fps, (1080, 1920))
    try:
        for frame in video_frames:
            writer.write(frame)
    finally:
        writer.release()  # always release so the container is finalized

    # 2) Mix the voiceover with the ambient bed sitting 12 dB lower.
    voice = AudioSegment.from_mp3(voice_path)
    ambient = AudioSegment.from_mp3(ambient_path)
    mixed_path = "temp/mixed_audio.mp3"
    voice.overlay(ambient - 12).export(mixed_path, format='mp3')

    # 3) Encode with ffmpeg. Fix: pass an argument list to subprocess.run
    #    instead of an interpolated os.system() string — immune to paths
    #    with spaces / shell injection — and surface failures instead of
    #    silently ignoring the exit code.
    result = subprocess.run(
        ['ffmpeg', '-y', '-loglevel', 'quiet',
         '-i', temp_video, '-i', mixed_path,
         '-c:v', 'libx264', '-c:a', 'aac', '-shortest',
         output_path],
        check=False,
    )
    if result.returncode != 0:
        raise RuntimeError(f"ffmpeg failed with exit code {result.returncode}")
    return output_path
# ═══════════════════════════════════════════════════════════════════
# MAIN GENERATION FUNCTION
# ═══════════════════════════════════════════════════════════════════
def generate_horror_short(progress=gr.Progress()):
    """Run the full pipeline: script -> voice -> images -> subtitles -> video.

    Returns ``(path_to_final_mp4, script_text)`` for the Gradio outputs.
    Progress-bar strings are re-encoded to the intended emoji (the source
    had them mojibake'd).
    """
    setup_directories()

    # Step 1: pick a script (its length drives all later timing).
    progress(0.1, desc="📝 Writing creepy script...")
    script = generate_script()

    # Step 2: voiceover; duration in seconds comes back with the path.
    progress(0.2, desc="🎙️ Creating eerie voiceover...")
    voice_path, duration = create_voiceover(script)

    # Step 3: ambient bed matching the voiceover length.
    progress(0.25, desc="🎵 Generating ambient horror sound...")
    ambient_path = create_ambient_sound(duration)

    # Step 4: Stable Diffusion. Fix: cache the pipeline on the function
    # object so repeat clicks skip the expensive model reload.
    progress(0.3, desc="🖼️ Loading image generator...")
    pipe = getattr(generate_horror_short, "_pipe", None)
    if pipe is None:
        pipe = StableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            safety_checker=None
        )
        if torch.cuda.is_available():
            pipe = pipe.to("cuda")
        generate_horror_short._pipe = pipe

    # Step 5: generate one image per clip, animate, and fit to 1080x1920.
    num_frames = 4
    all_animated_frames = []
    for i in range(num_frames):
        progress(0.3 + (i * 0.15), desc=f"🖼️ Generating horror image {i+1}/{num_frames}...")
        prompt = random.choice(VISUAL_PROMPTS)
        image = generate_horror_image(prompt, pipe)
        progress(0.3 + (i * 0.15) + 0.05, desc=f"🎞️ Animating image {i+1}/{num_frames}...")
        # Split the voiceover duration evenly across the clips.
        frames = create_zoom_animation(image, duration_sec=duration / num_frames)
        all_animated_frames.extend(resize_to_shorts(f) for f in frames)

    # Step 6: burn in captions.
    progress(0.85, desc="📄 Burning in subtitles...")
    frames_with_subs = create_video_with_subtitles(all_animated_frames, script)

    # Step 7: final mux (video + mixed audio).
    progress(0.95, desc="🎬 Rendering final video...")
    output_video = combine_audio_video(frames_with_subs, voice_path, ambient_path)

    progress(1.0, desc="✅ Horror short complete!")
    return output_video, script
# ═══════════════════════════════════════════════════════════════════
# GRADIO INTERFACE
# ═══════════════════════════════════════════════════════════════════
def create_interface():
    """Build and return the Gradio Blocks UI.

    Layout: a two-column row — the left column holds the generate button and
    a feature list, the right column shows the produced video and the script
    text. The button is wired to generate_horror_short(), which returns
    (video_path, script) matching the two output components.

    NOTE(review): several Markdown strings below contain mojibake
    (e.g. "πŸ“‹") from a past encoding mix-up; they are kept byte-identical
    here because they are runtime UI text.
    """
    # Dark red/gray theme to match the horror aesthetic.
    with gr.Blocks(theme=gr.themes.Base(primary_hue="red", secondary_hue="gray")) as demo:
        gr.Markdown("""
        # 🎬 Viral Horror Shorts Generator
        ### Create Faceless YouTube Shorts in Creepypasta Style
        **One-click pipeline:** Script β†’ Voice β†’ Images β†’ Animation β†’ Subtitles β†’ Final Video
        Click the button below and wait ~2-3 minutes for your horror short!
        """)
        with gr.Row():
            with gr.Column():
                generate_btn = gr.Button("🎬 Generate Horror Short", variant="primary", size="lg")
                gr.Markdown("""
                ### πŸ“‹ What This Creates:
                - βœ… Creepy creepypasta-style script
                - βœ… Eerie AI voiceover
                - βœ… Liminal space visuals (Stable Diffusion)
                - βœ… Slow zoom animations
                - βœ… Auto subtitles
                - βœ… Ambient horror soundtrack
                - βœ… 1080x1920 YouTube Shorts format
                ### ⏱️ Generation Time:
                - **With GPU:** ~2-3 minutes
                - **CPU only:** ~8-10 minutes
                ### 🎨 Style:
                Liminal spaces, backrooms, analog horror, CCTV aesthetic
                """)
            with gr.Column():
                # Output components filled by the click handler below.
                video_output = gr.Video(label="Your Horror Short", height=600)
                script_output = gr.Textbox(label="Generated Script", lines=4)
        # Wire the button to the pipeline; no inputs, two outputs.
        generate_btn.click(
            fn=generate_horror_short,
            inputs=[],
            outputs=[video_output, script_output]
        )
        gr.Markdown("""
        ---
        ### πŸ’‘ Tips:
        - Run multiple times for different stories and visuals
        - Download the video and upload to YouTube Shorts
        - Best results with GPU (use Hugging Face Spaces with GPU)
        ### πŸš€ Deploy Your Own:
        1. Fork this Space
        2. Upgrade to GPU for faster generation
        3. Customize HORROR_SCRIPTS and VISUAL_PROMPTS in the code
        """)
    return demo
# ═══════════════════════════════════════════════════════════════════
# LAUNCH APP
# ═══════════════════════════════════════════════════════════════════
if __name__ == "__main__":
    # Build the UI and start the Gradio server (blocks until stopped).
    demo = create_interface()
    demo.launch()
"""
═══════════════════════════════════════════════════════════════════
πŸ“¦ REQUIREMENTS.TXT (Create this file in your Hugging Face Space)
═══════════════════════════════════════════════════════════════════
gradio
torch
torchvision
diffusers
transformers
accelerate
gtts
pydub
opencv-python-headless
pillow
numpy
═══════════════════════════════════════════════════════════════════
πŸš€ DEPLOYMENT INSTRUCTIONS
═══════════════════════════════════════════════════════════════════
1. Go to https://huggingface.co/new-space
2. Name: "horror-shorts-generator"
3. License: MIT
4. Select SDK: Gradio
5. Select Hardware: CPU Basic (free) or GPU (faster)
6. Create Space
7. Click "Files" β†’ "Add file" β†’ "Create new file"
8. Name it: app.py
9. Paste this entire code
10. Save
11. Create another file: requirements.txt
12. Paste the dependencies listed above
13. Save
14. Your app will build automatically (2-3 minutes)
15. Once ready, click "Generate Horror Short"!
═══════════════════════════════════════════════════════════════════
"""