import cv2
import numpy as np
import tensorflow as tf
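# NOTE: this is the MoviePy >= 2.0 import path; MoviePy 1.x exposed these classes via moviepy.editor.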
from moviepy import VideoFileClip, concatenate_videoclips
import gradio as gr
from tqdm import tqdm
import os
import logging
from datetime import datetime
# --- IMPORTANT CHANGE: No folders needed ---
# The code now assumes 'model.h5' is in the same root directory as this app.py file.
MODEL_PATH = 'model.h5'
# --- Setup Basic Logging ---
# This will print helpful info to the Hugging Face logs for debugging.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# --- Load Model ---
if not os.path.exists(MODEL_PATH):
    error_msg = f"Model file not found at '{MODEL_PATH}'. Make sure you have uploaded your 'model.h5' to the root of your Space."
    logging.error(error_msg)
    raise FileNotFoundError(error_msg)

model = tf.keras.models.load_model(MODEL_PATH)
logging.info("AI model loaded successfully.")
# Based on your training code, LabelBinarizer sorts class names alphabetically.
# "jumpscare" comes before "normal", so the model's output for the "jumpscare" class
# will be at index 0. If this is wrong, change this to 1.
JUMPSCARE_CLASS_INDEX = 0
logging.info(f"Using class index {JUMPSCARE_CLASS_INDEX} for 'jumpscare' probability.")

def predict_frame_is_jumpscare(frame, threshold):
    """Analyzes a single video frame and predicts whether it shows a jumpscare."""
    # Preprocess the frame. MoviePy's iter_frames() already yields RGB arrays, so no
    # BGR->RGB conversion is needed; just resize to the model's 128x128 input and
    # scale pixel values to [0, 1].
    resized_frame = cv2.resize(frame, (128, 128))
    img_array = np.array(resized_frame, dtype=np.float32) / 255.0
    img_array = np.expand_dims(img_array, axis=0)
    # Get the model's prediction (e.g., [[0.9, 0.1]])
    prediction = model.predict(img_array, verbose=0)
    # Get the specific probability for the 'jumpscare' class
    jumpscare_probability = prediction[0][JUMPSCARE_CLASS_INDEX]
    return jumpscare_probability > threshold

def generate_jumpscare_compilation(video_path, sensitivity, progress=gr.Progress()):
    """Analyzes a video, finds jumpscare segments, and creates a compilation."""
    try:
        # --- Initialization ---
        threshold = sensitivity / 100.0
        analysis_fps = 10
        pre_scare_buffer = 1.0  # seconds before the scare
        post_scare_buffer = 1.5  # seconds after the scare
        logging.info(f"Starting analysis for video: {os.path.basename(video_path)}")
        logging.info(f"Settings: Sensitivity={sensitivity}, Threshold={threshold}")
        original_clip = VideoFileClip(video_path)
        jumpscare_times = []
        total_frames = int(original_clip.duration * analysis_fps)
        # --- Frame-by-Frame Analysis ---
        progress(0, desc="Analyzing Frames...")
        for i, frame in enumerate(tqdm(original_clip.iter_frames(fps=analysis_fps), total=total_frames, desc="Analyzing Frames")):
            current_time = i / analysis_fps
            progress(i / total_frames, desc=f"Analyzing... {int(current_time)}s / {int(original_clip.duration)}s")
            if predict_frame_is_jumpscare(frame, threshold):
                jumpscare_times.append(current_time)

        if not jumpscare_times:
            msg = "No jumpscares detected. Try a lower sensitivity value."
            logging.warning(msg)
            raise gr.Error(msg)
        # --- Merge close detections into continuous segments ---
        logging.info(f"Merging {len(jumpscare_times)} detected frames into clips...")
        merged_segments = []
        if jumpscare_times:
            start_time = end_time = jumpscare_times[0]
            for t in jumpscare_times[1:]:
                if t <= end_time + post_scare_buffer:
                    end_time = t
                else:
                    merged_segments.append((max(0, start_time - pre_scare_buffer), end_time + post_scare_buffer))
                    start_time = end_time = t
            merged_segments.append((max(0, start_time - pre_scare_buffer), end_time + post_scare_buffer))
        # --- Create Final Video ---
        progress(0.9, desc="Stitching clips together...")
        # subclipped() is the MoviePy 2.x name for the old 1.x subclip() method, matching
        # the top-level "from moviepy import ..." style used above.
        final_clips = [original_clip.subclipped(start, min(end, original_clip.duration)) for start, end in merged_segments]
        final_video = concatenate_videoclips(final_clips, method="compose")

        # Save the output video to the root with a unique name
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_path = f"jumpscare_compilation_{timestamp}.mp4"
        logging.info(f"Writing final video to {output_path}")
        final_video.write_videofile(output_path, codec="libx264", audio_codec="aac")

        original_clip.close()
        final_video.close()
        logging.info("Process completed successfully.")
        return output_path
    except gr.Error:
        # Let Gradio errors (e.g. "No jumpscares detected") reach the UI unchanged.
        raise
    except Exception as e:
        logging.error(f"An error occurred: {e}", exc_info=True)
        raise gr.Error(f"An unexpected error occurred. Check the logs for details. Error: {e}")

# --- Gradio Interface (Simplified) ---
iface = gr.Interface(
    fn=generate_jumpscare_compilation,
    inputs=[
        gr.Video(label="Upload FNAF Video"),
        gr.Slider(minimum=1, maximum=99, step=1, value=80, label="Detection Sensitivity",
                  info="Higher values require more certainty from the AI; lower values catch more scares but may include false positives.")
    ],
    outputs=gr.Video(label="Jumpscare Compilation"),
    title="🤖 AI FNAF Jumpscare Dump Generator",
    description="Upload a video, and the AI will find all jumpscares and compile them into one clip."
)

if __name__ == "__main__":
    iface.launch()
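
# Usage note: on a Hugging Face Space this file is launched automatically. To run it
# locally, install tensorflow, opencv-python, moviepy, gradio, and tqdm, place model.h5
# next to app.py, and start it with `python app.py`; Gradio serves the UI on
# http://127.0.0.1:7860 by default.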