Update app.py
app.py CHANGED
@@ -5,144 +5,133 @@ from moviepy.editor import VideoFileClip, concatenate_videoclips
import gradio as gr
from tqdm import tqdm
import os

- # ---
- #
-
-
-
if not os.path.exists(MODEL_PATH):
-
model = tf.keras.models.load_model(MODEL_PATH)


def predict_frame_is_jumpscare(frame, threshold):
-     """
-
-
-     Args:
-         frame (np.array): A single video frame in BGR format from OpenCV.
-         threshold (float): The sensitivity threshold (0.0 to 1.0) for the prediction.
-
-     Returns:
-         bool: True if the frame is predicted as a jumpscare, False otherwise.
-     """
-     # 1. Preprocess the frame for the model
-     # Convert BGR (OpenCV default) to RGB (model was likely trained on RGB)
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-     # Resize to the model's expected input size (e.g., 128x128)
    resized_frame = cv2.resize(rgb_frame, (128, 128))
-     # Normalize pixel values to be between 0 and 1
    img_array = np.array(resized_frame) / 255.0
-     # Add a batch dimension (model expects shape: 1, height, width, channels)
    img_array = np.expand_dims(img_array, axis=0)
-
-     # 2. Make a prediction
-     prediction = model.predict(img_array, verbose=0)  # verbose=0 silences a lot of Keras logs

-     #
-
-


def generate_jumpscare_compilation(video_path, sensitivity, progress=gr.Progress()):
-     """
-
-
-
-
-
-
-
-
-
-
-     threshold = sensitivity / 100.0
-
-     # Use a lower FPS for analysis to speed things up. Jumpscares usually last
-     # for several frames, so we don't need to check every single one.
-     analysis_fps = 10
-
-     print("Loading video file...")
-     original_clip = VideoFileClip(video_path)
-
-     jumpscare_segments = []
-     is_in_jumpscare_segment = False
-     segment_start_time = 0
-
-     print(f"Analyzing video for jumpscares with threshold {threshold}...")
-
-     # Use tqdm for a console progress bar and gr.Progress for the UI
-     total_frames = int(original_clip.duration * analysis_fps)
-     frame_iterator = original_clip.iter_frames(fps=analysis_fps)
-
-     for i, frame in enumerate(tqdm(frame_iterator, total=total_frames, desc="Analyzing Frames")):
-         current_time = i / analysis_fps
-         progress(i / total_frames, desc=f"Analyzing... {int(current_time)}s / {int(original_clip.duration)}s")

-

-         # ---
-
-
-
-

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-         # We need to return a value that Gradio can handle.
-         # Returning None will cause an error. Instead, we can return a message.
-         # A better approach would be to update a gr.Text component, but for simplicity,
-         # we'll raise an error that Gradio can display.
-         raise gr.Error("No jumpscares were detected with the current sensitivity setting. Try a lower value.")
-
-     print(f"Found {len(jumpscare_segments)} jumpscare segments. Creating compilation...")
-     progress(0.9, desc="Stitching clips together...")
-
-     # Create subclips from the original video using the detected timestamps
-     final_clips = [original_clip.subclip(start, end) for start, end in jumpscare_segments]
-
-     # Concatenate all the jumpscare clips into one video
-     final_video = concatenate_videoclips(final_clips)
-
-     output_path = "fnaf_jumpscare_compilation.mp4"
-     # Write the final video file, including audio
-     final_video.write_videofile(output_path, codec="libx264", audio_codec="aac", temp_audiofile='temp-audio.m4a', remove_temp=True)
-
-     # Close the clips to free up resources
-     original_clip.close()
-     for clip in final_clips:
-         clip.close()
-
-     print(f"Compilation saved to {output_path}")
-     return output_path


# --- Gradio Interface ---
- # Create a Gradio interface for the function
iface = gr.Interface(
    fn=generate_jumpscare_compilation,
    inputs=[
        gr.Video(label="Upload FNAF Video"),
-         gr.Slider(minimum=1, maximum=
-             info="Higher values
    ],
    outputs=gr.Video(label="Jumpscare Compilation"),
-     title="AI FNAF Jumpscare Dump Generator",
-     description="Upload a video, and
    allow_flagging="never"
)

import gradio as gr
from tqdm import tqdm
import os
+ import logging
+ from datetime import datetime

+ # --- Configuration ---
+ # These paths are relative to the root of your Hugging Face Space repo
+ MODEL_PATH = 'model/model.h5'
+ OUTPUT_DIR = 'output/'
+ os.makedirs(OUTPUT_DIR, exist_ok=True)  # Create output dir if it doesn't exist
+
+ # --- Setup Logging ---
+ # This will help you debug issues in the Hugging Face logs
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+
+ # --- Load Model ---
if not os.path.exists(MODEL_PATH):
+     error_msg = f"Model file not found at {MODEL_PATH}. Make sure you have uploaded your 'model.h5' to a 'model' directory in your Space."
+     logging.error(error_msg)
+     raise FileNotFoundError(error_msg)
+
model = tf.keras.models.load_model(MODEL_PATH)
+ logging.info("AI model loaded successfully.")
+
+ # We need to know which index corresponds to 'jumpscare'.
+ # Based on your training code, LabelBinarizer sorts alphabetically.
+ # "jumpscare" comes before "normal", so it's likely index 0.
+ # If it's the other way, change this to 1.
+ JUMPSCARE_CLASS_INDEX = 0
+ logging.info(f"Assuming 'jumpscare' is class index {JUMPSCARE_CLASS_INDEX}. If predictions are wrong, change this value.")


def predict_frame_is_jumpscare(frame, threshold):
+     """Analyzes a single video frame and predicts if it's a jumpscare."""
+     # Preprocess the frame for the model
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    resized_frame = cv2.resize(rgb_frame, (128, 128))
    img_array = np.array(resized_frame) / 255.0
    img_array = np.expand_dims(img_array, axis=0)

+     # Make a prediction. This will return a list of probabilities, e.g., [[0.9, 0.1]]
+     prediction = model.predict(img_array, verbose=0)
+
+     # --- CRITICAL FIX HERE ---
+     # Instead of checking prediction[0][0], we get the probability for the specific "jumpscare" class.
+     jumpscare_probability = prediction[0][JUMPSCARE_CLASS_INDEX]
+
+     return jumpscare_probability > threshold


def generate_jumpscare_compilation(video_path, sensitivity, progress=gr.Progress()):
+     """Analyzes a video, finds jumpscare segments, and creates a compilation."""
+     try:
+         # --- Initialization ---
+         threshold = sensitivity / 100.0
+         analysis_fps = 10
+         pre_scare_buffer = 1.0  # seconds to include before the scare
+         post_scare_buffer = 1.5  # seconds to include after the scare
+
+         logging.info(f"Starting analysis for video: {os.path.basename(video_path)}")
+         logging.info(f"Settings: Sensitivity={sensitivity}, Threshold={threshold}")
+
+         original_clip = VideoFileClip(video_path)

+         jumpscare_times = []
+         total_frames = int(original_clip.duration * analysis_fps)

+         # --- Frame-by-Frame Analysis ---
+         progress(0, desc="Analyzing Frames...")
+         for i, frame in enumerate(tqdm(original_clip.iter_frames(fps=analysis_fps), total=total_frames, desc="Analyzing Frames")):
+             current_time = i / analysis_fps
+             progress(i / total_frames, desc=f"Analyzing... {int(current_time)}s / {int(original_clip.duration)}s")
+
+             if predict_frame_is_jumpscare(frame, threshold):
+                 jumpscare_times.append(current_time)
+                 logging.info(f"Jumpscare detected at timestamp: {current_time:.2f}s")
+
+         # --- Segment Merging ---
+         if not jumpscare_times:
+             msg = "No jumpscares detected. Try a lower sensitivity value."
+             logging.warning(msg)
+             raise gr.Error(msg)
+
+         logging.info(f"Merging {len(jumpscare_times)} detected frames into continuous clips...")
+         merged_segments = []
+         if jumpscare_times:
+             start_time = end_time = jumpscare_times[0]
+             for t in jumpscare_times[1:]:
+                 if t <= end_time + post_scare_buffer:
+                     end_time = t  # Extend the segment
+                 else:
+                     merged_segments.append((max(0, start_time - pre_scare_buffer), end_time + post_scare_buffer))
+                     start_time = end_time = t
+             merged_segments.append((max(0, start_time - pre_scare_buffer), end_time + post_scare_buffer))
+
+         # --- Video Creation ---
+         progress(0.9, desc="Stitching clips together...")
+         final_clips = [original_clip.subclip(start, min(end, original_clip.duration)) for start, end in merged_segments]

+         final_video = concatenate_videoclips(final_clips, method="compose")
+
+         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+         output_filename = f"jumpscare_compilation_{timestamp}.mp4"
+         output_path = os.path.join(OUTPUT_DIR, output_filename)
+
+         logging.info(f"Writing final video to {output_path}")
+         final_video.write_videofile(output_path, codec="libx264", audio_codec="aac")
+
+         original_clip.close()
+         final_video.close()
+
+         logging.info("Process completed successfully.")
+         return output_path

+     except Exception as e:
+         logging.error(f"An error occurred: {e}", exc_info=True)
+         raise gr.Error(f"An unexpected error occurred. Check the logs for details. Error: {e}")

# --- Gradio Interface ---
iface = gr.Interface(
    fn=generate_jumpscare_compilation,
    inputs=[
        gr.Video(label="Upload FNAF Video"),
+         gr.Slider(minimum=1, maximum=99, step=1, value=80, label="Detection Sensitivity",
+                   info="Higher values require more certainty from the AI. Lower values find more, but might have errors.")
    ],
    outputs=gr.Video(label="Jumpscare Compilation"),
+     title="🤖 AI FNAF Jumpscare Dump Generator",
+     description="Upload a video, and the AI will find all jumpscares and compile them. This app runs on a trained model.",
    allow_flagging="never"
)

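The updated code hard-codes JUMPSCARE_CLASS_INDEX = 0 on the assumption that LabelBinarizer ordered the classes alphabetically during training. Rather than guessing, a one-off check of the loaded model's output layout shows how many probabilities come back per frame. This is a minimal sketch, reusing the same model path as the diff; the dummy input and the printed values are illustrative, not taken from the actual model.

    import numpy as np
    import tensorflow as tf

    model = tf.keras.models.load_model('model/model.h5')  # same path as in app.py

    # (None, 2) suggests a two-class softmax, so prediction[0] has an entry per class;
    # (None, 1) means a single sigmoid output and there is only one probability to index.
    print(model.output_shape)

    dummy = np.zeros((1, 128, 128, 3), dtype="float32")  # matches the 128x128 RGB preprocessing above
    print(model.predict(dummy, verbose=0))                # e.g. [[0.97 0.03]] -> index 0 vs. index 1

If the output turns out to be a single unit, the meaning of that lone probability (jumpscare vs. normal) depends on how the labels were binarized during training, so the indexing in predict_frame_is_jumpscare would need to be adjusted accordingly.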
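The segment-merging loop added to generate_jumpscare_compilation is easier to reason about in isolation. The sketch below restates the same idea as a standalone helper with hypothetical detection times; the function name and the example timestamps are illustrative, while the buffer defaults mirror the values in the diff.

    def merge_detections(times, pre_buffer=1.0, post_buffer=1.5):
        """Collapse per-frame detection timestamps into padded (start, end) segments."""
        segments = []
        start = end = times[0]
        for t in times[1:]:
            if t <= end + post_buffer:
                # Close enough to the previous detection: treat it as the same scare.
                end = t
            else:
                # Gap is too large: close the current segment and start a new one.
                segments.append((max(0, start - pre_buffer), end + post_buffer))
                start = end = t
        segments.append((max(0, start - pre_buffer), end + post_buffer))
        return segments

    print(merge_detections([12.0, 12.5, 13.0, 40.0, 40.5]))
    # [(11.0, 14.5), (39.0, 42.0)]  -- two scares, each padded by the buffers

Because detections closer together than post_scare_buffer always land in the same segment, the final compilation ends up with far fewer clips than raw per-frame detections.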
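Before pushing a full video through the Space, it can be worth spot-checking the classifier on a single frame. A minimal sketch, assuming this file is named app.py so predict_frame_is_jumpscare can be imported (which also loads the model), and using a placeholder image path:

    import cv2
    from app import predict_frame_is_jumpscare  # assumes the file above is app.py in the same directory

    frame = cv2.imread("test_frame.png")  # placeholder path; cv2 returns BGR, which the function expects
    if frame is None:
        raise FileNotFoundError("test_frame.png not found")

    print(predict_frame_is_jumpscare(frame, threshold=0.8))  # True/False at the equivalent of slider value 80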