# Hugging Face Space app by frendyrachman — "Update app.py" (commit 1f79c5e, verified)
import os
import zipfile
from ultralytics import YOLO
import cv2
from moviepy.video.io.VideoFileClip import VideoFileClip
import gradio as gr
from huggingface_hub import hf_hub_download
# Directories for uploaded videos and output clips
UPLOAD_FOLDER = 'uploaded_videos'
OUTPUT_FOLDER = 'output_clips'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
os.makedirs(OUTPUT_FOLDER, exist_ok=True)

# Load YOLO model
# NOTE: this downloads the detector weights from the Hugging Face Hub at
# import time (network I/O on startup); hf_hub_download caches the file
# locally, so subsequent launches reuse the cached copy.
MODEL_WEIGHTS = hf_hub_download(
    repo_id="frendyrachman/mlbb-ai-clipper",
    filename="train_size_n/weights/best.pt"
)
# Shared detector instance used by process_video below.
model = YOLO(MODEL_WEIGHTS)
def process_video(video_path):
    """Detect highlight events in a video with YOLO and export them as clips.

    Samples the video roughly every 1.5 seconds, records the timestamp of each
    detection above the confidence threshold, merges nearby detections into
    highlight windows, renders each window with moviepy, and bundles the
    resulting clips into a single ZIP archive.

    Parameters
    ----------
    video_path : str
        Path to the input video file.

    Returns
    -------
    tuple[str, str | None]
        (status message, path to the highlights ZIP). The path is None when
        the video cannot be opened, so callers can always unpack two values.
    """
    clip_output_folder = os.path.join(OUTPUT_FOLDER, 'clips')
    os.makedirs(clip_output_folder, exist_ok=True)

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        # Return a (message, path) pair — the success path returns a 2-tuple,
        # and callers unpack two values unconditionally.
        return "Error: Cannot open video file", None

    frame_rate = cap.get(cv2.CAP_PROP_FPS)
    frame_skip = int(frame_rate * 1.5)  # sample one frame every ~1.5 seconds
    events = []
    highlight_moments = []

    while cap.isOpened():
        current_frame = int(cap.get(cv2.CAP_PROP_POS_FRAMES))
        ret, frame = cap.read()
        if not ret:
            break
        results = model.predict(source=frame, save=False, conf=0.5)
        if results[0].boxes is not None:
            timestamp = cap.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
            for box in results[0].boxes:
                class_id = int(box.cls)
                events.append({
                    'class_name': model.names[class_id],
                    'timestamp': timestamp,
                })
        # Always jump ahead by frame_skip. (The original `continue` on empty
        # frames skipped this seek, so every subsequent frame was processed,
        # defeating the 1.5-second sampling.)
        cap.set(cv2.CAP_PROP_POS_FRAMES, current_frame + frame_skip)
    cap.release()

    # Merge events that are close in time into highlight windows.
    buffer_start, buffer_end = 7, 10  # seconds of context before/after an event
    current_highlight = None
    for event in events:
        if current_highlight is None:
            current_highlight = {
                'start': max(event['timestamp'] - buffer_start, 0),
                'end': event['timestamp'] + buffer_end,
            }
        elif event['timestamp'] <= current_highlight['end'] + 10:
            # Event falls within (or within 10 s of) the open window: extend it.
            current_highlight['end'] = event['timestamp'] + buffer_end
        else:
            # Gap too large: keep the window if it lasts >= 10 s, start a new one.
            if current_highlight['end'] - current_highlight['start'] >= 10:
                highlight_moments.append(current_highlight)
            current_highlight = {
                'start': max(event['timestamp'] - buffer_start, 0),
                'end': event['timestamp'] + buffer_end,
            }
    if current_highlight and current_highlight['end'] - current_highlight['start'] >= 10:
        highlight_moments.append(current_highlight)

    # Render each highlight window and collect the clips into one ZIP.
    zip_path = os.path.join(OUTPUT_FOLDER, 'highlights.zip')
    clip_count = 0
    # Probe the duration once; the original opened (and never closed) a new
    # VideoFileClip per highlight just to read .duration — a resource leak.
    with VideoFileClip(video_path) as probe:
        video_duration = probe.duration
    with zipfile.ZipFile(zip_path, 'w') as zipf:
        for moment in highlight_moments:
            start_time = max(moment['start'], 0)
            end_time = min(moment['end'], video_duration)
            try:
                with VideoFileClip(video_path) as video_segment:
                    clip = video_segment.subclip(start_time, end_time)
                    clip_filename = os.path.join(
                        clip_output_folder, f"highlight_{clip_count + 1}.mp4"
                    )
                    clip.write_videofile(clip_filename, codec="libx264", audio_codec="aac")
                    zipf.write(clip_filename, arcname=f"highlight_{clip_count + 1}.mp4")
                # Count only clips that actually rendered and were archived, so
                # the completion message reflects the ZIP contents.
                clip_count += 1
            except Exception as e:
                print(f"Error saving clip from {start_time} to {end_time}: {e}")

    completion_message = f"Rendering complete. {clip_count} highlights were generated."
    print(completion_message)
    return completion_message, zip_path
def gradio_interface(video):
    """Gradio entry point: accept an uploaded video, return (status, zip path).

    The interface declares two outputs (a Textbox and a File), so every code
    path must return a 2-tuple; the original error paths returned a single
    string, which Gradio cannot map onto two output components.

    Parameters
    ----------
    video : str | object
        Temp-file path supplied by gr.Video, or a file-like upload exposing
        .save() as a fallback.

    Returns
    -------
    tuple[str, str | None]
        (status message, path to the highlights ZIP or None on failure).
    """
    try:
        if isinstance(video, str):
            # gr.Video normally hands us a temp-file path directly.
            file_path = video
        else:
            file_path = os.path.join(UPLOAD_FOLDER, "uploaded_video.mp4")
            video.save(file_path)
        result_message, result_path = process_video(file_path)
        # Guard against a None/non-ZIP path before calling .endswith on it.
        if result_path and result_path.endswith(".zip"):
            return f"{result_message}\n\nDownload Highlights:", result_path
        return result_message or "Error processing video", None
    except Exception as e:  # top-level boundary: log and report, never crash the UI
        print(f"Error occurred: {e}")
        return "An unexpected error occurred during processing.", None
# Gradio Interface
# The two output components correspond to the (status message, zip path)
# tuple returned by gradio_interface.
interface = gr.Interface(
    fn=gradio_interface,
    inputs=gr.Video(label="Upload Video (MP4/AVI/MOV/MKV)"),
    outputs=[
        gr.Textbox(label="Status"),
        gr.File(label="Download Highlights ZIP")
    ],
    title="Mobile Legends AI Highlights Generator",
    # User-facing markdown shown above the interface.
    description=
    """
Welcome to the Mobile Legends AI Highlights Generator!
This tool uses a YOLOv8n model to analyze your gameplay video and generate highlights automatically. If you prefer a larger model, such as YOLOv8m, or want to download the model for local use, please visit the following page:
[https://huggingface.co/frendyrachman/mlbb-ai-clipper](https://huggingface.co/frendyrachman/mlbb-ai-clipper)
**How to use this tool:**
1. Upload your gameplay video (formats supported: MP4, AVI, MOV, MKV).
2. Click "Submit" and wait for the processing to complete.
3. Once done, download the "highlight.zip" file containing your video highlights.
**Important Notes:**
- This tool runs on a free Hugging Face Space with hardware specifications of 2 vCPUs and 16GB RAM.
- Processing time depends on the following factors:
- **Video duration:** Longer videos require more time.
- **Video resolution:** Higher resolutions take longer to process, while lower resolutions may result in reduced clip quality.
Thank you for using this tool! We hope it enhances your gaming experience.
    """
)

# Launch the UI only when run as a script (not when imported).
if __name__ == "__main__":
    interface.launch()