# app.py
import torch
import torch.nn as nn
from transformers import XCLIPProcessor, XCLIPModel
import gradio as gr
import cv2
import numpy as np
from PIL import Image

# Model: a frozen X-CLIP backbone with a small trainable classification head
class XCLIPSignLanguageClassifier(nn.Module):
    def __init__(self, num_classes, feature_dim=512):
        super().__init__()
        self.xclip = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
        # Freeze the backbone; only the MLP head below is trained.
        for param in self.xclip.parameters():
            param.requires_grad = False
        self.classifier = nn.Sequential(
            nn.Dropout(0.5), nn.Linear(feature_dim, 128), nn.LayerNorm(128), nn.ReLU(),
            nn.Dropout(0.3), nn.Linear(128, 64), nn.LayerNorm(64), nn.ReLU(),
            nn.Dropout(0.2), nn.Linear(64, num_classes),
        )

    def forward(self, input_ids, attention_mask, pixel_values):
        # Run the frozen backbone without building a graph; only the head sees gradients.
        with torch.no_grad():
            outputs = self.xclip(input_ids=input_ids, attention_mask=attention_mask,
                                 pixel_values=pixel_values, return_dict=True)
            video_embeds = outputs.video_embeds  # pooled 512-dim video representation
        return self.classifier(video_embeds)
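
# Sanity check (illustrative): with the backbone frozen, only the MLP head
# should report trainable parameters, e.g.
#   m = XCLIPSignLanguageClassifier(num_classes=10)
#   print(sum(p.numel() for p in m.parameters() if p.requires_grad))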
print("🚀 Loading Ugandan Sign Language Model...")
# Initialize
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
# Load your trained model
try:
checkpoint = torch.load("best_xclip_model.pth", map_location=device, weights_only=False)
model = XCLIPSignLanguageClassifier(num_classes=len(checkpoint["label_to_id"])).to(device)
model.load_state_dict(checkpoint["model_state_dict"])
model.eval()
id_to_label = checkpoint["id_to_label"]
print(f"✅ Model loaded! Can recognize {len(id_to_label)} signs: {list(id_to_label.values())}")
except Exception as e:
print(f"❌ Error loading model: {e}")
exit(1)
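
# Note: the checkpoint is assumed to be a dict written by the training script,
# roughly (hypothetical training-side code):
#   torch.save({"model_state_dict": model.state_dict(),
#               "label_to_id": label_to_id,   # e.g. {"hello": 0, ...}
#               "id_to_label": id_to_label},  # inverse map used for decoding
#              "best_xclip_model.pth")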

def extract_frames(video_path, num_frames=8):
    """Extract num_frames 224x224 RGB frames (as PIL images) from a video file."""
    try:
        cap = cv2.VideoCapture(video_path)
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if total_frames <= num_frames:
            # Short clip: take every frame and pad by repeating the last one.
            indices = list(range(total_frames)) + [total_frames - 1] * (num_frames - total_frames)
        else:
            # Sample uniformly from the middle two-thirds of the clip, skipping
            # the first and last sixth where the sign may not be in progress.
            start = total_frames // 6
            end = 5 * total_frames // 6
            indices = np.linspace(start, end, num_frames, dtype=int)
        frames = []
        for idx in indices:
            cap.set(cv2.CAP_PROP_POS_FRAMES, int(idx))
            ret, frame = cap.read()
            if ret:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV decodes as BGR
                frame = cv2.resize(frame, (224, 224))
                frames.append(Image.fromarray(frame))
            else:
                # Fall back to a gray placeholder if the frame cannot be decoded.
                frames.append(Image.new("RGB", (224, 224), (128, 128, 128)))
        cap.release()
        return frames
    except Exception as e:
        print(f"Frame extraction error: {e}")
        return [Image.new("RGB", (224, 224), (128, 128, 128)) for _ in range(num_frames)]
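
# Example (hypothetical clip): extract_frames("hello.mp4") returns a list of
# eight 224x224 RGB PIL images, padded with gray frames where decoding fails.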

def predict_video(video_file):
    """Gradio handler: predict the sign performed in an uploaded video."""
    try:
        predicted_label, confidence = predict_sign(video_file, model, processor, id_to_label, device)
        # Format results (same layout as the Colab interface); blank lines force
        # line breaks in the gr.Markdown output.
        result = f"🎯 **Prediction**: {predicted_label}\n\n"
        result += f"📊 **Confidence**: {confidence * 100:.1f}%\n\n"
        result += "🔍 **Model**: X-CLIP Fine-tuned"
        return result
    except Exception as e:
        return f"❌ Error processing video: {e}"

def predict_sign(video_path, model, processor, id_to_label, device):
    """Core prediction: sample frames, run the model, return (label, confidence)."""
    try:
        # Sample frames from the clip
        frames = extract_frames(video_path)
        # Preprocess the text prompt and the frames in one processor call
        inputs = processor(text=["a person performing sign language"],
                           videos=[frames], return_tensors="pt", padding=True)
        pixel_values = inputs["pixel_values"].to(device)
        input_ids = inputs["input_ids"].to(device)
        attention_mask = inputs["attention_mask"].to(device)
        with torch.no_grad():
            logits = model(input_ids, attention_mask, pixel_values)
            probs = torch.softmax(logits, dim=1)
            confidence, pred_class = torch.max(probs, dim=1)
        return id_to_label[pred_class.item()], confidence.item()
    except Exception as e:
        print(f"❌ Prediction error: {e}")
        return "Unknown", 0.0

# Build the Gradio interface (same layout as the Colab version)
demo = gr.Interface(
    fn=predict_video,
    inputs=gr.Video(label="📹 Upload Sign Language Video"),
    outputs=gr.Markdown(label="🎯 Prediction Results"),
    title="🤟 Ugandan Sign Language Recognition",
    description="Upload a video of sign language and the AI will predict which sign it is!",
    examples=[],  # example videos can be added here later
)

# Entry point for Hugging Face Spaces deployment
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)