# act-experm / app.py
# Hugging Face Space by foryahasake — commit ff3f9ef (verified) "Update app.py"
import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
import tf_keras
import gradio as gr
import requests
import os
# Direct-download URL and local filename for the .h5 weights.
# NOTE(review): both appear unused — the model is loaded via
# from_pretrained_keras below, not via `requests`; confirm before removing.
model_url = "https://huggingface.co/foryahasake/act-h5/resolve/main/bestXceptionPlusData.h5"
model_filename = "bestXceptionPlusData.h5"
# Load the Keras model directly from the Hub repo (downloads and caches on first run).
from huggingface_hub import from_pretrained_keras
model = from_pretrained_keras( "foryahasake/act-h5")
def inference(inp):
    """Predict the facial emotion shown in an image.

    Args:
        inp: numpy array from the Gradio image input — either grayscale
            (H, W) or color (H, W, 3) RGB / (H, W, 4) RGBA.

    Returns:
        str: one of "Angry", "Disgust", "Fear", "Happy", "Sad",
        "Surprise", "Neutral".
    """
    if inp.ndim == 3:
        # Color input: collapse to a single grayscale channel. Handle an
        # alpha channel explicitly — cv2.COLOR_RGB2GRAY requires exactly
        # 3 channels and raises on RGBA input.
        if inp.shape[2] == 4:
            pil_gray = cv2.cvtColor(inp, cv2.COLOR_RGBA2GRAY)
        else:
            pil_gray = cv2.cvtColor(inp, cv2.COLOR_RGB2GRAY)
    else:
        pil_gray = inp
    # Model expects 48x48 grayscale, values scaled to [0, 1].
    resized_img = cv2.resize(pil_gray, (48, 48), interpolation=cv2.INTER_CUBIC)
    model_input = resized_img.astype(np.float32) / 255.0  # float32: matches typical Keras input dtype
    # Shape (48, 48) -> (1, 48, 48, 1): add batch and channel dimensions.
    model_input = np.expand_dims(model_input, axis=-1)
    model_input = np.expand_dims(model_input, axis=0)
    predictions = model.predict(model_input)[0]
    predicted_class = int(np.argmax(predictions))
    # Index order must match the training label encoding — TODO confirm
    # against the act-h5 repo.
    emotion_labels = ["Angry", "Disgust", "Fear", "Happy", "Sad", "Surprise", "Neutral"]
    return emotion_labels[predicted_class]
# Wire the classifier into a minimal Gradio UI: an image upload in, a
# predicted label out ("image"/"label" are component shorthands).
demo = gr.Interface(fn=inference, inputs="image", outputs="label")
demo.launch()  # starts the web server (blocking call; serves the Space)