Ars135 committed on
Commit
5920006
·
verified ·
1 Parent(s): 2d1ba99

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +31 -25
app.py CHANGED
@@ -1,30 +1,36 @@
1
  import gradio as gr
2
- from deepface import DeepFace
3
- import traceback
 
 
4
 
5
- def detect_emotion(img):
6
- try:
7
- # DeepFace handles preprocessing and detection; enforce_detection=False avoids crashes on non-perfect faces
8
- result = DeepFace.analyze(img, actions=['emotion'], enforce_detection=False)
9
- emotion = result.get('dominant_emotion', None)
10
- scores = result.get('emotion', {})
11
- lines = []
12
- if emotion:
13
- lines.append(f"Dominant emotion: {emotion}")
14
- if scores:
15
- for k, v in sorted(scores.items(), key=lambda x: -x[1]):
16
- lines.append(f"{k}: {v:.2f}")
17
- return "\\n".join(lines) if lines else "No result"
18
- except Exception as e:
19
- return "Error during analysis:\\n" + str(e) + "\\n" + traceback.format_exc()
20
 
21
- iface = gr.Interface(
22
- fn=detect_emotion,
23
- inputs=gr.Image(type="numpy", label="Upload face image"),
24
- outputs=gr.Textbox(label="Emotion Output"),
25
- title="Emotion Detection (DeepFace)",
26
- description="Detect facial emotions using DeepFace pretrained models."
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  )
28
 
29
- if __name__ == "__main__":
30
- iface.launch()
 
1
  import gradio as gr
2
+ import torch
3
+ from torchvision import transforms
4
+ from PIL import Image
5
+ from transformers import AutoModelForImageClassification, AutoImageProcessor
6
 
7
+ model_name = "nateraw/fer-2013"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
 
9
+ processor = AutoImageProcessor.from_pretrained(model_name)
10
+ model = AutoModelForImageClassification.from_pretrained(model_name)
11
+
12
+ transform = transforms.Compose([
13
+ transforms.Resize((224, 224)),
14
+ transforms.ToTensor()
15
+ ])
16
+
17
+ def predict(img):
18
+ img = Image.fromarray(img).convert("RGB")
19
+ inputs = processor(images=img, return_tensors="pt")
20
+ with torch.no_grad():
21
+ outputs = model(**inputs)
22
+ logits = outputs.logits
23
+ prob = logits.softmax(dim=1)
24
+ score, label_id = torch.max(prob, dim=1)
25
+ label = model.config.id2label[label_id.item()]
26
+ return f"Emotion: {label} ({score.item():.2f})"
27
+
28
+ ui = gr.Interface(
29
+ fn=predict,
30
+ inputs=gr.Image(type="numpy", label="Upload Image"),
31
+ outputs="text",
32
+ title="Emotion Detection (PyTorch)",
33
+ description="Detect emotions using a lightweight PyTorch model (FER-2013)."
34
  )
35
 
36
+ ui.launch()