SEARO1 committed on
Commit
ffdd436
·
verified ·
1 Parent(s): 51ba167

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -13
app.py CHANGED
@@ -3,30 +3,26 @@ from transformers import AutoModelForImageClassification, AutoImageProcessor
3
  from PIL import Image
4
  import torch
5
 
6
# Your model ID on the Hugging Face Hub
model_id = "SEAR01/FER_model"
# Processor handles image preprocessing; model performs classification.
processor = AutoImageProcessor.from_pretrained(model_id)
model = AutoModelForImageClassification.from_pretrained(model_id)

# Index order must match the model head: emotion_labels[argmax] below.
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
11
 
12
def predict_emotion(image):
    """Classify the emotion in a face image.

    Args:
        image: a PIL image (Gradio `type="pil"`), or None when nothing
            was uploaded.

    Returns:
        A human-readable string with the predicted label and its softmax
        confidence, or a prompt string when `image` is None.
    """
    if image is None:
        return "Please upload an image."
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():  # inference only; no gradients needed
        outputs = model(**inputs)
    # argmax over logits picks the same class as argmax over softmax.
    predicted = outputs.logits.argmax(-1).item()
    emotion = emotion_labels[predicted]
    confidence = torch.softmax(outputs.logits, dim=-1)[0][predicted].item()
    return f"Detected emotion: {emotion}\nConfidence: {confidence:.2f}"
22
 
23
# Gradio interface: PIL image in, text prediction out.
iface = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Image(type="pil", label="Upload a face image"),
    outputs=gr.Textbox(label="Prediction"),
    title="FER Mental Health Detector",
    description="Upload an image to detect emotions and assess mental health risk."
)
31
# Launch the web app only when executed directly (not on import).
if __name__ == "__main__":
    iface.launch()
 
3
  from PIL import Image
4
  import torch
5
 
 
6
# Hugging Face Hub ID of the facial-expression-recognition model.
model_id = "SEAR01/FER_model"
try:
    processor = AutoImageProcessor.from_pretrained(model_id)
    # trust_remote_code=True lets the repo ship custom model code.
    # SECURITY NOTE(review): this executes arbitrary Python from the Hub
    # repo at load time — only keep it for repos you control or trust.
    model = AutoModelForImageClassification.from_pretrained(model_id, trust_remote_code=True)
except Exception as e:
    # Explicitly chain the original exception (`from e`) so the real
    # cause and its traceback are preserved for debugging.
    raise ValueError(f"Model load failed: {e}. Check repo files.") from e
12
+
13
# Class labels in the index order emitted by the model's output head.
emotion_labels = "angry disgust fear happy neutral sad surprise".split()
14
 
15
def predict_emotion(image):
    """Classify the facial emotion shown in *image*.

    Args:
        image: a PIL image (Gradio `type="pil"`), or None when nothing
            was uploaded.

    Returns:
        "Emotion: <label> (Confidence: <p>)" for a prediction, or a
        short prompt string when no image was supplied.
    """
    # Guard clause: Gradio passes None when the input is empty.
    if image is None:
        return "Upload an image."

    batch = processor(images=image, return_tensors="pt")
    with torch.no_grad():  # pure inference — skip autograd bookkeeping
        logits = model(**batch).logits

    # Softmax first, then argmax: picks the same class as argmax(logits)
    # and gives the confidence in a single pass over the probabilities.
    probs = torch.softmax(logits, dim=-1)[0]
    top_idx = probs.argmax(-1).item()
    confidence = probs[top_idx].item()
    emotion = emotion_labels[top_idx]
    return f"Emotion: {emotion} (Confidence: {confidence:.2f})"
25
 
26
# Minimal Gradio UI: PIL image in, plain-text prediction out.
iface = gr.Interface(fn=predict_emotion, inputs=gr.Image(type="pil"), outputs="text", title="FER Demo")
 
27
# Launch the web app only when executed directly (not on import).
if __name__ == "__main__":
    iface.launch()