Spaces:
Build error
Build error
Update app.py
Browse filesadded user feedback
app.py
CHANGED
|
@@ -3,6 +3,9 @@ from torchvision import models, transforms
|
|
| 3 |
from PIL import Image
|
| 4 |
import gradio as gr
|
| 5 |
import matplotlib.pyplot as plt
|
|
|
|
|
|
|
|
|
|
| 6 |
from huggingface_hub import hf_hub_download
|
| 7 |
|
| 8 |
# Modell laden vom Hugging Face Model Hub
|
|
@@ -28,6 +31,15 @@ transform = transforms.Compose([
|
|
| 28 |
transforms.ToTensor()
|
| 29 |
])
|
| 30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
def plot_probabilities(probabilities, labels):
|
| 32 |
probs = probabilities.cpu().numpy().flatten()
|
| 33 |
fig, ax = plt.subplots(figsize=(8, 4))
|
|
@@ -39,12 +51,13 @@ def plot_probabilities(probabilities, labels):
|
|
| 39 |
plt.tight_layout()
|
| 40 |
return fig
|
| 41 |
|
|
|
|
| 42 |
def predict_emotion(image):
|
| 43 |
image = image.convert("RGB")
|
| 44 |
-
|
| 45 |
|
| 46 |
with torch.no_grad():
|
| 47 |
-
outputs = model(
|
| 48 |
probs = torch.softmax(outputs, dim=1)
|
| 49 |
|
| 50 |
# Top 3 Predictions
|
|
@@ -64,21 +77,52 @@ def predict_emotion(image):
|
|
| 64 |
# Bar Chart
|
| 65 |
fig = plot_probabilities(probs, labels)
|
| 66 |
|
| 67 |
-
#
|
| 68 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
|
| 70 |
# Gradio Interface
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
gr.
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 83 |
|
| 84 |
interface.launch()
|
|
|
|
| 3 |
from PIL import Image
|
| 4 |
import gradio as gr
|
| 5 |
import matplotlib.pyplot as plt
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import os
|
| 8 |
+
import hashlib
|
| 9 |
from huggingface_hub import hf_hub_download
|
| 10 |
|
| 11 |
# Modell laden vom Hugging Face Model Hub
|
|
|
|
| 31 |
transforms.ToTensor()
|
| 32 |
])
|
| 33 |
|
| 34 |
+
# Feedback-File
|
| 35 |
+
FEEDBACK_FILE = "user_feedback.csv"
|
| 36 |
+
|
| 37 |
+
def get_image_hash(image):
    """Return the MD5 hex digest of the image's raw pixel bytes.

    The digest serves as a stable, anonymous identifier so that a stored
    feedback row can later be matched back to the uploaded image.
    """
    return hashlib.md5(image.tobytes()).hexdigest()
|
| 41 |
+
|
| 42 |
+
# Plot-Funktion
|
| 43 |
def plot_probabilities(probabilities, labels):
|
| 44 |
probs = probabilities.cpu().numpy().flatten()
|
| 45 |
fig, ax = plt.subplots(figsize=(8, 4))
|
|
|
|
| 51 |
plt.tight_layout()
|
| 52 |
return fig
|
| 53 |
|
| 54 |
+
# Prediction-Funktion
|
| 55 |
def predict_emotion(image):
|
| 56 |
image = image.convert("RGB")
|
| 57 |
+
transformed_image = transform(image).unsqueeze(0).to(device)
|
| 58 |
|
| 59 |
with torch.no_grad():
|
| 60 |
+
outputs = model(transformed_image)
|
| 61 |
probs = torch.softmax(outputs, dim=1)
|
| 62 |
|
| 63 |
# Top 3 Predictions
|
|
|
|
| 77 |
# Bar Chart
|
| 78 |
fig = plot_probabilities(probs, labels)
|
| 79 |
|
| 80 |
+
# Bild-Hash für spätere Zuordnung
|
| 81 |
+
img_hash = get_image_hash(image)
|
| 82 |
+
|
| 83 |
+
return prediction, f"Confidence: {confidence.item()*100:.2f}%\n{prediction_status}", top3, fig, img_hash
|
| 84 |
+
|
| 85 |
+
# Persist one row of user feedback to the CSV feedback file.
def save_feedback(img_hash, model_prediction, user_feedback, confidence):
    """Append a single feedback record to FEEDBACK_FILE.

    Args:
        img_hash: MD5 hex digest identifying the uploaded image.
        model_prediction: label the model predicted.
        user_feedback: label the user selected as the correct emotion.
        confidence: confidence string reported to the user.

    Returns:
        A thank-you status message for display in the UI.
    """
    df_new = pd.DataFrame({
        "image_hash": [img_hash],
        "model_prediction": [model_prediction],
        "user_feedback": [user_feedback],
        "confidence": [confidence],
    })
    # Append in-place instead of read-concat-rewrite: the original loaded the
    # whole CSV for every submission (O(n) per feedback); appending with a
    # header only on first write produces the identical file in O(1).
    write_header = not os.path.exists(FEEDBACK_FILE)
    df_new.to_csv(FEEDBACK_FILE, mode="a", header=write_header, index=False)
    return "✅ Vielen Dank für dein Feedback!"
|
| 101 |
+
|
| 102 |
+
# Combined step: predict the emotion, then store the user's feedback.
def full_pipeline(image, user_feedback):
    """Run prediction on *image*, save *user_feedback*, and return UI outputs."""
    pred_label, conf_text, top3_table, chart, image_hash = predict_emotion(image)
    # Only the first line of the confidence text (the raw percentage) is stored.
    reported_confidence = conf_text.split("\n")[0]
    status_message = save_feedback(image_hash, pred_label, user_feedback, reported_confidence)
    return pred_label, conf_text, top3_table, chart, status_message
|
| 107 |
|
| 108 |
# Gradio interface: two-column layout — image upload and feedback on the
# left, prediction results and feedback status on the right.
with gr.Blocks() as interface:
    with gr.Row():
        with gr.Column():
            uploaded_image = gr.Image(type="pil", label="Lade dein Bild hoch")
            feedback_choice = gr.Dropdown(
                choices=labels,
                label="Dein Feedback: Was ist die richtige Emotion?",
            )
            send_button = gr.Button("Absenden")
        with gr.Column():
            predicted_label = gr.Textbox(label="Vorhergesagte Emotion")
            confidence_box = gr.Textbox(label="Confidence + Einschätzung")
            top3_table = gr.Dataframe(
                headers=["Emotion", "Wahrscheinlichkeit (%)"],
                label="Top 3 Emotionen",
            )
            probability_plot = gr.Plot(label="Verteilung der Emotionen")
            feedback_status = gr.Textbox(label="Feedback-Status")

    # One click runs prediction and feedback storage in a single step.
    send_button.click(
        fn=full_pipeline,
        inputs=[uploaded_image, feedback_choice],
        outputs=[
            predicted_label,
            confidence_box,
            top3_table,
            probability_plot,
            feedback_status,
        ],
    )

interface.launch()
|