PatriciaWening committed on
Commit
9ae9faf
·
verified ·
1 Parent(s): 005162f

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +184 -175
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,176 +1,185 @@
1
- import os
2
- import cv2
3
- import numpy as np
4
- import base64
5
- import json
6
- from io import BytesIO
7
- from PIL import Image
8
- import tensorflow as tf
9
- from tensorflow import keras
10
- from fastapi import FastAPI, Request, Form, File, UploadFile
11
- from fastapi.middleware.cors import CORSMiddleware
12
- from fastapi.responses import JSONResponse
13
- import uvicorn
14
- from huggingface_hub import hf_hub_download
15
-
16
- emotion_model = None
17
-
18
- app = FastAPI()
19
-
20
- @app.on_event("startup")
21
- def load_model():
22
- global emotion_model
23
- model_path = hf_hub_download(
24
- repo_id="PatriciaWening/moodDetection",
25
- filename="moodDetection.keras", # pastikan sesuai
26
- cache_dir="/tmp"
27
- )
28
- # ini akan assign objek keras.Model
29
- emotion_model = keras.models.load_model(model_path)
30
-
31
- emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']
32
-
33
- face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
34
-
35
- app.add_middleware(
36
- CORSMiddleware,
37
- allow_origins=["*"],
38
- allow_credentials=True,
39
- allow_methods=["*"],
40
- allow_headers=["*"],
41
- )
42
-
43
- sessions = {}
44
-
45
- @app.post("/api/deteksi-emosi")
46
- async def detect_emotion(request: Request):
47
- try:
48
- form = await request.form()
49
- image_data = form.get("image")
50
-
51
- print(f"Received request with sessionId: {form.get('sessionId', 'not provided')}")
52
- print(f"Image data received: {bool(image_data)}")
53
-
54
- if image_data and "base64" in image_data:
55
- try:
56
- base64_data = image_data.split(',')[1]
57
- image_bytes = base64.b64decode(base64_data)
58
-
59
- print("Successfully decoded base64 data")
60
-
61
- img = Image.open(BytesIO(image_bytes))
62
- img_array = np.array(img)
63
-
64
- if len(img_array.shape) > 2 and img_array.shape[2] == 3:
65
- gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
66
- else:
67
- gray = img_array
68
-
69
- print(f"Processed image shape: {gray.shape}")
70
-
71
- faces = face_cascade.detectMultiScale(gray, 1.3, 5)
72
- print(f"Detected {len(faces)} faces")
73
-
74
- if len(faces) > 0:
75
- (x, y, w, h) = faces[0]
76
-
77
- face_roi = gray[y:y+h, x:x+w]
78
-
79
- resized_face = cv2.resize(face_roi, (48, 48))
80
- normalized_face = resized_face / 255.0
81
- reshaped_face = normalized_face.reshape(1, 48, 48, 1)
82
-
83
- prediction = emotion_model.predict(reshaped_face)
84
- emotion_idx = np.argmax(prediction[0])
85
- emotion = emotion_labels[emotion_idx]
86
- confidence = float(prediction[0][emotion_idx])
87
-
88
- stress_mapping = {
89
- 'angry': 85, 'disgust': 65, 'fear': 70,
90
- 'sad': 75, 'surprise': 45, 'neutral': 30, 'happy': 15
91
- }
92
-
93
- stress_level = stress_mapping.get(emotion, 50)
94
-
95
- session_id = form.get("sessionId", "default")
96
- if session_id not in sessions:
97
- sessions[session_id] = {
98
- "emotions": [],
99
- "stress_levels": []
100
- }
101
-
102
- sessions[session_id]["emotions"].append(emotion)
103
- sessions[session_id]["stress_levels"].append(stress_level)
104
-
105
- result = {
106
- "emotion": emotion,
107
- "confidence": confidence,
108
- "stressLevel": stress_level,
109
- "faceDetected": True,
110
- "faceRegion": {"x": int(x), "y": int(y), "width": int(w), "height": int(h)}
111
- }
112
- else:
113
- result = {
114
- "emotion": "unknown",
115
- "confidence": 0,
116
- "stressLevel": 0,
117
- "faceDetected": False
118
- }
119
- except Exception as e:
120
- print(f"Error processing image: {str(e)}")
121
- import traceback
122
- traceback.print_exc()
123
- result = {"error": f"Image processing error: {str(e)}"}
124
- else:
125
- result = {"error": "Invalid image data"}
126
-
127
- except Exception as e:
128
- print(f"Request handling error: {str(e)}")
129
- import traceback
130
- traceback.print_exc()
131
- result = {"error": f"Server error: {str(e)}"}
132
-
133
- response = JSONResponse(content=result)
134
- response.headers["Access-Control-Allow-Origin"] = "*"
135
- response.headers["Access-Control-Allow-Credentials"] = "true"
136
- return response
137
-
138
- @app.get("/api/session-report/{session_id}")
139
- async def session_report(session_id: str):
140
- if session_id not in sessions:
141
- return JSONResponse(content={"error": "Session not found"}, status_code=404)
142
-
143
- session_data = sessions[session_id]
144
-
145
- if session_data["emotions"]:
146
- emotion_counts = {}
147
- for emotion in session_data["emotions"]:
148
- if emotion in emotion_counts:
149
- emotion_counts[emotion] += 1
150
- else:
151
- emotion_counts[emotion] = 1
152
-
153
- dominant_emotion = max(emotion_counts, key=emotion_counts.get)
154
-
155
- avg_stress = sum(session_data["stress_levels"]) / len(session_data["stress_levels"])
156
-
157
- min_stress = min(session_data["stress_levels"])
158
- max_stress = max(session_data["stress_levels"])
159
-
160
- result = {
161
- "dominantEmotion": dominant_emotion,
162
- "emotionCounts": emotion_counts,
163
- "averageStressLevel": round(avg_stress, 2),
164
- "minStressLevel": min_stress,
165
- "maxStressLevel": max_stress,
166
- "totalFrames": len(session_data["emotions"])
167
- }
168
- else:
169
- result = {
170
- "error": "No data in session"
171
- }
172
-
173
- return JSONResponse(content=result)
174
-
175
- if __name__ == "__main__":
 
 
 
 
 
 
 
 
 
176
  uvicorn.run("app:app", host="127.0.0.1", port=8080, reload=True)
 
1
+ import os
2
+ import cv2
3
+ import numpy as np
4
+ import base64
5
+ import json
6
+ from io import BytesIO
7
+ from PIL import Image
8
+ import tensorflow as tf
9
+ from tensorflow import keras
10
+ from fastapi import FastAPI, Request, Form, File, UploadFile
11
+ from fastapi.middleware.cors import CORSMiddleware
12
+ from fastapi.responses import JSONResponse
13
+ import uvicorn
14
+ import requests
15
+
16
# Loaded by load_model() at application startup; stays None until then,
# so the predict endpoint must not be hit before startup completes.
emotion_model = None

app = FastAPI()
19
+
20
@app.on_event("startup")
def load_model():
    """Download the emotion model from Google Drive and load it into memory.

    Runs once at application startup and assigns the loaded ``keras.Model``
    to the module-level ``emotion_model`` used by ``/api/deteksi-emosi``.

    Raises:
        Exception: if the download does not return HTTP 200.
    """
    global emotion_model

    file_id = "1ist4U_0oCmHe7atGZvnxRCDMEGXCqqke"  # Google Drive id of moodDetection.keras
    url = f"https://drive.google.com/uc?export=download&id={file_id}"
    output_path = "/tmp/moodDetection.keras"

    print("📥 Mengunduh model dari Google Drive...")
    # Stream the body to disk instead of buffering the whole model in RAM
    # (response.content), and bound the request so startup cannot hang forever.
    response = requests.get(url, stream=True, timeout=300)
    if response.status_code != 200:
        raise Exception("❌ Gagal mengunduh model dari Google Drive")

    # NOTE(review): for files above Drive's scan limit this URL returns an
    # HTML "virus scan" interstitial, not the binary — if load_model() fails
    # on a bad file, handle the confirm token (or use gdown). TODO confirm
    # the hosted file is below that limit.
    with open(output_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=1 << 20):
            f.write(chunk)

    print("✅ Model berhasil diunduh, memuat ke memory...")
    emotion_model = keras.models.load_model(output_path)
    print("✅ Model siap digunakan!")
39
+
40
# Class labels in the index order of the model's softmax output.
emotion_labels = ['angry', 'disgust', 'fear', 'happy', 'neutral', 'sad', 'surprise']

# Haar-cascade frontal-face detector shipped with OpenCV.
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Allow browser clients from any origin to call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# In-memory per-session history: sessionId -> {"emotions": [...], "stress_levels": [...]}.
# NOTE(review): grows without bound — entries are never evicted.
sessions = {}
53
+
54
@app.post("/api/deteksi-emosi")
async def detect_emotion(request: Request):
    """Detect the dominant emotion in a base64-encoded image frame.

    Form fields:
        image     -- data-URL string ("data:image/...;base64,<payload>").
        sessionId -- optional session key; results are accumulated per
                     session for /api/session-report.

    Returns:
        JSONResponse with the predicted emotion, confidence, mapped stress
        level and face region, or an error payload on failure.
    """
    try:
        form = await request.form()
        image_data = form.get("image")

        print(f"Received request with sessionId: {form.get('sessionId', 'not provided')}")
        print(f"Image data received: {bool(image_data)}")

        if image_data and "base64" in image_data:
            try:
                # Strip the "data:image/...;base64," data-URL prefix.
                base64_data = image_data.split(',')[1]
                image_bytes = base64.b64decode(base64_data)

                print("Successfully decoded base64 data")

                img = Image.open(BytesIO(image_bytes))
                img_array = np.array(img)

                # Convert to grayscale. Handle RGBA (e.g. PNG with alpha)
                # explicitly — previously a 4-channel array fell through to
                # the else branch and broke detectMultiScale.
                if img_array.ndim == 3 and img_array.shape[2] == 4:
                    gray = cv2.cvtColor(img_array, cv2.COLOR_RGBA2GRAY)
                elif img_array.ndim == 3 and img_array.shape[2] == 3:
                    gray = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
                else:
                    gray = img_array

                print(f"Processed image shape: {gray.shape}")

                faces = face_cascade.detectMultiScale(gray, 1.3, 5)
                print(f"Detected {len(faces)} faces")

                if len(faces) > 0:
                    # Only the first detected face is classified.
                    (x, y, w, h) = faces[0]

                    face_roi = gray[y:y+h, x:x+w]

                    # Model expects a 48x48 grayscale image scaled to [0, 1].
                    resized_face = cv2.resize(face_roi, (48, 48))
                    normalized_face = resized_face / 255.0
                    reshaped_face = normalized_face.reshape(1, 48, 48, 1)

                    prediction = emotion_model.predict(reshaped_face)
                    emotion_idx = np.argmax(prediction[0])
                    emotion = emotion_labels[emotion_idx]
                    confidence = float(prediction[0][emotion_idx])

                    # Heuristic emotion -> stress score (0-100); unknown
                    # labels map to a neutral 50.
                    stress_mapping = {
                        'angry': 85, 'disgust': 65, 'fear': 70,
                        'sad': 75, 'surprise': 45, 'neutral': 30, 'happy': 15
                    }

                    stress_level = stress_mapping.get(emotion, 50)

                    # Accumulate per-session history for /api/session-report.
                    session_id = form.get("sessionId", "default")
                    if session_id not in sessions:
                        sessions[session_id] = {
                            "emotions": [],
                            "stress_levels": []
                        }

                    sessions[session_id]["emotions"].append(emotion)
                    sessions[session_id]["stress_levels"].append(stress_level)

                    result = {
                        "emotion": emotion,
                        "confidence": confidence,
                        "stressLevel": stress_level,
                        "faceDetected": True,
                        "faceRegion": {"x": int(x), "y": int(y), "width": int(w), "height": int(h)}
                    }
                else:
                    result = {
                        "emotion": "unknown",
                        "confidence": 0,
                        "stressLevel": 0,
                        "faceDetected": False
                    }
            except Exception as e:
                print(f"Error processing image: {str(e)}")
                import traceback
                traceback.print_exc()
                result = {"error": f"Image processing error: {str(e)}"}
        else:
            result = {"error": "Invalid image data"}

    except Exception as e:
        print(f"Request handling error: {str(e)}")
        import traceback
        traceback.print_exc()
        result = {"error": f"Server error: {str(e)}"}

    # Redundant with the CORS middleware, but kept for clients that read
    # these headers directly.
    response = JSONResponse(content=result)
    response.headers["Access-Control-Allow-Origin"] = "*"
    response.headers["Access-Control-Allow-Credentials"] = "true"
    return response
146
+
147
@app.get("/api/session-report/{session_id}")
async def session_report(session_id: str):
    """Summarize a detection session: dominant emotion and stress statistics.

    Returns:
        JSONResponse with dominant emotion, per-emotion counts, and
        average/min/max stress levels; 404 when the session id is unknown;
        an error payload when the session has recorded no frames.
    """
    if session_id not in sessions:
        return JSONResponse(content={"error": "Session not found"}, status_code=404)

    session_data = sessions[session_id]

    if session_data["emotions"]:
        # Frequency of each predicted emotion over the session.
        emotion_counts = {}
        for emotion in session_data["emotions"]:
            emotion_counts[emotion] = emotion_counts.get(emotion, 0) + 1

        dominant_emotion = max(emotion_counts, key=emotion_counts.get)

        stress_levels = session_data["stress_levels"]
        avg_stress = sum(stress_levels) / len(stress_levels)

        result = {
            "dominantEmotion": dominant_emotion,
            "emotionCounts": emotion_counts,
            "averageStressLevel": round(avg_stress, 2),
            "minStressLevel": min(stress_levels),
            "maxStressLevel": max(stress_levels),
            "totalFrames": len(session_data["emotions"])
        }
    else:
        result = {
            "error": "No data in session"
        }

    return JSONResponse(content=result)
183
+
184
# Local development entry point (production runs via the platform's server).
# NOTE(review): host="127.0.0.1" is only reachable from the same machine; a
# containerized deployment (e.g. HF Spaces) would need host="0.0.0.0" and the
# platform's expected port — confirm against the deployment target.
if __name__ == "__main__":
    uvicorn.run("app:app", host="127.0.0.1", port=8080, reload=True)
requirements.txt CHANGED
@@ -4,4 +4,4 @@ tensorflow
4
  opencv-python-headless
5
  pillow
6
  python-multipart
7
- huggingface_hub
 
4
  opencv-python-headless
5
  pillow
6
  python-multipart
7
+ requests