vaibhav07112004 committed on
Commit
80ba88c
·
verified ·
1 Parent(s): fe2ea6a

Update api/app.py

Browse files
Files changed (1) hide show
  1. api/app.py +423 -335
api/app.py CHANGED
@@ -1,335 +1,423 @@
1
- from fastapi import FastAPI, File, UploadFile, HTTPException
2
- from fastapi.middleware.cors import CORSMiddleware
3
- from fastapi.responses import JSONResponse
4
- import torch
5
- import numpy as np
6
- from PIL import Image
7
- import io
8
- import json
9
- import logging
10
- import gc
11
- from torchvision import transforms
12
- import timm
13
- import os
14
- import sys
15
-
16
- from dotenv import load_dotenv
17
-
18
- load_dotenv()
19
-
20
- redis_url = os.getenv("REDIS_URL")
21
- google_client_id = os.getenv("GOOGLE_CLIENT_ID")
22
- google_client_secret = os.getenv("GOOGLE_CLIENT_SECRET")
23
-
24
- # Configure logging
25
- logging.basicConfig(level=logging.INFO)
26
- logger = logging.getLogger(__name__)
27
-
28
- # Add current directory to Python path
29
- current_dir = os.path.dirname(os.path.abspath(__file__))
30
- sys.path.insert(0, current_dir)
31
- sys.path.insert(0, '.')
32
-
33
- # Configure TensorFlow for memory optimization BEFORE importing DeepFace
34
- def configure_tensorflow_memory():
35
- """Configure TensorFlow to use memory efficiently"""
36
- try:
37
- import tensorflow as tf
38
- tf.config.threading.set_intra_op_parallelism_threads(2)
39
- tf.config.threading.set_inter_op_parallelism_threads(2)
40
- tf.config.set_visible_devices([], 'GPU')
41
- logger.info("✅ TensorFlow configured for memory optimization")
42
- return True
43
- except Exception as e:
44
- logger.error(f"❌ TensorFlow configuration failed: {e}")
45
- return False
46
-
47
- # Configure TensorFlow and import DeepFace
48
- DEEPFACE_AVAILABLE = False
49
- if configure_tensorflow_memory():
50
- try:
51
- from deepface import DeepFace
52
- DEEPFACE_AVAILABLE = True
53
- logger.info("🎉 DeepFace loaded with memory optimization!")
54
- except Exception as e:
55
- logger.error(f"❌ DeepFace loading failed: {e}")
56
- DEEPFACE_AVAILABLE = False
57
-
58
- # Initialize FastAPI app
59
- app = FastAPI(
60
- title="Vibe Detection Backend API",
61
- description="Professional emotion recognition API with DeepFace + AA-DCN + HybridResNetViT",
62
- version="1.0.0",
63
- docs_url="/docs",
64
- redoc_url="/redoc"
65
- )
66
-
67
- # Add CORS middleware
68
- app.add_middleware(
69
- CORSMiddleware,
70
- allow_origins=["*"], # Configure for your frontend domain in production
71
- allow_credentials=True,
72
- allow_methods=["GET", "POST"],
73
- allow_headers=["*"],
74
- )
75
-
76
- # Initialize availability flags
77
- ML_AVAILABLE = False
78
- CUSTOM_MODULES_AVAILABLE = False
79
-
80
- # Load ML dependencies
81
- try:
82
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
83
- ML_AVAILABLE = True
84
- logger.info(f"✅ ML dependencies loaded successfully on {device}")
85
- except ImportError as e:
86
- logger.warning(f"❌ ML dependencies not available: {e}")
87
- ML_AVAILABLE = False
88
-
89
- # Import your custom modules
90
- face_model = None
91
- try:
92
- from models.model_definitions import create_aadcn_model, HybridResNetViT
93
- from utils.image_utils import detect_face, preprocess_image
94
- from utils.mood_utils import map_vibe_to_moods, get_genres_for_moods, load_mood_genre_mapping
95
- from recommender.song_recommender import recommend_songs_for_moods_and_genres
96
- from models.vibe_model import predict_vibe
97
- from config import ensure_models_available, FACE_MODEL_PATH
98
-
99
- CUSTOM_MODULES_AVAILABLE = True
100
- logger.info("✅ Custom modules loaded successfully!")
101
-
102
- # Load mood-genre mapping
103
- load_mood_genre_mapping()
104
-
105
- # Ensure models are available from Hugging Face
106
- if ensure_models_available():
107
- face_model = create_aadcn_model(num_classes=8)
108
- face_model.load_state_dict(torch.load(FACE_MODEL_PATH, map_location=device))
109
- face_model.eval()
110
- logger.info("🎉 AA-DCN face model loaded successfully!")
111
-
112
- except Exception as e:
113
- logger.error(f"❌ Custom modules import failed: {e}")
114
- CUSTOM_MODULES_AVAILABLE = False
115
-
116
- # YOUR EXACT EMOTION MAPPINGS
117
- emotion_idx_to_label = {
118
- 0: 'angry', 1: 'contempt', 2: 'disgust', 3: 'fear',
119
- 4: 'happy', 5: 'neutral', 6: 'sad', 7: 'surprise'
120
- }
121
-
122
- dataset_to_custom = {
123
- 'angry': ['Anger', 'Annoyance', 'Disapproval'],
124
- 'contempt': ['Disapproval', 'Disconnection', 'Annoyance'],
125
- 'disgust': ['Aversion', 'Disapproval', 'Disconnection'],
126
- 'fear': ['Fear', 'Disquietment', 'Doubt/Confusion'],
127
- 'happy': ['Happiness', 'Affection', 'Pleasure', 'Excitement'],
128
- 'neutral': ['Peace', 'Esteem', 'Confidence'],
129
- 'sad': ['Sadness', 'Fatigue', 'Suffering'],
130
- 'surprise': ['Surprise', 'Anticipation', 'Excitement']
131
- }
132
-
133
- # YOUR EXACT TRANSFORM
134
- transform = transforms.Compose([
135
- transforms.Resize((224, 224)),
136
- transforms.ToTensor(),
137
- transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
138
- ])
139
-
140
- def predict_face_emotion_hybrid(image_bytes):
141
- """YOUR EXACT FACE EMOTION LOGIC: DeepFace for happy → AA-DCN for others"""
142
-
143
- # YOUR EXACT LOGIC: Try DeepFace for "happy" detection first
144
- if DEEPFACE_AVAILABLE:
145
- try:
146
- image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
147
- np_image = np.array(image)
148
- result = DeepFace.analyze(
149
- np_image,
150
- actions=['emotion'],
151
- enforce_detection=False,
152
- detector_backend='opencv'
153
- )
154
- if isinstance(result, list):
155
- result = result[0]
156
-
157
- # YOUR EXACT LOGIC: If happy detected, return happy
158
- if result['dominant_emotion'] == 'happy':
159
- logger.info("🎉 DeepFace detected HAPPY - returning happy result!")
160
- gc.collect()
161
- return 'happy', dataset_to_custom['happy']
162
- else:
163
- logger.info(f"🔄 DeepFace detected {result['dominant_emotion']} (not happy) - sending to AA-DCN")
164
- gc.collect()
165
-
166
- except Exception as e:
167
- logger.warning(f"DeepFace error: {e}. Falling back to AA-DCN model.")
168
- gc.collect()
169
-
170
- # YOUR EXACT LOGIC: Fallback to AA-DCN for all non-happy emotions
171
- if CUSTOM_MODULES_AVAILABLE and face_model is not None:
172
- try:
173
- image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
174
- img_t = transform(image).unsqueeze(0).to(device)
175
- with torch.no_grad():
176
- output = face_model(img_t)
177
- pred_idx = output.argmax(dim=1).item()
178
- dataset_label = emotion_idx_to_label.get(pred_idx, 'unknown')
179
- custom_moods = dataset_to_custom.get(dataset_label, ['Unknown'])
180
-
181
- logger.info(f"✅ AA-DCN detected: {dataset_label} - returning AA-DCN result!")
182
- return dataset_label, custom_moods
183
-
184
- except Exception as e:
185
- logger.error(f"❌ AA-DCN prediction error: {e}")
186
-
187
- # Final fallback
188
- logger.warning("⚠️ Using basic emotion fallback")
189
- return 'neutral', ['Peace', 'Esteem', 'Confidence']
190
-
191
- def predict_environment_vibe_resnetvit(image_bytes):
192
- """YOUR EXACT VIBE LOGIC: HybridResNetViT for environment/surrounding detection"""
193
- try:
194
- if CUSTOM_MODULES_AVAILABLE:
195
- image_tensor = preprocess_image(image_bytes)
196
- vibe_idx = predict_vibe(image_tensor)
197
- moods = map_vibe_to_moods(vibe_idx)
198
-
199
- logger.info(f"🌍 YOUR HybridResNetViT detected environment vibe: {moods} (vibe_idx: {vibe_idx})")
200
- return None, moods, "vibe_hybridresnetvit"
201
- else:
202
- logger.error(" Custom modules not available - cannot perform vibe detection")
203
- return None, ['error'], "vibe_unavailable"
204
-
205
- except Exception as e:
206
- logger.error(f"❌ Environment vibe detection failed: {e}")
207
- return None, ['error'], "vibe_error"
208
-
209
- # API Routes
210
- @app.get("/")
211
- async def root():
212
- """API health check and information"""
213
- return {
214
- "message": "🎭 Vibe Detection Backend API",
215
- "status": "healthy",
216
- "version": "1.0.0",
217
- "endpoints": {
218
- "analyze": "/analyze",
219
- "health": "/health",
220
- "docs": "/docs"
221
- },
222
- "models": {
223
- "deepface_available": DEEPFACE_AVAILABLE,
224
- "custom_modules_available": CUSTOM_MODULES_AVAILABLE,
225
- "face_model_loaded": face_model is not None
226
- }
227
- }
228
-
229
- @app.get("/health")
230
- async def health_check():
231
- """Detailed health check for monitoring"""
232
- return {
233
- "status": "healthy",
234
- "ml_available": ML_AVAILABLE,
235
- "deepface_available": DEEPFACE_AVAILABLE,
236
- "custom_modules_available": CUSTOM_MODULES_AVAILABLE,
237
- "face_model_loaded": face_model is not None,
238
- "device": str(device) if ML_AVAILABLE else "N/A",
239
- "platform": "Hugging Face Spaces (16GB RAM)",
240
- "your_exact_logic": {
241
- "face_detection": "DeepFace (happy) → AA-DCN (other emotions)",
242
- "environment_detection": "HybridResNetViT (surrounding vibe)"
243
- }
244
- }
245
-
246
- @app.post("/analyze")
247
- async def analyze_emotion(file: UploadFile = File(...)):
248
- """Main emotion analysis endpoint with YOUR EXACT LOGIC"""
249
-
250
- if not file.content_type.startswith('image/'):
251
- raise HTTPException(status_code=400, detail="File must be an image")
252
-
253
- if not ML_AVAILABLE:
254
- raise HTTPException(status_code=503, detail="ML models not available")
255
-
256
- try:
257
- # Read image bytes
258
- image_bytes = await file.read()
259
-
260
- # YOUR EXACT LOGIC IMPLEMENTATION
261
- if CUSTOM_MODULES_AVAILABLE:
262
- # Check if image has face
263
- if detect_face(image_bytes):
264
- logger.info("👤 Face detected - Using YOUR EXACT LOGIC: DeepFace → AA-DCN")
265
-
266
- # YOUR EXACT FACE EMOTION LOGIC
267
- label, moods = predict_face_emotion_hybrid(image_bytes)
268
-
269
- logger.info(f"🎭 Face emotion result: {label}")
270
-
271
- else:
272
- logger.info("🌍 No face detected - Using HybridResNetViT for environment vibe")
273
-
274
- # YOUR EXACT VIBE LOGIC
275
- label, moods, source = predict_environment_vibe_resnetvit(image_bytes)
276
-
277
- logger.info(f"🌍 Environment vibe result: {moods}")
278
-
279
- # Get music recommendations
280
- genres = []
281
- tracks = []
282
- try:
283
- genres = get_genres_for_moods(moods)
284
- tracks = recommend_songs_for_moods_and_genres(moods, genres, limit_per_mood=5, limit_per_genre=5)
285
- except Exception as e:
286
- logger.warning(f"⚠️ Music recommendation failed: {e}")
287
- genres = ['pop', 'indie']
288
- tracks = [
289
- {"title": "Happy Song", "artist": "Sample Artist", "genre": "pop"},
290
- {"title": "Mood Song", "artist": "Sample Artist", "genre": "indie"}
291
- ]
292
- else:
293
- # Fallback mode
294
- label = 'neutral'
295
- moods = ['Peace', 'Esteem', 'Confidence']
296
- genres = ['pop', 'indie']
297
- tracks = [{"title": "Fallback Track", "artist": "Fallback Artist", "genre": "pop"}]
298
-
299
- result = {
300
- "emotion": label,
301
- "moods": moods,
302
- "genres": genres,
303
- "tracks": tracks,
304
- "status": "success",
305
- "model_info": {
306
- "device": str(device),
307
- "deepface_available": DEEPFACE_AVAILABLE,
308
- "custom_modules_available": CUSTOM_MODULES_AVAILABLE,
309
- "face_model_loaded": face_model is not None
310
- }
311
- }
312
-
313
- logger.info(f"🎉 YOUR EXACT LOGIC COMPLETE: {label}")
314
- gc.collect()
315
-
316
- return JSONResponse(content=result)
317
-
318
- except HTTPException:
319
- raise
320
- except Exception as e:
321
- logger.error(f"❌ Analysis error: {e}")
322
- gc.collect()
323
- raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
324
-
325
- @app.exception_handler(Exception)
326
- async def global_exception_handler(request, exc):
327
- logger.error(f"❌ Global error: {exc}")
328
- return JSONResponse(
329
- status_code=500,
330
- content={"detail": f"Internal server error: {str(exc)}"}
331
- )
332
-
333
- if __name__ == "__main__":
334
- import uvicorn
335
- uvicorn.run(app, host="0.0.0.0", port=7860)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, File, UploadFile, HTTPException
2
+ from fastapi.middleware.cors import CORSMiddleware
3
+ from fastapi.responses import JSONResponse
4
+ import torch
5
+ import numpy as np
6
+ from PIL import Image
7
+ import io
8
+ import json
9
+ import logging
10
+ import gc
11
+ from torchvision import transforms
12
+ import timm
13
+ import os
14
+ import sys
15
+ from dotenv import load_dotenv
16
+ from ytmusicapi import YTMusic
17
+
18
# Load environment variables from a local .env file (no-op if absent)
load_dotenv()

# Configure logging: module-level logger per stdlib convention
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Add current directory to Python path so sibling packages (models/, utils/,
# config) import correctly regardless of the working directory uvicorn runs from
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
sys.path.append('.')
29
+
30
+ # Configure TensorFlow for memory optimization BEFORE importing DeepFace
31
def configure_tensorflow_memory():
    """Configure TensorFlow for low-memory CPU-only operation.

    Caps intra/inter-op threading at 2 and hides all GPU devices so the
    later DeepFace import stays within the container's memory budget.

    Returns:
        bool: True when TensorFlow was imported and configured, False on
        any failure (import error or config error).
    """
    try:
        import tensorflow as tf
    except Exception as e:
        logger.error(f"❌ TensorFlow configuration failed: {e}")
        return False

    try:
        tf.config.threading.set_intra_op_parallelism_threads(2)
        tf.config.threading.set_inter_op_parallelism_threads(2)
        # Force CPU-only execution.
        tf.config.set_visible_devices([], 'GPU')
    except Exception as e:
        logger.error(f"❌ TensorFlow configuration failed: {e}")
        return False

    logger.info("✅ TensorFlow configured for memory optimization")
    return True
42
+
43
# Configure TensorFlow and import DeepFace.
# Order matters: threading/GPU limits must be applied BEFORE DeepFace pulls
# TensorFlow in, otherwise the defaults are locked in for the process.
DEEPFACE_AVAILABLE = False
if configure_tensorflow_memory():
    try:
        from deepface import DeepFace
        DEEPFACE_AVAILABLE = True
        logger.info("🎉 DeepFace loaded with memory optimization on Hugging Face Spaces!")
    except Exception as e:
        # Degrade gracefully: the API still runs with AA-DCN only.
        logger.error(f"❌ DeepFace loading failed: {e}")
        DEEPFACE_AVAILABLE = False
53
+
54
# Initialize FastAPI app
app = FastAPI(
    title="Vibe Detection Backend API",
    description="Professional emotion recognition API with DeepFace + AA-DCN + HybridResNetViT + YouTube Music",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc"
)

# Add CORS middleware.
# NOTE(review): wildcard origins together with allow_credentials=True is
# rejected by browsers for credentialed requests — restrict origins in
# production as the comment below already suggests.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure for your frontend domain in production
    allow_credentials=True,
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
)
71
+
72
# Initialize availability flags
ML_AVAILABLE = False
CUSTOM_MODULES_AVAILABLE = False

# Load ML dependencies
try:
    # NOTE(review): torch is imported unconditionally at the top of this file,
    # so an ImportError here is effectively unreachable; kept as a defensive
    # guard so ML_AVAILABLE stays meaningful.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    ML_AVAILABLE = True
    logger.info(f"✅ ML dependencies loaded successfully on {device}")
except ImportError as e:
    logger.warning(f" ML dependencies not available: {e}")
    ML_AVAILABLE = False

# Initialize YouTube Music API client
try:
    # Constructed without an auth file — presumably relies on unauthenticated
    # search access; verify against ytmusicapi docs if auth-only endpoints
    # are ever needed.
    ytmusic = YTMusic()
    logger.info("✅ YouTube Music API initialized successfully!")
except Exception as e:
    # ytmusic=None is the sentinel every consumer checks before searching.
    logger.error(f"❌ YouTube Music API initialization failed: {e}")
    ytmusic = None
92
+
93
+ # YOUR EXACT YOUTUBE MUSIC RECOMMENDATION LOGIC
94
def recommend_songs_for_moods_and_genres(moods, genres, limit_per_mood=5, limit_per_genre=5):
    """Recommend songs via the YouTube Music API.

    Runs one "<term> songs" search per mood and per genre, de-duplicates
    results by videoId, and tags each track with its originating source
    ("mood" or "genre").

    Args:
        moods: iterable of mood names to search for.
        genres: iterable of genre names to search for.
        limit_per_mood: max results kept per mood query.
        limit_per_genre: max results kept per genre query.

    Returns:
        list[dict]: tracks with keys title/artist/url/source. A static
        placeholder list is returned when the API client is unavailable
        or the search raises.
    """
    if not ytmusic:
        logger.warning("⚠️ YouTube Music API not available")
        return [
            {"title": "Sample Song", "artist": "Sample Artist", "url": "https://music.youtube.com", "source": "fallback"}
        ]

    all_results = []
    seen_ids = set()

    def _collect(terms, limit, source):
        # Shared search loop — the mood and genre passes were previously
        # duplicated verbatim.
        for term in terms:
            search_results = ytmusic.search(f"{term} songs", filter="songs")
            for song in search_results[:limit]:
                song_id = song.get('videoId')
                if not song_id or song_id in seen_ids:
                    continue
                seen_ids.add(song_id)
                # Guard against results with an empty/missing artists list so
                # one malformed item no longer fails the whole recommendation.
                artists = song.get('artists') or [{}]
                all_results.append({
                    "title": song.get('title', 'Unknown Title'),
                    "artist": artists[0].get('name', 'Unknown Artist'),
                    "url": f"https://music.youtube.com/watch?v={song_id}",
                    "source": source
                })

    try:
        # Search by mood/vibe, then by genre (original ordering preserved).
        _collect(moods, limit_per_mood, "mood")
        _collect(genres, limit_per_genre, "genre")

        logger.info(f"🎵 YouTube Music API returned {len(all_results)} songs")
        return all_results

    except Exception as e:
        logger.error(f"❌ YouTube Music API error: {e}")
        return [
            {"title": "Fallback Song", "artist": "Fallback Artist", "url": "https://music.youtube.com", "source": "error"}
        ]
144
+
145
# Import your custom modules with proper error handling
face_model = None
try:
    from models.model_definitions import create_aadcn_model, HybridResNetViT
    from utils.image_utils import detect_face, preprocess_image
    from utils.mood_utils import map_vibe_to_moods, get_genres_for_moods, load_mood_genre_mapping
    from models.vibe_model import predict_vibe
    from config import ensure_models_available, FACE_MODEL_PATH

    CUSTOM_MODULES_AVAILABLE = True
    logger.info("✅ Custom modules loaded successfully!")

    # Load mood-genre mapping from CSV
    load_mood_genre_mapping()

    # Ensure models are available from Hugging Face, then load AA-DCN weights
    if ensure_models_available():
        face_model = create_aadcn_model(num_classes=8)
        face_model.load_state_dict(torch.load(FACE_MODEL_PATH, map_location=device))
        face_model.eval()
        logger.info("🎉 AA-DCN face model loaded successfully!")

except Exception as e:
    # Catch Exception, not just ImportError: the try body also runs CSV
    # parsing and torch.load, which raise FileNotFoundError/RuntimeError —
    # the API should degrade to fallback mode instead of crashing at import.
    logger.error(f"❌ Custom modules import failed: {e}")
    CUSTOM_MODULES_AVAILABLE = False
170
+
171
# YOUR EXACT EMOTION MAPPINGS
# Class-index -> label mapping for the 8-class AA-DCN classifier output.
emotion_idx_to_label = {
    0: 'angry',
    1: 'contempt',
    2: 'disgust',
    3: 'fear',
    4: 'happy',
    5: 'neutral',
    6: 'sad',
    7: 'surprise'
}

# Dataset emotion label -> custom mood tags consumed downstream by the
# CSV genre mapping and the YouTube Music recommender.
dataset_to_custom = {
    'angry': ['Anger', 'Annoyance', 'Disapproval'],
    'contempt': ['Disapproval', 'Disconnection', 'Annoyance'],
    'disgust': ['Aversion', 'Disapproval', 'Disconnection'],
    'fear': ['Fear', 'Disquietment', 'Doubt/Confusion'],
    'happy': ['Happiness', 'Affection', 'Pleasure', 'Excitement'],
    'neutral': ['Peace', 'Esteem', 'Confidence'],
    'sad': ['Sadness', 'Fatigue', 'Suffering'],
    'surprise': ['Surprise', 'Anticipation', 'Excitement']
}

# YOUR EXACT TRANSFORM
# 224x224 resize + ImageNet mean/std normalization for the AA-DCN input.
# Left as None when ML deps are unavailable; consumers must check for None.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
]) if ML_AVAILABLE else None
200
+
201
def predict_face_emotion_hybrid(image_bytes):
    """Two-stage face emotion pipeline: DeepFace for 'happy', AA-DCN otherwise.

    Stage 1 asks DeepFace for the dominant emotion and short-circuits only
    when it says 'happy'. Every other case (non-happy, DeepFace error, or
    DeepFace unavailable) falls through to the AA-DCN classifier. A static
    neutral result is the last resort.

    Args:
        image_bytes: raw encoded image bytes.

    Returns:
        tuple[str, list[str]]: (dataset emotion label, custom mood tags).
    """
    # Stage 1: DeepFace, trusted for the 'happy' class only.
    if DEEPFACE_AVAILABLE:
        try:
            pil_img = Image.open(io.BytesIO(image_bytes)).convert('RGB')
            analysis = DeepFace.analyze(
                np.array(pil_img),
                actions=['emotion'],
                enforce_detection=False,
                detector_backend='opencv'
            )
            # DeepFace may return a list of per-face results; use the first.
            if isinstance(analysis, list):
                analysis = analysis[0]

            dominant = analysis['dominant_emotion']
            if dominant == 'happy':
                logger.info("🎉 DeepFace detected HAPPY - returning happy result!")
                gc.collect()
                return 'happy', dataset_to_custom['happy']

            logger.info(f"🔄 DeepFace detected {dominant} (not happy) - sending to AA-DCN")
            gc.collect()
        except Exception as e:
            logger.warning(f"DeepFace error: {e}. Falling back to AA-DCN model.")
            gc.collect()

    # Stage 2: AA-DCN for all non-happy emotions.
    if CUSTOM_MODULES_AVAILABLE and face_model is not None and transform is not None:
        try:
            pil_img = Image.open(io.BytesIO(image_bytes)).convert('RGB')
            batch = transform(pil_img).unsqueeze(0).to(device)
            with torch.no_grad():
                logits = face_model(batch)
            idx = logits.argmax(dim=1).item()
            label = emotion_idx_to_label.get(idx, 'unknown')
            moods = dataset_to_custom.get(label, ['Unknown'])

            logger.info(f"✅ AA-DCN detected: {label} - returning AA-DCN result!")
            return label, moods
        except Exception as e:
            logger.error(f"❌ AA-DCN prediction error: {e}")

    # Stage 3: static neutral fallback.
    logger.warning("⚠️ Using basic emotion fallback")
    return 'neutral', ['Peace', 'Esteem', 'Confidence']
251
+
252
def predict_environment_vibe_resnetvit(image_bytes):
    """Classify the surrounding/environment vibe with HybridResNetViT.

    Args:
        image_bytes: raw encoded image bytes (no face expected).

    Returns:
        tuple: (None, moods, source) — the label slot is always None for
        vibe results; source tags which path produced the moods
        ("vibe_hybridresnetvit", "vibe_unavailable", or "vibe_error").
    """
    try:
        # Guard clause: without the custom modules there is nothing to run.
        if not CUSTOM_MODULES_AVAILABLE:
            logger.error(" Custom modules not available - cannot perform vibe detection")
            return None, ['error'], "vibe_unavailable"

        tensor = preprocess_image(image_bytes)
        vibe_idx = predict_vibe(tensor)
        moods = map_vibe_to_moods(vibe_idx)

        logger.info(f"🌍 YOUR HybridResNetViT detected environment vibe: {moods} (vibe_idx: {vibe_idx})")
        return None, moods, "vibe_hybridresnetvit"

    except Exception as e:
        logger.error(f"❌ Environment vibe detection failed: {e}")
        return None, ['error'], "vibe_error"
270
+
271
+ # API Routes
272
@app.get("/")
async def root():
    """API health check and information"""
    model_status = {
        "deepface_available": DEEPFACE_AVAILABLE,
        "custom_modules_available": CUSTOM_MODULES_AVAILABLE,
        "face_model_loaded": face_model is not None,
        "youtube_music_available": ytmusic is not None
    }
    endpoint_map = {
        "analyze": "/analyze",
        "health": "/health",
        "docs": "/docs"
    }
    return {
        "message": "🎭 Vibe Detection Backend API with YouTube Music",
        "status": "healthy",
        "version": "1.0.0",
        "endpoints": endpoint_map,
        "models": model_status
    }
291
+
292
@app.get("/health")
async def health_check():
    """Detailed health check for monitoring"""
    # Assemble the report incrementally so each section reads separately.
    report = {
        "status": "healthy",
        "ml_available": ML_AVAILABLE,
        "deepface_available": DEEPFACE_AVAILABLE,
        "custom_modules_available": CUSTOM_MODULES_AVAILABLE,
        "face_model_loaded": face_model is not None,
        "youtube_music_available": ytmusic is not None
    }
    report["device"] = str(device) if ML_AVAILABLE else "N/A"
    report["platform"] = "Hugging Face Spaces (16GB RAM)"
    report["your_exact_logic"] = {
        "face_detection": "DeepFace (happy) → AA-DCN (other emotions)",
        "environment_detection": "HybridResNetViT (surrounding vibe)",
        "csv_mapping": "Mood to genre mapping from CSV file",
        "music_api": "YouTube Music API integration (Spotify removed)"
    }
    return report
311
+
312
@app.post("/analyze")
async def analyze_emotion(file: UploadFile = File(...)):
    """Main emotion analysis endpoint with YOUR EXACT LOGIC.

    Pipeline:
      1. Face present  -> hybrid DeepFace (happy) / AA-DCN emotion model.
      2. No face       -> HybridResNetViT environment vibe model.
      3. Moods -> genres via CSV mapping, then YouTube Music track search.

    Raises:
        HTTPException: 400 for non-image uploads, 503 when ML deps are
        unavailable, 500 for unexpected analysis errors.
    """
    # content_type can be None on malformed multipart uploads; guard before
    # startswith() so a bad upload yields a clean 400 instead of a 500.
    if not file.content_type or not file.content_type.startswith('image/'):
        raise HTTPException(status_code=400, detail="File must be an image")

    if not ML_AVAILABLE:
        raise HTTPException(status_code=503, detail="ML models not available")

    try:
        # Read image bytes
        image_bytes = await file.read()

        # YOUR EXACT LOGIC IMPLEMENTATION
        if CUSTOM_MODULES_AVAILABLE:
            # Check if image has face
            if detect_face(image_bytes):
                logger.info("👤 Face detected - Using YOUR EXACT LOGIC: DeepFace → AA-DCN")

                # YOUR EXACT FACE EMOTION LOGIC
                label, moods = predict_face_emotion_hybrid(image_bytes)
                source = "face_emotion"

                logger.info(f"🎭 Face emotion result: {label}")

            else:
                logger.info("🌍 No face detected - Using HybridResNetViT for environment vibe")

                # YOUR EXACT VIBE LOGIC
                label, moods, source = predict_environment_vibe_resnetvit(image_bytes)

                logger.info(f"🌍 Environment vibe result: {moods}")

            # YOUR CSV MAPPING + YOUTUBE MUSIC API LOGIC
            genres = []
            tracks = []
            try:
                # Step 1: Map moods to genres using CSV file
                genres = get_genres_for_moods(moods)

                # Step 2: Get songs from YouTube Music API
                tracks = recommend_songs_for_moods_and_genres(
                    moods, genres,
                    limit_per_mood=5,
                    limit_per_genre=5
                )

                logger.info(f"🎵 Music recommendations: {len(tracks)} tracks from {len(genres)} genres")

            except Exception as e:
                # Best-effort: a recommendation failure must not fail analysis.
                logger.warning(f"⚠️ Music recommendation failed: {e}")
                genres = ['pop', 'indie']
                tracks = [
                    {"title": "Happy Song", "artist": "Sample Artist", "url": "https://music.youtube.com", "source": "fallback"}
                ]
        else:
            # Fallback mode: custom modules missing — static neutral result.
            label = 'neutral'
            moods = ['Peace', 'Esteem', 'Confidence']
            genres = ['pop', 'indie']
            tracks = [{"title": "Fallback Track", "artist": "Fallback Artist", "url": "https://music.youtube.com", "source": "fallback"}]
            source = "fallback"

        # YOUR EXACT RESULT FORMAT
        result = {
            "emotion": label,
            "moods": moods,
            "genres": genres,  # From CSV mapping
            "tracks": tracks,  # From the YouTube Music helper above
            "source": source,
            "status": "success",
            "your_exact_logic": {
                "face_detection": "DeepFace (happy) → AA-DCN (other emotions)",
                "environment_detection": "HybridResNetViT (surrounding vibe)",
                "csv_mapping": "Mood to genre mapping completed",
                "music_api": "YouTube Music API songs retrieved (Spotify removed)"
            },
            "model_info": {
                "device": str(device),
                "deepface_available": DEEPFACE_AVAILABLE,
                "custom_modules_available": CUSTOM_MODULES_AVAILABLE,
                "face_model_loaded": face_model is not None,
                "youtube_music_available": ytmusic is not None,
                "memory_optimized": True,
                "platform": "Hugging Face Spaces (16GB RAM)"
            }
        }

        logger.info(f"🎉 YOUR EXACT LOGIC COMPLETE: {label} - {source}")
        gc.collect()

        return JSONResponse(content=result)

    except HTTPException:
        # Re-raise framework errors untouched so status codes survive.
        raise
    except Exception as e:
        logger.error(f"❌ Analysis error: {e}")
        gc.collect()
        raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
412
+
413
@app.exception_handler(Exception)
async def global_exception_handler(request, exc):
    """Last-resort handler: log the error and return a generic 500 payload."""
    logger.error(f"❌ Global error: {exc}")
    body = {"detail": f"Internal server error: {str(exc)}"}
    return JSONResponse(status_code=500, content=body)
420
+
421
if __name__ == "__main__":
    import uvicorn
    # Port 7860 is the port Hugging Face Spaces expects the app to serve on.
    uvicorn.run(app, host="0.0.0.0", port=7860)