| from fastapi import FastAPI, File, UploadFile, HTTPException |
| from fastapi.middleware.cors import CORSMiddleware |
| from fastapi.responses import JSONResponse |
| import torch |
| import numpy as np |
| from PIL import Image |
| import io |
| import json |
| import logging |
| import gc |
| from torchvision import transforms |
| import timm |
| import os |
| import sys |
| from dotenv import load_dotenv |
| from ytmusicapi import YTMusic |
|
|
| |
# Load environment variables from a local .env file (if present).
load_dotenv()

# Module-level logger; INFO so model-loading progress shows up in host logs.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Make sibling project modules (models/, utils/, config.py) importable
# regardless of the working directory the server is launched from.
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
sys.path.append('.')
|
|
| |
def configure_tensorflow_memory():
    """Constrain TensorFlow to 2 CPU threads per op pool and hide all GPUs.

    Returns:
        bool: True when TensorFlow imported and was configured; False when the
        import or any configuration call failed (logged, never raised).
    """
    try:
        import tensorflow as tf

        # Cap both thread pools so TF does not grab every core on the host.
        threading_cfg = tf.config.threading
        threading_cfg.set_intra_op_parallelism_threads(2)
        threading_cfg.set_inter_op_parallelism_threads(2)
        # Force CPU-only execution for DeepFace's TF backend.
        tf.config.set_visible_devices([], 'GPU')
    except Exception as e:
        logger.error(f"❌ TensorFlow configuration failed: {e}")
        return False
    logger.info("✅ TensorFlow configured for memory optimization")
    return True
|
|
| |
# DeepFace is optional: attempt the import only after TensorFlow has been
# configured for low memory use; degrade gracefully (flag stays False) when
# either step fails, so the AA-DCN path can still serve predictions.
DEEPFACE_AVAILABLE = False
if configure_tensorflow_memory():
    try:
        from deepface import DeepFace
        DEEPFACE_AVAILABLE = True
        logger.info("🎉 DeepFace loaded with memory optimization on Hugging Face Spaces!")
    except Exception as e:
        logger.error(f"❌ DeepFace loading failed: {e}")
        DEEPFACE_AVAILABLE = False
|
|
| |
# FastAPI application; interactive API docs served at /docs and /redoc.
app = FastAPI(
    title="Vibe Detection Backend API",
    description="Professional emotion recognition API with DeepFace + AA-DCN + HybridResNetViT + YouTube Music",
    version="1.0.0",
    docs_url="/docs",
    redoc_url="/redoc"
)

# NOTE(review): wildcard origins combined with allow_credentials=True is not
# honored by browsers/Starlette for credentialed requests — confirm whether
# credentials are actually required, or pin explicit origins instead.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
)
|
|
| |
# Capability flags set during startup; the health endpoints report them.
ML_AVAILABLE = False
CUSTOM_MODULES_AVAILABLE = False

# Select the torch device (GPU when available). NOTE(review): torch is
# imported unconditionally at the top of the file, so an ImportError would
# already have been raised there — this except branch is effectively a
# defensive guard that cannot fire here.
try:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    ML_AVAILABLE = True
    logger.info(f"✅ ML dependencies loaded successfully on {device}")
except ImportError as e:
    logger.warning(f"❌ ML dependencies not available: {e}")
    ML_AVAILABLE = False

# Unauthenticated YouTube Music client; None disables song recommendations
# (callers fall back to canned placeholder tracks).
try:
    ytmusic = YTMusic()
    logger.info("✅ YouTube Music API initialized successfully!")
except Exception as e:
    logger.error(f"❌ YouTube Music API initialization failed: {e}")
    ytmusic = None
|
|
| |
def recommend_songs_for_moods_and_genres(moods, genres, limit_per_mood=5, limit_per_genre=5):
    """Recommend songs from the YouTube Music API for the given moods and genres.

    Runs one search per mood ("<mood> songs") and one per genre
    ("<genre> songs"), de-duplicating results by videoId across both passes.

    Args:
        moods: Iterable of mood names (e.g. "Happiness").
        genres: Iterable of genre names (e.g. "pop").
        limit_per_mood: Max songs taken from each mood search result.
        limit_per_genre: Max songs taken from each genre search result.

    Returns:
        List of dicts with keys title, artist, url, source. When the API is
        unavailable or errors out, a single placeholder entry is returned
        (source "fallback" / "error") instead of raising.
    """
    if not ytmusic:
        logger.warning("⚠️ YouTube Music API not available")
        return [
            {"title": "Sample Song", "artist": "Sample Artist", "url": "https://music.youtube.com", "source": "fallback"}
        ]

    all_results = []
    seen_ids = set()  # videoIds already collected, so the two passes don't duplicate

    def _collect(terms, limit, source):
        """Search "<term> songs" for each term; append up to `limit` unseen songs."""
        for term in terms:
            search_results = ytmusic.search(f"{term} songs", filter="songs")
            for song in search_results[:limit]:
                song_id = song.get('videoId')
                if not song_id or song_id in seen_ids:
                    continue
                seen_ids.add(song_id)
                # Guard missing title/artists metadata: a single malformed
                # result must not abort the whole recommendation pass.
                artists = song.get('artists') or [{}]
                all_results.append({
                    "title": song.get('title', 'Unknown Title'),
                    "artist": artists[0].get('name', 'Unknown Artist'),
                    "url": f"https://music.youtube.com/watch?v={song_id}",
                    "source": source,
                })

    try:
        _collect(moods, limit_per_mood, "mood")
        _collect(genres, limit_per_genre, "genre")

        logger.info(f"🎵 YouTube Music API returned {len(all_results)} songs")
        return all_results

    except Exception as e:
        logger.error(f"❌ YouTube Music API error: {e}")
        return [
            {"title": "Fallback Song", "artist": "Fallback Artist", "url": "https://music.youtube.com", "source": "error"}
        ]
|
|
| |
# Import project-local modules and load AA-DCN weights. Everything downstream
# checks CUSTOM_MODULES_AVAILABLE / face_model before using these names.
face_model = None
try:
    from models.model_definitions import create_aadcn_model, HybridResNetViT
    from utils.image_utils import detect_face, preprocess_image
    from utils.mood_utils import map_vibe_to_moods, get_genres_for_moods, load_mood_genre_mapping
    from models.vibe_model import predict_vibe
    from config import ensure_models_available, FACE_MODEL_PATH

    CUSTOM_MODULES_AVAILABLE = True
    logger.info("✅ Custom modules loaded successfully!")

    # Populate the mood → genre lookup (presumably from the project CSV —
    # implementation lives in utils.mood_utils).
    load_mood_genre_mapping()

    # Load AA-DCN weights onto the selected device; 8 output classes matching
    # emotion_idx_to_label below. eval() disables dropout/batch-norm updates.
    if ensure_models_available():
        face_model = create_aadcn_model(num_classes=8)
        face_model.load_state_dict(torch.load(FACE_MODEL_PATH, map_location=device))
        face_model.eval()
        logger.info("🎉 AA-DCN face model loaded successfully!")

except ImportError as e:
    logger.error(f"❌ Custom modules import failed: {e}")
    CUSTOM_MODULES_AVAILABLE = False
|
|
| |
# Class index → emotion label for the AA-DCN classifier's 8 outputs.
emotion_idx_to_label = {
    0: 'angry',
    1: 'contempt',
    2: 'disgust',
    3: 'fear',
    4: 'happy',
    5: 'neutral',
    6: 'sad',
    7: 'surprise'
}

# Dataset emotion label → this app's custom mood vocabulary (the moods feed
# the genre lookup and music recommendations).
dataset_to_custom = {
    'angry': ['Anger', 'Annoyance', 'Disapproval'],
    'contempt': ['Disapproval', 'Disconnection', 'Annoyance'],
    'disgust': ['Aversion', 'Disapproval', 'Disconnection'],
    'fear': ['Fear', 'Disquietment', 'Doubt/Confusion'],
    'happy': ['Happiness', 'Affection', 'Pleasure', 'Excitement'],
    'neutral': ['Peace', 'Esteem', 'Confidence'],
    'sad': ['Sadness', 'Fatigue', 'Suffering'],
    'surprise': ['Surprise', 'Anticipation', 'Excitement']
}

# AA-DCN input preprocessing: 224x224 resize + ImageNet mean/std normalization.
# None when ML is unavailable; the prediction path checks for that.
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])
]) if ML_AVAILABLE else None
|
|
def predict_face_emotion_hybrid(image_bytes):
    """YOUR EXACT FACE EMOTION LOGIC: DeepFace for happy → AA-DCN for others"""

    # Stage 1 — DeepFace. Only a 'happy' verdict is accepted here; any other
    # dominant emotion falls through to the AA-DCN classifier below.
    if DEEPFACE_AVAILABLE:
        try:
            pil_img = Image.open(io.BytesIO(image_bytes)).convert('RGB')
            analysis = DeepFace.analyze(
                np.array(pil_img),
                actions=['emotion'],
                enforce_detection=False,
                detector_backend='opencv'
            )
            analysis = analysis[0] if isinstance(analysis, list) else analysis

            dominant = analysis['dominant_emotion']
            if dominant == 'happy':
                logger.info("🎉 DeepFace detected HAPPY - returning happy result!")
                gc.collect()
                return 'happy', dataset_to_custom['happy']
            logger.info(f"🔄 DeepFace detected {dominant} (not happy) - sending to AA-DCN")
            gc.collect()
        except Exception as e:
            logger.warning(f"DeepFace error: {e}. Falling back to AA-DCN model.")
            gc.collect()

    # Stage 2 — AA-DCN classifier over the 8 dataset emotion classes.
    if CUSTOM_MODULES_AVAILABLE and face_model is not None and transform is not None:
        try:
            pil_img = Image.open(io.BytesIO(image_bytes)).convert('RGB')
            batch = transform(pil_img).unsqueeze(0).to(device)
            with torch.no_grad():
                logits = face_model(batch)
                best_idx = logits.argmax(dim=1).item()
            dataset_label = emotion_idx_to_label.get(best_idx, 'unknown')

            logger.info(f"✅ AA-DCN detected: {dataset_label} - returning AA-DCN result!")
            return dataset_label, dataset_to_custom.get(dataset_label, ['Unknown'])
        except Exception as e:
            logger.error(f"❌ AA-DCN prediction error: {e}")

    # Stage 3 — static neutral fallback when neither model is usable.
    logger.warning("⚠️ Using basic emotion fallback")
    return 'neutral', ['Peace', 'Esteem', 'Confidence']
|
|
def predict_environment_vibe_resnetvit(image_bytes):
    """YOUR EXACT VIBE LOGIC: HybridResNetViT for environment/surrounding detection"""
    try:
        # Guard clause: without the project modules there is no vibe pipeline.
        if not CUSTOM_MODULES_AVAILABLE:
            logger.error("❌ Custom modules not available - cannot perform vibe detection")
            return None, ['error'], "vibe_unavailable"

        # preprocess → classify → map the predicted vibe index to mood names.
        tensor = preprocess_image(image_bytes)
        vibe_idx = predict_vibe(tensor)
        moods = map_vibe_to_moods(vibe_idx)

        logger.info(f"🌍 YOUR HybridResNetViT detected environment vibe: {moods} (vibe_idx: {vibe_idx})")
        return None, moods, "vibe_hybridresnetvit"

    except Exception as e:
        logger.error(f"❌ Environment vibe detection failed: {e}")
        return None, ['error'], "vibe_error"
|
|
| |
@app.get("/")
async def root():
    """API health check and information"""
    endpoints = {
        "analyze": "/analyze",
        "health": "/health",
        "docs": "/docs"
    }
    # Model-availability flags mirror the startup probes above.
    models = {
        "deepface_available": DEEPFACE_AVAILABLE,
        "custom_modules_available": CUSTOM_MODULES_AVAILABLE,
        "face_model_loaded": face_model is not None,
        "youtube_music_available": ytmusic is not None
    }
    return {
        "message": "🎭 Vibe Detection Backend API with YouTube Music",
        "status": "healthy",
        "version": "1.0.0",
        "endpoints": endpoints,
        "models": models
    }
|
|
@app.get("/health")
async def health_check():
    """Detailed health check for monitoring.

    Returns the startup capability flags, the torch device in use, and a
    static description of the detection pipeline for dashboard display.
    """
    return {
        "status": "healthy",
        "ml_available": ML_AVAILABLE,
        "deepface_available": DEEPFACE_AVAILABLE,
        "custom_modules_available": CUSTOM_MODULES_AVAILABLE,
        "face_model_loaded": face_model is not None,
        "youtube_music_available": ytmusic is not None,
        # device only exists when torch initialized successfully.
        "device": str(device) if ML_AVAILABLE else "N/A",
        "platform": "Hugging Face Spaces (16GB RAM)",
        "your_exact_logic": {
            "face_detection": "DeepFace (happy) → AA-DCN (other emotions)",
            "environment_detection": "HybridResNetViT (surrounding vibe)",
            "csv_mapping": "Mood to genre mapping from CSV file",
            "music_api": "YouTube Music API integration (Spotify removed)"
        }
    }
|
|
@app.post("/analyze")
async def analyze_emotion(file: UploadFile = File(...)):
    """Main emotion analysis endpoint with YOUR EXACT LOGIC.

    Pipeline:
      1. Validate the upload is an image and the ML stack is loaded.
      2. Face present  → DeepFace (happy) then AA-DCN (other emotions).
         No face       → HybridResNetViT environment-vibe detection.
      3. Map moods → genres → YouTube Music track recommendations.

    Raises:
        HTTPException 400: upload is not an image.
        HTTPException 503: ML models unavailable.
        HTTPException 500: unexpected analysis failure.
    """
    # content_type may be None for some clients; guard before .startswith so a
    # missing header yields a clean 400 instead of an AttributeError → 500.
    if not file.content_type or not file.content_type.startswith('image/'):
        raise HTTPException(status_code=400, detail="File must be an image")

    if not ML_AVAILABLE:
        raise HTTPException(status_code=503, detail="ML models not available")

    try:
        image_bytes = await file.read()

        if CUSTOM_MODULES_AVAILABLE:
            # Route on face presence: face → emotion pipeline, else scene vibe.
            if detect_face(image_bytes):
                logger.info("👤 Face detected - Using YOUR EXACT LOGIC: DeepFace → AA-DCN")
                label, moods = predict_face_emotion_hybrid(image_bytes)
                source = "face_emotion"
                logger.info(f"🎭 Face emotion result: {label}")
            else:
                logger.info("🌍 No face detected - Using HybridResNetViT for environment vibe")
                label, moods, source = predict_environment_vibe_resnetvit(image_bytes)
                logger.info(f"🌍 Environment vibe result: {moods}")

            # Music recommendations are best-effort: failures degrade to a
            # canned fallback instead of failing the whole analysis.
            genres = []
            tracks = []
            try:
                genres = get_genres_for_moods(moods)
                tracks = recommend_songs_for_moods_and_genres(
                    moods, genres,
                    limit_per_mood=5,
                    limit_per_genre=5
                )
                logger.info(f"🎵 Music recommendations: {len(tracks)} tracks from {len(genres)} genres")
            except Exception as e:
                logger.warning(f"⚠️ Music recommendation failed: {e}")
                genres = ['pop', 'indie']
                tracks = [
                    {"title": "Happy Song", "artist": "Sample Artist", "url": "https://music.youtube.com", "source": "fallback"}
                ]
        else:
            # Custom model stack missing: serve a neutral canned response.
            label = 'neutral'
            moods = ['Peace', 'Esteem', 'Confidence']
            genres = ['pop', 'indie']
            tracks = [{"title": "Fallback Track", "artist": "Fallback Artist", "url": "https://music.youtube.com", "source": "fallback"}]
            source = "fallback"

        result = {
            "emotion": label,
            "moods": moods,
            "genres": genres,
            "tracks": tracks,
            "source": source,
            "status": "success",
            "your_exact_logic": {
                "face_detection": "DeepFace (happy) → AA-DCN (other emotions)",
                "environment_detection": "HybridResNetViT (surrounding vibe)",
                "csv_mapping": "Mood to genre mapping completed",
                "music_api": "YouTube Music API songs retrieved (Spotify removed)"
            },
            "model_info": {
                "device": str(device),
                "deepface_available": DEEPFACE_AVAILABLE,
                "custom_modules_available": CUSTOM_MODULES_AVAILABLE,
                "face_model_loaded": face_model is not None,
                "youtube_music_available": ytmusic is not None,
                "memory_optimized": True,
                "platform": "Hugging Face Spaces (16GB RAM)"
            }
        }

        logger.info(f"🎉 YOUR EXACT LOGIC COMPLETE: {label} - {source}")
        # Release image/model temporaries promptly on a memory-constrained host.
        gc.collect()

        return JSONResponse(content=result)

    except HTTPException:
        # Re-raise intentional HTTP errors untouched (don't wrap as 500).
        raise
    except Exception as e:
        logger.error(f"❌ Analysis error: {e}")
        gc.collect()
        raise HTTPException(status_code=500, detail=f"Analysis failed: {str(e)}")
|
|
@app.exception_handler(Exception)
async def global_exception_handler(request, exc):
    """Last-resort handler: log the failure and return a generic 500 JSON body."""
    logger.error(f"❌ Global error: {exc}")
    payload = {"detail": f"Internal server error: {str(exc)}"}
    return JSONResponse(status_code=500, content=payload)
|
|
# Local/dev entry point; port 7860 is the port Hugging Face Spaces exposes.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
|
|