import os

from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
from transformers import pipeline

# Initialize FastAPI
app = FastAPI()

# --- LOAD MODEL (Run only once on server startup) ---
# The classifier is loaded at import time so every worker pays the cost once.
# If loading fails (no network, missing weights), `classifier` stays None and
# /predict reports the condition instead of crashing the server.
print("Loading model anggars/emotive-sentiment...")
try:
    classifier = pipeline(
        "text-classification",
        model="anggars/emotive-sentiment",
        top_k=None,  # Retrieve all label scores, not just the top one
    )
    print("Model successfully loaded!")
except Exception as e:
    print(f"Failed to load model: {e}")
    classifier = None


class TextRequest(BaseModel):
    # Raw text to classify.
    text: str


# --- ROOT ENDPOINT (Embed Web via Iframe) ---
@app.get("/", response_class=HTMLResponse)
def home():
    """Serve the landing page (HTML) for the service."""
    return """ Personalify API """


def _log_inference(text: str, emotions: list) -> None:
    """Pretty-print one inference (truncated input + sorted scores) to stdout."""
    print("\n" + "=" * 40)
    # Truncate long inputs so logs stay readable; compute the preview once
    # instead of duplicating the f-string in a ternary.
    preview = text if len(text) <= 100 else f"{text[:100]}..."
    print(f"[INFERENCE] Input Text: {preview}")
    print("[INFERENCE] Results:")
    # Sort by score descending for better readability in logs.
    for item in sorted(emotions, key=lambda x: x["score"], reverse=True):
        print(f"  • {item['label']:<15}: {item['score']:.5f}")
    print("=" * 40 + "\n")  # End separator


# --- API ENDPOINT (For Backend requests) ---
@app.post("/predict")
def predict(req: TextRequest):
    """Classify the emotions in `req.text`.

    Returns `{"emotions": [{"label": ..., "score": ...}, ...]}` on success,
    or `{"error": ...}` when the model is unavailable, the input is empty,
    or inference raises.
    """
    if classifier is None:
        return {"error": "Model is not ready or failed to load."}

    # Guard: reject empty / whitespace-only input before touching the model.
    if not req.text.strip():
        return {"error": "Input text is empty."}

    try:
        # Pipeline result format with top_k=None:
        # [[{'label': 'joy', 'score': 0.9}, ...]] — one inner list per input.
        results = classifier(req.text)
        emotions = results[0]

        _log_inference(req.text, emotions)

        return {"emotions": emotions}
    except Exception as e:
        print(f"[ERROR] Inference failed: {e}")
        return {"error": str(e)}