Spaces:
Running
Running
File size: 2,338 Bytes
ae24679 3697b25 ae24679 3697b25 ae24679 3697b25 ae24679 3697b25 ae24679 3697b25 ae24679 3697b25 ae24679 3697b25 ae24679 3697b25 ae24679 3697b25 ae24679 3697b25 ae24679 7391415 3697b25 ae24679 7391415 ae24679 7391415 ae24679 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 |
import os
from fastapi import FastAPI
from fastapi.responses import HTMLResponse
from pydantic import BaseModel
from transformers import pipeline
# Initialize FastAPI
app = FastAPI()

# --- LOAD MODEL (Run only once on server startup) ---
# The pipeline is created at import time so every request reuses one model
# instance instead of reloading it per call.
print("Loading model anggars/emotive-sentiment...")
try:
    classifier = pipeline(
        "text-classification",
        model="anggars/emotive-sentiment",
        top_k=None  # Retrieve all scores (one entry per label, not just the top class)
    )
    print("Model successfully loaded!")
except Exception as e:
    # Best-effort startup: keep the server running even if the model fails to
    # download/load; /predict checks `classifier is None` and reports the error.
    print(f"Failed to load model: {e}")
    classifier = None
class TextRequest(BaseModel):
    """Request body for POST /predict.

    Attributes:
        text: The raw text to run emotion classification on.
    """
    text: str
# --- ROOT ENDPOINT (Embed Web via Iframe) ---
# --- ROOT ENDPOINT (Embed Web via Iframe) ---
@app.get("/", response_class=HTMLResponse)
def home():
    """Serve a minimal full-screen page embedding the Personalify web app.

    Returns:
        An HTML document whose only content is an iframe pointing at
        https://personalify.vercel.app/lyrics.
    """
    return """
<!DOCTYPE html>
<html>
<head>
<title>Personalify API</title>
<style>
body, html { margin: 0; padding: 0; height: 100%; overflow: hidden; }
iframe { width: 100%; height: 100%; border: none; }
</style>
</head>
<body>
<iframe src="https://personalify.vercel.app/lyrics"></iframe>
</body>
</html>
"""
# --- API ENDPOINT (For Backend requests) ---
# --- API ENDPOINT (For Backend requests) ---
@app.post("/predict")
def predict(req: TextRequest):
    """Classify the emotions expressed in ``req.text``.

    Args:
        req: Request body containing the text to classify.

    Returns:
        ``{"emotions": [...]}`` — a list of ``{"label", "score"}`` dicts, one
        per emotion class — or ``{"error": ...}`` when the model failed to
        load at startup or inference raised an exception. Errors are returned
        in-band (HTTP 200) so existing callers' handling is unchanged.
    """
    if classifier is None:
        return {"error": "Model is not ready or failed to load."}
    try:
        # 1. Log the Input (Truncated to avoid massive logs)
        print("\n" + "="*40)
        snippet = f"{req.text[:100]}..." if len(req.text) > 100 else req.text
        print(f"[INFERENCE] Input Text: {snippet}")

        # 2. Run Inference
        # Pipeline result format: [[{'label': 'joy', 'score': 0.9}, ...]]
        results = classifier(req.text)
        emotions = results[0]

        # 3. Log the Output (Detailed like Backend)
        # Sort by score descending for better readability in logs
        sorted_emotions = sorted(emotions, key=lambda x: x['score'], reverse=True)
        print("[INFERENCE] Results:")
        for item in sorted_emotions:
            # Print format: LABEL ...... SCORE
            print(f" • {item['label']:<15}: {item['score']:.5f}")
        print("="*40 + "\n")  # End separator

        return {"emotions": emotions}
    except Exception as e:
        # Surface inference failures to the caller instead of a raw 500.
        print(f"[ERROR] Inference failed: {e}")
        return {"error": str(e)}