Spaces:
Runtime error
Runtime error
File size: 4,302 Bytes
bcfd653 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 |
import pickle
import numpy as np
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Dict, List
import uvicorn
# Initialize FastAPI app
app = FastAPI(
    title="Sentiment Analysis API",
    description="A sentiment analysis model that predicts sentiment (positive/negative) from text",
    version="1.0.0",
)

# Load the serialized model once at import time so every request handler can
# reuse the same in-memory model object.
# NOTE(review): pickle.load executes arbitrary code from the file — only ship
# sentiment.pkl from a trusted source.
try:
    with open("sentiment.pkl", "rb") as f:
        model = pickle.load(f)
except FileNotFoundError as exc:
    # RuntimeError is still an Exception subclass (backward-compatible for any
    # caller catching Exception) but names the failure class, and chaining the
    # FileNotFoundError preserves the original traceback for debugging.
    raise RuntimeError(
        "sentiment.pkl not found. Please ensure the model file is in the correct location."
    ) from exc
# Pydantic models for request/response
class TextInput(BaseModel):
    """Request body: a single piece of free-form text to classify."""

    text: str
class PredictionResponse(BaseModel):
    """Response for /predict: class label, its confidence, and a readable sentiment."""

    prediction: int    # 0 = negative, 1 = positive
    confidence: float  # probability of the predicted class
    sentiment: str     # "negative" or "positive"
class ProbabilityResponse(BaseModel):
    """Response for /predict_proba: full per-class probability vector plus the chosen label."""

    probabilities: List[float]  # one probability per class
    prediction: int             # 0 = negative, 1 = positive
    sentiment: str              # "negative" or "positive"
@app.get("/")
async def root():
    """Describe the API and enumerate its endpoints."""
    endpoint_docs = {
        "/predict": "Get sentiment prediction (0 or 1)",
        "/predict_proba": "Get prediction probabilities",
        "/health": "Check API health",
    }
    return {"message": "Sentiment Analysis API", "endpoints": endpoint_docs}
@app.get("/health")
async def health_check():
    """Liveness probe. The model is loaded at import time, so a running app implies it is present."""
    return {"status": "healthy", "model_loaded": True}
@app.post("/predict", response_model=PredictionResponse)
async def predict_sentiment(input_data: TextInput):
    """
    Predict sentiment from text input.
    Returns integer prediction (0 for negative, 1 for positive).
    """
    try:
        # The model expects a batch, so wrap the single text in a list.
        batch = [input_data.text]
        label = model.predict(batch)[0]
        # Confidence is the probability assigned to the winning class.
        class_probs = model.predict_proba(batch)[0]
        return PredictionResponse(
            prediction=int(label),
            confidence=float(max(class_probs)),
            sentiment="positive" if label == 1 else "negative",
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Prediction failed: {str(e)}")
@app.post("/predict_proba", response_model=ProbabilityResponse)
async def predict_probabilities(input_data: TextInput):
    """
    Get prediction probabilities for sentiment analysis.
    Returns probability arrays with shape (n_samples, 2).
    """
    try:
        batch = [input_data.text]
        class_probs = model.predict_proba(batch)[0]
        label = model.predict(batch)[0]
        # Map the integer class to its human-readable name.
        label_name = "negative"
        if label == 1:
            label_name = "positive"
        return ProbabilityResponse(
            probabilities=class_probs.tolist(),
            prediction=int(label),
            sentiment=label_name,
        )
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Probability prediction failed: {str(e)}")
@app.post("/batch_predict")
async def batch_predict(texts: List[str]):
    """
    Predict sentiment for multiple texts at once.

    Args:
        texts: list of raw text strings (JSON array request body).

    Returns:
        {"results": [...]} with one entry per input text containing the text,
        integer prediction, per-class probabilities, sentiment label, and
        confidence (probability of the predicted class).

    Raises:
        HTTPException: 500 when model inference fails.
    """
    # An empty batch would reach model.predict([]) and fail inside the model
    # (sklearn-style estimators reject empty input); short-circuit to a
    # well-formed empty response instead of a 500.
    if not texts:
        return {"results": []}
    try:
        predictions = model.predict(texts)
        probabilities = model.predict_proba(texts)
        results = []
        for text, label, probs in zip(texts, predictions, probabilities):
            results.append({
                "text": text,
                "prediction": int(label),
                "probabilities": probs.tolist(),
                "sentiment": "positive" if label == 1 else "negative",
                "confidence": float(max(probs)),
            })
        return {"results": results}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Batch prediction failed: {str(e)}")
if __name__ == "__main__":
    # Run the ASGI app directly with uvicorn; binds all interfaces on port 7860.
    uvicorn.run(app, host="0.0.0.0", port=7860)
|