Commit e7268a9
ibraheem15 committed
Parent(s): 8d80196

Add initial implementation of Spam Detection API with Docker support

Files changed:
- Dockerfile       +24 -0
- main.py          +68 -0
- requirements.txt +10 -0
Dockerfile
ADDED
@@ -0,0 +1,24 @@
+# Dockerfile
+
+# Use a lean official Python image
+FROM python:3.10-slim
+
+# Set working directory
+WORKDIR /app
+
+# Copy requirements and install dependencies
+COPY requirements.txt /app/
+
+RUN pip install --no-cache-dir torch --index-url https://download.pytorch.org/whl/cpu
+
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the application code
+COPY main.py /app/
+COPY tests/ /app/tests/
+# The port is fixed by the cloud host (7860 for HF Spaces),
+# so we expose and bind to 7860 to match the CMD below.
+EXPOSE 7860
+
+# Command to run the application using Uvicorn
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
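With the image built and the container running locally (HF Spaces itself routes traffic to port 7860), a quick smoke test of the health endpoint could look like the sketch below. It uses httpx, which requirements.txt already lists; the localhost URL and the standalone smoke_test.py file name are assumptions for illustration, not part of this commit.

# smoke_test.py - minimal sketch; assumes the container is reachable
# at http://localhost:7860 (e.g. via docker run -p 7860:7860 <image>)
import httpx

resp = httpx.get("http://localhost:7860/", timeout=30)
resp.raise_for_status()
print(resp.json())  # expected: {"status": "ok", "model_loaded": true}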
main.py
ADDED
@@ -0,0 +1,68 @@
+# main.py
+from fastapi import FastAPI, HTTPException
+from pydantic import BaseModel
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
+from contextlib import asynccontextmanager
+import torch
+
+# --- MLOps Configuration ---
+HF_MODEL_NAME = "roshana1s/spam-message-classifier"
+CLASSIFIER_PIPELINE = None
+
+# Pydantic model for request body
+class Message(BaseModel):
+    text: str
+
+# --- LIFESPAN MANAGER (The Modern Fix) ---
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    # STARTUP: Load the model
+    global CLASSIFIER_PIPELINE
+    print(f"Loading model {HF_MODEL_NAME}...")
+    try:
+        tokenizer = AutoTokenizer.from_pretrained(HF_MODEL_NAME)
+        model = AutoModelForSequenceClassification.from_pretrained(HF_MODEL_NAME)
+
+        CLASSIFIER_PIPELINE = pipeline(
+            "text-classification",
+            model=model,
+            tokenizer=tokenizer,
+            device=0 if torch.cuda.is_available() else -1
+        )
+        print("Model loaded successfully.")
+    except Exception as e:
+        print(f"Failed to load model: {e}")
+
+    yield  # This point is where the app runs
+
+    # SHUTDOWN: Clean up resources (if needed)
+    print("Shutting down model resources...")
+    CLASSIFIER_PIPELINE = None
+
+# Initialize App with Lifespan
+app = FastAPI(
+    title="Spam Detection API",
+    lifespan=lifespan
+)
+
+@app.get("/")
+def health_check():
+    return {"status": "ok", "model_loaded": CLASSIFIER_PIPELINE is not None}
+
+@app.post("/predict")
+def predict_spam(item: Message):
+    if CLASSIFIER_PIPELINE is None:
+        raise HTTPException(status_code=503, detail="Model is not ready.")
+
+    try:
+        results = CLASSIFIER_PIPELINE(item.text)
+        label = results[0]['label']
+        score = results[0]['score']
+        output_label = "spam" if label == 'LABEL_1' else "ham"
+
+        return {
+            "prediction": output_label,
+            "confidence_score": score,
+        }
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=str(e))
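The Dockerfile copies a tests/ directory into the image and requirements.txt lists pytest and httpx, but the tests themselves are not part of this commit. A companion test module might look roughly like the following hypothetical tests/test_main.py; note that entering TestClient as a context manager runs the lifespan startup, so the model is downloaded when the tests run.

# tests/test_main.py - hypothetical sketch; the actual tests/ directory
# is not included in this commit
from fastapi.testclient import TestClient
from main import app

def test_health_check():
    # Entering the context manager runs the lifespan startup (model load)
    with TestClient(app) as client:
        resp = client.get("/")
        assert resp.status_code == 200
        assert resp.json()["status"] == "ok"

def test_predict_returns_label_and_score():
    with TestClient(app) as client:
        resp = client.post("/predict", json={"text": "WIN a FREE prize now!!!"})
        assert resp.status_code == 200
        body = resp.json()
        assert body["prediction"] in ("spam", "ham")
        assert 0.0 <= body["confidence_score"] <= 1.0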
requirements.txt
ADDED
@@ -0,0 +1,10 @@
+# This line tells pip to look for CPU versions first
+--extra-index-url https://download.pytorch.org/whl/cpu
+
+fastapi
+uvicorn[standard]
+transformers
+torch  # PyTorch is the framework for this model
+pydantic
+pytest
+httpx