# Source: abhinavvvvv — "solved batch problem" (commit 1e4a1ed)
import pickle
import os
from fastapi import FastAPI
from pydantic import BaseModel
from typing import List
# FastAPI application exposing single and batch severity prediction endpoints.
app = FastAPI(
    title="RTL Log Severity Classifier",
    description="Batch severity prediction for RTL verification logs",
    version="1.1",
)

# Locations of the pre-trained, pickled artifacts.
VECTORIZER_PATH = "vectorizer.pkl"
MODEL_PATH = "severity_model.pkl"

# Numeric class id -> human-readable severity label.
REVERSE_MAP = {0: "INFO", 1: "WARNING", 2: "ERROR", 3: "CRITICAL"}

# Load artifacts once at import time.
# NOTE(review): pickle.load is only safe on trusted files — these are assumed
# to be our own training artifacts, not user-supplied input.
with open(VECTORIZER_PATH, "rb") as vec_file:
    vectorizer = pickle.load(vec_file)
with open(MODEL_PATH, "rb") as model_file:
    model = pickle.load(model_file)
# ---------- Request Schemas ----------
class LogItem(BaseModel):
    """One RTL verification log entry to classify."""

    # Name of the RTL module that emitted the log line.
    module: str
    # The log message text itself.
    message: str
class BatchRequest(BaseModel):
    """Request body for /predict_batch: a list of log entries."""

    # Logs are classified in order; the response preserves this order.
    logs: List[LogItem]
# ---------- Health ----------
@app.get("/")
def health():
    """Liveness probe: report service status and batch capability."""
    payload = {
        "status": "running",
        "model": "RTL Severity Classifier",
        "batch_support": True,
    }
    return payload
# ---------- Single Prediction ----------
@app.post("/predict")
def predict(log: LogItem):
    """Classify the severity of a single RTL log entry.

    Concatenates module name and message into one text feature, vectorizes
    it, and maps the model's numeric class id to a severity label.

    Returns a dict echoing the input plus ``predicted_severity``.
    Raises KeyError if the model emits a class id outside REVERSE_MAP.
    """
    text = log.module + " " + log.message
    vec = vectorizer.transform([text])
    # Cast to int: the model may return a numpy integer; casting keeps the
    # lookup consistent with /predict_batch (which already does int(p)).
    pred = int(model.predict(vec)[0])
    return {
        "module": log.module,
        "message": log.message,
        "predicted_severity": REVERSE_MAP[pred],
    }
# ---------- Batch Prediction ----------
@app.post("/predict_batch")
def predict_batch(request: BatchRequest):
    """Classify severity for a batch of RTL log entries.

    Vectorizes all log texts in a single transform call, predicts in one
    shot, and returns one result per input log in the original order.

    Returns a dict with ``count`` and ``results``.
    """
    # Guard the empty batch: transform/predict on zero rows can raise in
    # some estimators, and the correct answer is trivially empty.
    if not request.logs:
        return {"count": 0, "results": []}
    texts = [log.module + " " + log.message for log in request.logs]
    vectors = vectorizer.transform(texts)
    preds = model.predict(vectors)
    # zip pairs each prediction with its source log — no manual indexing.
    results = [
        {
            "module": log.module,
            "message": log.message,
            "predicted_severity": REVERSE_MAP[int(p)],
        }
        for log, p in zip(request.logs, preds)
    ]
    return {"count": len(results), "results": results}