import pickle
import os
from fastapi import FastAPI
from pydantic import BaseModel
from typing import List
# FastAPI application instance; metadata shows up in the generated OpenAPI docs.
app = FastAPI(
    title="RTL Log Severity Classifier",
    description="Batch severity prediction for RTL verification logs",
    version="1.1"
)

# Paths to serialized model artifacts, resolved relative to the working
# directory at startup — presumably produced by a separate training script;
# TODO confirm where these files are deployed from.
VECTORIZER_PATH = "vectorizer.pkl"
MODEL_PATH = "severity_model.pkl"

# Maps the classifier's integer class index back to a human-readable
# severity label.
REVERSE_MAP = {
    0: "INFO",
    1: "WARNING",
    2: "ERROR",
    3: "CRITICAL"
}

# Load artifacts
# NOTE(review): pickle.load executes arbitrary code from the file — these
# artifacts must come from a trusted source; verify the deployment pipeline.
# Loading at import time means the app fails fast if either file is missing.
with open(VECTORIZER_PATH, "rb") as f:
    vectorizer = pickle.load(f)
with open(MODEL_PATH, "rb") as f:
    model = pickle.load(f)
# ---------- Request Schemas ----------
class LogItem(BaseModel):
    """A single RTL verification log entry to classify."""

    # Name of the RTL module that emitted the log line.
    module: str
    # Free-text log message body.
    message: str
class BatchRequest(BaseModel):
    """Request body for /predict_batch: a list of log entries."""

    # Entries are classified together in one vectorize/predict pass.
    logs: List[LogItem]
# ---------- Health ----------
@app.get("/")
def health():
    """Liveness probe: report service identity and capabilities."""
    payload = {"status": "running"}
    payload["model"] = "RTL Severity Classifier"
    payload["batch_support"] = True
    return payload
# ---------- Single Prediction ----------
@app.post("/predict")
def predict(log: LogItem):
    """Classify the severity of a single RTL log entry.

    Joins module name and message into one text feature, vectorizes it,
    and maps the model's predicted class index to a severity label.
    """
    text = log.module + " " + log.message
    vec = vectorizer.transform([text])
    # int(...) matches the cast done in /predict_batch and guarantees the
    # lookup key (and the JSON response) is a plain Python int even when the
    # model returns a numpy integer.
    pred = int(model.predict(vec)[0])
    return {
        "module": log.module,
        "message": log.message,
        "predicted_severity": REVERSE_MAP[pred]
    }
# ---------- Batch Prediction ----------
@app.post("/predict_batch")
def predict_batch(request: BatchRequest):
    """Classify severities for a batch of RTL log entries in one model call.

    Returns a count plus one result per input log, each carrying the
    original fields and the predicted severity label.
    """
    # Guard the empty batch: some estimators raise on zero-row input, and
    # there is nothing to predict anyway.
    if not request.logs:
        return {"count": 0, "results": []}
    texts = [
        log.module + " " + log.message
        for log in request.logs
    ]
    vectors = vectorizer.transform(texts)
    preds = model.predict(vectors)
    # zip keeps each prediction paired with its source log without index
    # bookkeeping; int(...) normalizes numpy integers for the dict lookup.
    results = [
        {
            "module": log.module,
            "message": log.message,
            "predicted_severity": REVERSE_MAP[int(p)]
        }
        for log, p in zip(request.logs, preds)
    ]
    return {
        "count": len(results),
        "results": results
    }