# handler.py
| import time | |
| from typing import Any, Dict, List, Union | |
| from transformers import pipeline | |
class EndpointHandler:
    """Inference-endpoint wrapper around a text-classification pipeline.

    Accepts request payloads of the form ``{"inputs": "text"}`` or
    ``{"inputs": ["text", ...]}`` and returns parallel ``labels`` /
    ``scores`` lists plus the model processing time in seconds.
    """

    def __init__(self, path: str = ""):
        """Load a text-classification pipeline from local repo files.

        Args:
            path: Directory containing the model weights/config
                (the inference-endpoints convention is the repo root).
        """
        self.pipe = pipeline("text-classification", model=path)

    def __call__(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Classify the payload's text input(s).

        Args:
            data: Request payload — ``{"inputs": "..."}`` or
                ``{"inputs": ["...", ...]}``. A bare string or list of
                strings is also tolerated for robustness.

        Returns:
            Dict with ``labels`` and ``scores`` (parallel lists, one entry
            per input) and ``processing_time_sec`` (model call wall time).

        Raises:
            ValueError: If no usable text input can be extracted from
                ``data``.
        """
        if isinstance(data, dict):
            inputs: Union[str, List[str]] = data.get("inputs", data)
        else:
            # Tolerate a bare string / list passed directly instead of a
            # payload dict (this is what the old `.get` fallback intended).
            inputs = data

        if inputs is None or isinstance(inputs, dict):
            # Previously a payload without an "inputs" key fell through and
            # the raw dict was handed to the pipeline, failing with an opaque
            # transformers error. Fail fast with a clear message instead.
            raise ValueError(
                'Payload must provide an "inputs" key with a string or a '
                "list of strings."
            )

        # Time only the model call, not payload parsing.
        t0 = time.perf_counter()
        preds = self.pipe(inputs, truncation=True)
        elapsed = time.perf_counter() - t0

        # A single-string input yields a single dict; normalize to a list so
        # the response shape is uniform.
        preds_list = preds if isinstance(preds, list) else [preds]
        return {
            "labels": [p["label"] for p in preds_list],
            "scores": [float(p["score"]) for p in preds_list],
            "processing_time_sec": elapsed,
        }