# NOTE: pasted from a Hugging Face Spaces page that reported "Build error";
# the table formatting below has been stripped to restore runnable Python.
from fastapi import FastAPI
from pydantic import BaseModel
import torch
import torch.nn as nn
import pickle
from transformers import DebertaModel, DebertaTokenizer
import uvicorn

# Output columns predicted by the multi-head classifier.
# NOTE(review): the order must match the iteration order of the checkpoint's
# label_encoders dict (they are zipped together in predict) — confirm against
# how the checkpoint was built.
LABEL_COLUMNS = ['Red_Flag_Reason', 'Maker_Action', 'Escalation_Level',
                 'Risk_Category', 'Risk_Drivers', 'Investigation_Outcome']

# Run on GPU when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class InputText(BaseModel):
    """Request body schema for /predict: the raw text to classify."""

    text: str
# SECURITY: pickle.load executes arbitrary code embedded in the file — only
# load this checkpoint from a trusted, integrity-checked source.
with open("app/deberta_model.pkl", "rb") as f:
    checkpoint = pickle.load(f)

# The checkpoint bundles the tokenizer and one sklearn-style LabelEncoder
# per output column alongside the model weights.
tokenizer = checkpoint['tokenizer']
label_encoders = checkpoint['label_encoders']
class DebertaMultiOutput(nn.Module):
    """DeBERTa encoder with one linear classification head per output column.

    Each head produces logits over that column's label classes; forward
    returns the list of per-head logit tensors.
    """

    def __init__(self, num_labels_per_output):
        """Build the encoder and heads.

        Args:
            num_labels_per_output: iterable of class counts, one per head,
                in the same order as the label columns.
        """
        super().__init__()
        self.deberta = DebertaModel.from_pretrained("microsoft/deberta-base")
        self.dropout = nn.Dropout(0.3)
        self.classifiers = nn.ModuleList([
            nn.Linear(self.deberta.config.hidden_size, n_labels)
            for n_labels in num_labels_per_output
        ])

    def forward(self, input_ids, attention_mask):
        """Return a list of logit tensors, one per classification head."""
        outputs = self.deberta(input_ids=input_ids, attention_mask=attention_mask)
        # Use the first ([CLS]) token's hidden state as the pooled
        # sentence representation, regularized with dropout.
        pooled = self.dropout(outputs.last_hidden_state[:, 0])
        return [head(pooled) for head in self.classifiers]
# Size each output head by its encoder's number of classes.
num_labels = [len(le.classes_) for le in label_encoders.values()]
model = DebertaMultiOutput(num_labels)
model.load_state_dict(checkpoint['model_state_dict'])
model.to(DEVICE)
model.eval()  # inference mode: disables dropout
app = FastAPI()


@app.get("/")  # BUG FIX: the route decorator was missing, so the endpoint was never registered
def root():
    """Health-check endpoint confirming the service is up."""
    return {"message": "🟢 DeBERTa multi-output classifier ready."}
@app.post("/predict")  # BUG FIX: the route decorator was missing, so the endpoint was never registered
def predict(input: InputText):  # `input` shadows the builtin; name kept for interface stability
    """Classify the request text into all six label columns.

    Tokenizes the text, runs the multi-head model, and decodes each head's
    argmax index back to its string label.

    Returns:
        dict mapping each column in LABEL_COLUMNS to its predicted label.
    """
    inputs = tokenizer(
        input.text,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=128,
    )
    input_ids = inputs['input_ids'].to(DEVICE)
    attention_mask = inputs['attention_mask'].to(DEVICE)

    with torch.no_grad():  # inference only; no gradients needed
        outputs = model(input_ids, attention_mask)

    # NOTE(review): assumes label_encoders iterates in the same order as
    # LABEL_COLUMNS — confirm against how the checkpoint dict was built.
    preds = {}
    for logits, col, le in zip(outputs, LABEL_COLUMNS, label_encoders.values()):
        pred_idx = torch.argmax(logits, dim=1).item()
        preds[col] = le.inverse_transform([pred_idx])[0]
    return preds