bhavan2410 committed on
Commit
da97e2c
·
verified ·
1 Parent(s): 2bcbcd0

Create handler.py

Browse files
Files changed (1) hide show
  1. handler.py +74 -0
handler.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Dict, List, Any
2
+ import json
3
+ import torch
4
+ from transformers import BertTokenizerFast, BertForTokenClassification
5
+
6
+
7
class EndpointHandler:
    """Inference endpoint handler for multi-label BIO token classification.

    Loads a fine-tuned ``BertForTokenClassification`` checkpoint and tags each
    token of an input sentence with zero or more bias-related labels
    (stereotype, generalization, unfairness, exclusion, framing, assumption).
    Multi-label: the head's logits are passed through a sigmoid and every
    class whose probability exceeds 0.5 is emitted, so one token can carry
    several tags; tokens with no active class fall back to ``"O"``.
    """

    def __init__(self, path: str = ""):
        """Load tokenizer and model, and move the model to GPU if available.

        Args:
            path: Local directory or hub id of the fine-tuned model weights.
        """
        # NOTE(review): the tokenizer is pinned to the base checkpoint while
        # the model loads from `path` — this assumes the fine-tune kept the
        # original bert-base-uncased vocab. Confirm; otherwise load the
        # tokenizer from `path` as well.
        self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
        self.model = BertForTokenClassification.from_pretrained(path)
        self.model.eval()
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.model.to(self.device)

        # Class-index -> BIO tag for each bias category (13 classes total).
        self.id2label = {
            0: "O",
            1: "B-STEREO",
            2: "I-STEREO",
            3: "B-GEN",
            4: "I-GEN",
            5: "B-UNFAIR",
            6: "I-UNFAIR",
            7: "B-EXCL",
            8: "I-EXCL",
            9: "B-FRAME",
            10: "I-FRAME",
            11: "B-ASSUMP",
            12: "I-ASSUMP",
        }

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """Tag each token of the input sentence with its predicted labels.

        Args:
            data: Request payload; the text to classify is read from the
                ``"inputs"`` key.

        Returns:
            A list of ``{"token": str, "labels": [str, ...]}`` dicts, one per
            non-special wordpiece token, or a single-element list with an
            ``"error"`` key when ``"inputs"`` is missing or empty.
        """
        sentence = data.get("inputs", "")
        if not sentence:
            return [{"error": "Input 'inputs' is required."}]

        # Ask the tokenizer for the special-tokens mask so that [CLS]/[SEP]/
        # [PAD] positions can be skipped reliably. Filtering by token *string*
        # (the previous approach) would also drop legitimate input tokens
        # whose text happens to equal a special token, e.g. a literal "[SEP]"
        # typed by the user.
        inputs = self.tokenizer(
            sentence,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=128,
            return_special_tokens_mask=True,
        )
        input_ids = inputs["input_ids"].to(self.device)
        attention_mask = inputs["attention_mask"].to(self.device)
        # 1 marks a special token, 0 a real token (single sequence -> row 0).
        special_mask = inputs["special_tokens_mask"][0].tolist()

        # Inference only: no autograd bookkeeping needed.
        with torch.no_grad():
            outputs = self.model(input_ids=input_ids, attention_mask=attention_mask)

        # Independent sigmoid per class (multi-label), thresholded at 0.5.
        probabilities = torch.sigmoid(outputs.logits)
        predicted_labels = (probabilities > 0.5).int()

        tokens = self.tokenizer.convert_ids_to_tokens(input_ids[0])
        result: List[Dict[str, Any]] = []
        for i, (token, is_special) in enumerate(zip(tokens, special_mask)):
            if is_special:
                continue
            # Indices of all classes that fired for this token position.
            label_indices = (
                (predicted_labels[0][i] == 1).nonzero(as_tuple=False).squeeze(-1)
            )
            labels = (
                [self.id2label[idx.item()] for idx in label_indices]
                if label_indices.numel() > 0
                else ["O"]
            )
            result.append({"token": token, "labels": labels})

        return result