Prateek0515 committed
Commit c2479f9 · verified · 1 Parent(s): 29ccd19

Add inference handler for model deployment
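For context, a minimal sketch of how a deployed endpoint built on this handler could be queried. The endpoint URL and token are placeholders, and the payload shape simply mirrors the data.get("inputs", ...) contract implemented in handler.py below.

import requests

# Placeholder values (assumptions): substitute your own Inference Endpoint URL and HF token.
ENDPOINT_URL = "https://<your-endpoint>.endpoints.huggingface.cloud"
HF_TOKEN = "hf_..."

response = requests.post(
    ENDPOINT_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}", "Content-Type": "application/json"},
    json={"inputs": "The petitioner contends that the impugned order was passed without jurisdiction."},
)
print(response.json())  # expected shape: [{"label": "...", "score": 1.0}]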

Files changed (1): handler.py (+107 −0)
handler.py ADDED
@@ -0,0 +1,107 @@
+
+ import torch
+ import torch.nn as nn
+ import numpy as np
+ from transformers import AutoTokenizer, AutoModel
+ from torchcrf import CRF
+
+ class PositionalEncoding(nn.Module):
+     # Standard sinusoidal positional encoding added to the sentence embeddings.
+     def __init__(self, d_model, dropout=0.1, max_len=5000):
+         super(PositionalEncoding, self).__init__()
+         self.dropout = nn.Dropout(p=dropout)
+         pe = torch.zeros(max_len, d_model)
+         position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
+         div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model))
+         pe[:, 0::2] = torch.sin(position * div_term)
+         pe[:, 1::2] = torch.cos(position * div_term)
+         pe = pe.unsqueeze(0)
+         self.register_buffer('pe', pe)
+
+     def forward(self, x):
+         x = x + self.pe[:, :x.size(1), :]
+         return self.dropout(x)
+
+ class VanillaTransformer(nn.Module):
+     # Sentence-level transformer encoder that contextualises the sentence embeddings.
+     def __init__(self, d_model=768, nhead=8, num_layers=3, dim_feedforward=2048, dropout=0.1):
+         super(VanillaTransformer, self).__init__()
+         self.pos_encoder = PositionalEncoding(d_model, dropout)
+         encoder_layer = nn.TransformerEncoderLayer(
+             d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward,
+             dropout=dropout, activation='gelu', batch_first=True
+         )
+         self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
+         self.d_model = d_model
+
+     def forward(self, src, src_mask=None, src_key_padding_mask=None):
+         src = self.pos_encoder(src)
+         output = self.transformer(src, mask=src_mask, src_key_padding_mask=src_key_padding_mask)
+         return output
+
+ class HierarchicalLegalSegModel(nn.Module):
+     # Longformer sentence encoder -> sentence-level transformer -> linear emissions -> CRF.
+     def __init__(self, longformer_model, num_labels, hidden_dim=768, transformer_layers=3, transformer_heads=8, dropout=0.1):
+         super(HierarchicalLegalSegModel, self).__init__()
+         self.longformer = longformer_model
+         self.hidden_dim = hidden_dim
+         self.vanilla_transformer = VanillaTransformer(d_model=hidden_dim, nhead=transformer_heads, num_layers=transformer_layers, dim_feedforward=hidden_dim*4, dropout=dropout)
+         self.classifier = nn.Linear(hidden_dim, num_labels)
+         self.crf = CRF(num_labels, batch_first=True)
+         self.dropout = nn.Dropout(dropout)
+         self.num_labels = num_labels
+
+     def encode_sentences(self, input_ids, attention_mask):
+         # Encode every sentence independently and keep its [CLS] embedding.
+         batch_size, num_sentences, max_seq_len = input_ids.shape
+         input_ids_flat = input_ids.view(-1, max_seq_len)
+         attention_mask_flat = attention_mask.view(-1, max_seq_len)
+         outputs = self.longformer(input_ids=input_ids_flat, attention_mask=attention_mask_flat)
+         cls_embeddings = outputs.last_hidden_state[:, 0, :]
+         sentence_embeddings = cls_embeddings.view(batch_size, num_sentences, self.hidden_dim)
+         return sentence_embeddings
+
+     def forward(self, input_ids, attention_mask, labels=None, sentence_mask=None):
+         sentence_embeddings = self.encode_sentences(input_ids, attention_mask)
+         sentence_embeddings = self.dropout(sentence_embeddings)
+         # src_key_padding_mask expects True for padded positions, hence the inversion.
+         transformer_output = self.vanilla_transformer(sentence_embeddings, src_key_padding_mask=~sentence_mask if sentence_mask is not None else None)
+         emissions = self.classifier(transformer_output)
+         if labels is not None:
+             loss = -self.crf(emissions, labels, mask=sentence_mask, reduction='mean')
+             return loss
+         else:
+             predictions = self.crf.decode(emissions, mask=sentence_mask)
+             return predictions
+
+ class EndpointHandler:
+     def __init__(self, path=""):
+         self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+         self.tokenizer = AutoTokenizer.from_pretrained(path)
+
+         # Frozen Longformer backbone used purely as a sentence encoder.
+         longformer = AutoModel.from_pretrained("lexlms/legal-longformer-base")
+         longformer = longformer.to(self.device)
+         for param in longformer.parameters():
+             param.requires_grad = False
+
+         self.model = HierarchicalLegalSegModel(longformer, num_labels=7)
+         checkpoint = torch.load(f"{path}/model.pth", map_location=self.device)
+         if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
+             self.model.load_state_dict(checkpoint['model_state_dict'])
+         else:
+             self.model.load_state_dict(checkpoint)
+         self.model = self.model.to(self.device)
+         self.model.eval()
+
+         self.id2label = {
+             0: "Arguments of Petitioner", 1: "Arguments of Respondent", 2: "Decision",
+             3: "Facts", 4: "Issue", 5: "None", 6: "Reasoning"
+         }
+
+     def __call__(self, data):
+         text = data.get("inputs", "")
+         encoded = self.tokenizer(text, padding="max_length", truncation=True, max_length=512, return_tensors="pt")
+         # Reshape to (batch=1, num_sentences=1, seq_len) as expected by the hierarchical model.
+         input_ids = encoded["input_ids"].unsqueeze(1).to(self.device)
+         attention_mask = encoded["attention_mask"].unsqueeze(1).to(self.device)
+         sentence_mask = torch.ones(1, 1, dtype=torch.bool).to(self.device)
+
+         with torch.no_grad():
+             predictions = self.model(input_ids, attention_mask, sentence_mask=sentence_mask)
+
+         label_id = predictions[0][0]
+         return [{"label": self.id2label[label_id], "score": 1.0}]
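
A minimal sketch of a local smoke test for the handler, assuming a local checkout of this repository that contains model.pth alongside the tokenizer files; the input sentence and the printed label are illustrative only.

from handler import EndpointHandler

# Instantiate against a local directory (not a Hub model id); "." is a hypothetical path.
handler = EndpointHandler(path=".")

result = handler({"inputs": "The appeal is devoid of merit and is accordingly dismissed."})
print(result)  # e.g. [{"label": "Decision", "score": 1.0}]

Note that the returned score is a constant 1.0: the CRF decode yields a hard label sequence rather than per-class probabilities, so the handler does not expose a confidence estimate.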