import typing as tp

import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer


class SciBertPaperClassifier:
    """Wraps a fine-tuned SciBERT sequence-classification model for paper classification."""

    def __init__(self, model_path="trained_model"):
        # Load the fine-tuned model and its tokenizer from the same checkpoint directory.
        self.model = AutoModelForSequenceClassification.from_pretrained(model_path)
        self.tokenizer = AutoTokenizer.from_pretrained(model_path)
        # Run on GPU when available, otherwise fall back to CPU.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model.to(self.device)
        self.model.eval()  # disable dropout for deterministic inference

    def __call__(self, inputs):
        # Accept either a single paper object or an iterable of papers.
        if not isinstance(inputs, tp.Iterable):
            inputs = [inputs]
        # Flatten each paper into a single "AUTHORS: ... TITLE: ... ABSTRACT: ..." string.
        texts = [
            f"AUTHORS: {' '.join(paper.authors) if isinstance(paper.authors, list) else paper.authors} "
            f"TITLE: {paper.title} ABSTRACT: {paper.abstract}"
            for paper in inputs
        ]

        # Tokenize the whole batch at once, truncating long texts to 256 tokens.
        encoded = self.tokenizer(
            texts, truncation=True, padding=True, max_length=256, return_tensors="pt"
        ).to(self.device)

        with torch.no_grad():
            outputs = self.model(**encoded)

        # Convert logits to per-class probabilities.
        probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
        results = []
        for prob in probs:
            # One {label_name: probability} dict per class, in id2label order.
            result = [
                {self.model.config.id2label[label_idx]: score.item()}
                for label_idx, score in enumerate(prob)
            ]
            results.append(result)
        # Return a bare result for a single paper, a list of results for a batch.
        if len(results) == 1:
            return results[0]
        return results
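

if __name__ == "__main__":
    # Minimal usage sketch (illustrative, not part of the original file): it assumes a
    # fine-tuned checkpoint in ./trained_model and a simple paper container with
    # `authors`, `title`, and `abstract` attributes. The Paper dataclass below is
    # hypothetical; any object exposing those attributes would work.
    from dataclasses import dataclass

    @dataclass
    class Paper:
        authors: list
        title: str
        abstract: str

    classifier = SciBertPaperClassifier(model_path="trained_model")
    paper = Paper(
        authors=["A. Author", "B. Author"],
        title="A Study of Things",
        abstract="We study things and report findings.",
    )
    # A single paper returns one list of {label: probability} dicts;
    # a list of papers returns one such list per paper.
    print(classifier(paper))
    print(classifier([paper, paper]))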