from typing import Dict, List, Any
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer)
import torch


class EndpointHandler:
    def __init__(self, path=""):
        # Preload everything needed at inference time.
        self.model = AutoModelForCausalLM.from_pretrained(
            path,
            trust_remote_code=True,
            quantization_config=None,
            torch_dtype=torch.float32,  # full-precision float32 weights
            device_map="auto",
        )
        self.tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
        self.tokenizer.padding_side = "left"  # pad on the left for decoder-only generation
        self.tokenizer.pad_token = self.tokenizer.eos_token  # reuse EOS as the pad token
        self.tokenizer.add_eos_token = True  # append EOS when tokenizing

    def __call__(self, data: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Args:
            data (Dict[str, Any]):
                inputs (str): the prompt to generate from
                kwargs: optional generation parameters (unused here)
        Returns:
            A List[Dict[str, Any]] that will be serialized and returned,
            e.g. [{"outputs": "<generated text>"}].
        """

        inputs = data.pop("inputs", data)
        # Wrap the raw prompt in a single-turn chat message.
        messages = [{"role": "user", "content": inputs}]
        encodeds = self.tokenizer.apply_chat_template(messages, return_tensors="pt")
        encoded_length = encodeds.shape[1]  # prompt length in tokens
        model_inputs = encodeds.to(self.model.device)  # follow device_map instead of hardcoding "cuda"
        result = self.model.generate(
            model_inputs,
            do_sample=False,               # greedy decoding
            output_scores=True,            # expose per-step logits in result.scores
            return_dict_in_generate=True,  # return a structured output object, not a bare tensor
            pad_token_id=self.tokenizer.eos_token_id,
            max_new_tokens=400,
        )
        x, logits_gen = result.sequences, result.scores  # logits_gen holds per-step scores (unused here)
        x = x[:, encoded_length:]  # strip the prompt, keep only newly generated tokens
        decoded = self.tokenizer.batch_decode(x, skip_special_tokens=True)

        return [{"outputs": decoded[0]}]
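

# --- Minimal local smoke test -------------------------------------------------
# A hedged sketch of how this handler is invoked: Hugging Face Inference
# Endpoints instantiate EndpointHandler with the repository path and call it
# with the request payload. The model id is the one referenced in this repo;
# the prompt is a toy example, and running this requires enough memory to
# load the model.
if __name__ == "__main__":
    handler = EndpointHandler(path="sjster/test_medium")
    response = handler({"inputs": "What is the capital of France?"})
    print(response)  # -> [{"outputs": "..."}]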