import subprocess
import sys
from typing import Any, Dict
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

class EndpointHandler:
    def __init__(self, path=""):
        # Install extra dependencies that are missing from the default
        # Inference Endpoints image; doing it here keeps the repository
        # self-contained, at the cost of a slower cold start.
        subprocess.check_call([sys.executable, "-m", "pip", "install", "flash-attn"])
        subprocess.check_call([sys.executable, "-m", "pip", "install", "tiktoken"])
        subprocess.check_call([sys.executable, "-m", "pip", "install", "pytest"])

        # Load the tokenizer and model shipped alongside this handler.
        tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            path, device_map="auto", torch_dtype="auto", trust_remote_code=True
        )

        # Build the text-generation pipeline once so it is reused across requests.
        self.pipeline = pipeline(
            "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=4096
        )

    def __call__(self, data: Dict[str, Any]) -> Dict[str, str]:
        inputs = data.pop("inputs", data)
        parameters = data.pop("parameters", None)

        messages = [
            {"role": "system", "content": "Translate the text to English. Rephrase if needed to ensure it sounds natural. Output only the translated text."},
            {"role": "user", "content": inputs},
        ]
        
        # Forward any generation parameters supplied with the request.
        if parameters is not None:
            prediction = self.pipeline(messages, **parameters)
        else:
            prediction = self.pipeline(messages)
            
        # The chat pipeline returns the full conversation; the last
        # message is the assistant's reply.
        assistant_output = prediction[0]["generated_text"][-1]
        
        return {"output": assistant_output["content"]}
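

# A minimal local smoke test, assuming the model files sit in the current
# directory ("." is a placeholder path, not part of the deployed handler).
# Inference Endpoints constructs EndpointHandler itself, so this block only
# runs when the file is executed directly.
if __name__ == "__main__":
    handler = EndpointHandler(path=".")
    result = handler(
        {"inputs": "Bonjour tout le monde", "parameters": {"do_sample": False}}
    )
    print(result["output"])  # expected: an English translation of the input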