Update inference.py
inference.py CHANGED  (+44, -15)
@@ -1,18 +1,47 @@
(15 removed lines; the previous contents of inference.py are not recoverable from the page)
+from transformers import Pipeline, PreTrainedTokenizer, AutoTokenizer
+from typing import Dict, Union, List
+import torch

+class TokenizerPipeline(Pipeline):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)

+    def _sanitize_parameters(self, **kwargs):
+        # Sort incoming parameters (padding, truncation, whether to decode, etc.)
+        # into the preprocess / forward / postprocess buckets
+        preprocess_kwargs = {}
+        if "padding" in kwargs:
+            preprocess_kwargs["padding"] = kwargs["padding"]
+        if "truncation" in kwargs:
+            preprocess_kwargs["truncation"] = kwargs["truncation"]
+
+        postprocess_kwargs = {}
+        if "return_tokens" in kwargs:
+            postprocess_kwargs["return_tokens"] = kwargs["return_tokens"]
+
+        return preprocess_kwargs, {}, postprocess_kwargs

+    def preprocess(self, inputs, **kwargs) -> Dict:
+        # Tokenize the input text
+        return self.tokenizer(inputs, return_tensors="pt", **kwargs)
+
+    def _forward(self, inputs) -> Dict:
+        # Return the preprocessed result directly (no model inference)
+        return inputs
+
+    def postprocess(self, model_outputs, **kwargs) -> Dict:
+        # Convert the output into a readable format
+        input_ids = model_outputs["input_ids"][0]
+
+        if kwargs.get("return_tokens", True):
+            tokens = self.tokenizer.convert_ids_to_tokens(input_ids)
+            return {"tokens": tokens}
+        else:
+            return {"input_ids": input_ids.tolist()}
+
+# Key step: create and export the pipeline instance
+tokenizer = AutoTokenizer.from_pretrained(".")
+pipeline = TokenizerPipeline(tokenizer=tokenizer)
+
+# Optional: add a type hint for HF to parse
+def get_pipeline() -> Pipeline:
+    return pipeline
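A note on running this as committed: in the transformers releases I'm aware of, Pipeline.__init__ takes the model as a required first positional argument, so TokenizerPipeline(tokenizer=tokenizer) may raise a TypeError at import time. Below is a minimal, hypothetical sketch (not part of this commit) of the same tokenize / pass-through / decode flow without subclassing Pipeline; the class name StandaloneTokenizerPipeline is invented for illustration.

# Hypothetical sketch, not part of this commit: reproduces the
# preprocess -> _forward -> postprocess flow without subclassing Pipeline,
# since Pipeline.__init__ expects a model as its first argument.
from typing import Dict
from transformers import AutoTokenizer

class StandaloneTokenizerPipeline:
    def __init__(self, tokenizer):
        self.tokenizer = tokenizer

    def __call__(self, text: str, return_tokens: bool = True, **tokenizer_kwargs) -> Dict:
        # preprocess: tokenize the input text
        encoded = self.tokenizer(text, return_tensors="pt", **tokenizer_kwargs)
        # _forward equivalent: no model, so the encoding passes straight through
        input_ids = encoded["input_ids"][0].tolist()
        # postprocess: convert ids into a readable format
        if return_tokens:
            return {"tokens": self.tokenizer.convert_ids_to_tokens(input_ids)}
        return {"input_ids": input_ids}

# Example usage (assumes the tokenizer files live in the current directory,
# as in the commit):
#   pipe = StandaloneTokenizerPipeline(AutoTokenizer.from_pretrained("."))
#   pipe("Hello world")                        # {"tokens": [...]}
#   pipe("Hello world", return_tokens=False)   # {"input_ids": [...]}

If subclassing Pipeline is genuinely required (for example, for pipeline registration on the Hub), one workaround is to pass a small placeholder model to super().__init__; the details are version-dependent, so treat the sketch above as the safer illustration.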