Upload ms-swift/examples/infer/demo_bert.py with huggingface_hub
ms-swift/examples/infer/demo_bert.py
ADDED
@@ -0,0 +1,53 @@
# Copyright (c) Alibaba, Inc. and its affiliates.
import os
from typing import List

os.environ['CUDA_VISIBLE_DEVICES'] = '0'


def infer_batch(engine: 'InferEngine', infer_requests: List['InferRequest']):
    resp_list = engine.infer(infer_requests)
    query0 = infer_requests[0].messages[0]['content']
    query1 = infer_requests[1].messages[0]['content']
    print(f'query0: {query0}')
    print(f'response0: {resp_list[0].choices[0].message.content}')
    print(f'query1: {query1}')
    print(f'response1: {resp_list[1].choices[0].message.content}')


if __name__ == '__main__':
    # This is an example of BERT with LoRA.
    from swift.llm import InferEngine, InferRequest, PtEngine, load_dataset, safe_snapshot_download, BaseArguments
    from swift.tuners import Swift
    adapter_path = safe_snapshot_download('swift/test_bert')
    args = BaseArguments.from_pretrained(adapter_path)
    args.max_length = 512
    args.truncation_strategy = 'right'
    # method1: build the base model and processor from the saved arguments,
    # attach the LoRA adapter, then wrap them in a PtEngine.
    model, processor = args.get_model_processor()
    model = Swift.from_pretrained(model, adapter_path)
    template = args.get_template(processor)
    engine = PtEngine.from_model_template(model, template, max_batch_size=64)

    # method2: let PtEngine load the base model and the adapter directly.
    # engine = PtEngine(args.model, adapters=[adapter_path], max_batch_size=64,
    #                   task_type=args.task_type, num_labels=args.num_labels)
    # template = args.get_template(engine.processor)
    # engine.default_template = template

    # Here, `load_dataset` is used for convenience; `infer_batch` does not require creating a dataset.
    dataset = load_dataset(['DAMO_NLP/jd:cls#1000'], seed=42)[0]
    print(f'dataset: {dataset}')
    infer_requests = [InferRequest(messages=data['messages']) for data in dataset]
    infer_batch(engine, infer_requests)

    infer_batch(engine, [
        InferRequest(messages=[{
            'role': 'user',
            'content': '今天天气真好呀'  # "The weather is really nice today"
        }]),
        InferRequest(messages=[{
            'role': 'user',
            'content': '真倒霉'  # "How unlucky"
        }])
    ])