yuccaaa committed on
Commit 0ccf423 · verified · 1 Parent(s): eb21099

Upload ms-swift/examples/infer/demo.py with huggingface_hub

Files changed (1)
  1. ms-swift/examples/infer/demo.py +73 -0
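
The commit message says the file was uploaded with huggingface_hub. A minimal sketch of such an upload via `HfApi.upload_file` follows; the repo_id and authentication details are assumptions, not taken from this commit.

# Hypothetical sketch: uploading this file with huggingface_hub.
# The repo_id is a placeholder; the token is assumed to come from
# `huggingface-cli login` or the HF_TOKEN environment variable.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj='ms-swift/examples/infer/demo.py',  # local file to upload
    path_in_repo='ms-swift/examples/infer/demo.py',     # destination path inside the repo
    repo_id='<user-or-org>/<repo>',                      # placeholder, not from this commit
    commit_message='Upload ms-swift/examples/infer/demo.py with huggingface_hub',
)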
ms-swift/examples/infer/demo.py ADDED
@@ -0,0 +1,73 @@
+ # Copyright (c) Alibaba, Inc. and its affiliates.
+ import asyncio
+ import os
+ from typing import List
+
+ os.environ['CUDA_VISIBLE_DEVICES'] = '0'
+
+
+ def infer_batch(engine: 'InferEngine', infer_requests: List['InferRequest']):
+     request_config = RequestConfig(max_tokens=512, temperature=0)
+     metric = InferStats()
+     resp_list = engine.infer(infer_requests, request_config, metrics=[metric])
+     query0 = infer_requests[0].messages[0]['content']
+     print(f'query0: {query0}')
+     print(f'response0: {resp_list[0].choices[0].message.content}')
+     print(f'metric: {metric.compute()}')
+     # metric.reset()  # reuse
+
+
+ def infer_async_batch(engine: 'InferEngine', infer_requests: List['InferRequest']):
+     # The asynchronous interface below is equivalent to the synchronous interface above.
+     request_config = RequestConfig(max_tokens=512, temperature=0)
+
+     async def _run():
+         tasks = [engine.infer_async(infer_request, request_config) for infer_request in infer_requests]
+         return await asyncio.gather(*tasks)
+
+     resp_list = asyncio.run(_run())
+
+     query0 = infer_requests[0].messages[0]['content']
+     print(f'query0: {query0}')
+     print(f'response0: {resp_list[0].choices[0].message.content}')
+
+
+ def infer_stream(engine: 'InferEngine', infer_request: 'InferRequest'):
+     request_config = RequestConfig(max_tokens=512, temperature=0, stream=True)
+     metric = InferStats()
+     gen_list = engine.infer([infer_request], request_config, metrics=[metric])
+     query = infer_request.messages[0]['content']
+     print(f'query: {query}\nresponse: ', end='')
+     for resp in gen_list[0]:
+         if resp is None:
+             continue
+         print(resp.choices[0].delta.content, end='', flush=True)
+     print()
+     print(f'metric: {metric.compute()}')
+
+
+ if __name__ == '__main__':
+     from swift.llm import InferEngine, InferRequest, PtEngine, RequestConfig, load_dataset
+     from swift.plugin import InferStats
+     model = 'Qwen/Qwen2.5-1.5B-Instruct'
+     infer_backend = 'pt'
+
+     if infer_backend == 'pt':
+         engine = PtEngine(model, max_batch_size=64)
+     elif infer_backend == 'vllm':
+         from swift.llm import VllmEngine
+         engine = VllmEngine(model, max_model_len=8192)
+     elif infer_backend == 'lmdeploy':
+         from swift.llm import LmdeployEngine
+         engine = LmdeployEngine(model)
+
+     # Here, `load_dataset` is used for convenience; `infer_batch` does not require creating a dataset.
+     dataset = load_dataset(['AI-ModelScope/alpaca-gpt4-data-zh#1000'], seed=42)[0]
+     print(f'dataset: {dataset}')
+     infer_requests = [InferRequest(**data) for data in dataset]
+     # if infer_backend in {'vllm', 'lmdeploy'}:
+     #     infer_async_batch(engine, infer_requests)
+     infer_batch(engine, infer_requests)
+
+     messages = [{'role': 'user', 'content': 'who are you?'}]
+     infer_stream(engine, InferRequest(messages=messages))
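
As the comment above `load_dataset` notes, `infer_batch` does not require building a dataset; requests can be constructed directly. A minimal usage sketch, assuming the same model and `pt` backend as demo.py above (the prompts and batch size are illustrative):

# Minimal sketch, assuming the same model and `pt` backend as demo.py above.
from swift.llm import InferRequest, PtEngine, RequestConfig

engine = PtEngine('Qwen/Qwen2.5-1.5B-Instruct', max_batch_size=8)
infer_requests = [
    InferRequest(messages=[{'role': 'user', 'content': 'who are you?'}]),
    InferRequest(messages=[{'role': 'user', 'content': 'Introduce ms-swift in one sentence.'}]),
]
resp_list = engine.infer(infer_requests, RequestConfig(max_tokens=128, temperature=0))
for request, resp in zip(infer_requests, resp_list):
    print(f"query: {request.messages[0]['content']}")
    print(f"response: {resp.choices[0].message.content}")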