# Copyright (c) ModelScope Contributors. All rights reserved.
import os
from openai import OpenAI
# Pin the process to GPU 0 before any CUDA-using library initializes.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def infer(client, model: str, messages):
    """Send *messages* to the chat-completions endpoint and return the reply text.

    Prints both the request messages and the returned content, then returns
    the content of the first choice.
    """
    response = client.chat.completions.create(model=model, messages=messages)
    reply = response.choices[0].message.content
    print(f'messages: {messages}')
    print(f'scores: {reply}')
    return reply
def run_client(host: str = '127.0.0.1', port: int = 8000):
    """Run one demo request against a local OpenAI-compatible server.

    Connects to ``http://{host}:{port}/v1``, picks the first model the
    server advertises, and scores a fixed question/answer pair via
    :func:`infer`.
    """
    base_url = f'http://{host}:{port}/v1'
    client = OpenAI(api_key='EMPTY', base_url=base_url)
    # Use whichever model the server lists first.
    model = client.models.list().data[0].id
    print(f'model: {model}')
    conversation = [
        {'role': 'user', 'content': 'what is the capital of China?'},
        {'role': 'assistant', 'content': 'Beijing.'},
    ]
    infer(client, model, conversation)
if __name__ == '__main__':
    from swift import run_deploy, DeployArguments
    # Deploy a generative reranker locally via vLLM, then hit it with the
    # demo client. run_deploy appears to yield the bound port and tear the
    # server down on exit — NOTE(review): confirm against swift's docs.
    with run_deploy(
            DeployArguments(
                model='Qwen/Qwen3-Reranker-0.6B',
                task_type='generative_reranker',
                infer_backend='vllm',
                gpu_memory_utilization=0.7,  # leave headroom on the single visible GPU
                verbose=False,
                log_interval=-1)) as port:
        run_client(port=port)
|