id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
7,983 | from transformers import AutoModel, AutoTokenizer
import gradio as gr
import mdtex2html
def reset_state():
return [], [] | null |
7,984 | import argparse
import os
import platform
import shutil
from copy import deepcopy
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.trainer_utils import set_seed
def _load_model_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(
args.checkpoint_path, trust_remote_code=True, resume_download=True,
)
if args.cpu_only:
device_map = "cpu"
else:
device_map = "auto"
model = AutoModelForCausalLM.from_pretrained(
args.checkpoint_path,
device_map=device_map,
trust_remote_code=True,
resume_download=True,
).eval()
config = GenerationConfig.from_pretrained(
args.checkpoint_path, trust_remote_code=True, resume_download=True,
)
return model, tokenizer, config | null |
7,985 | import argparse
import os
import platform
import shutil
from copy import deepcopy
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.trainer_utils import set_seed
def _gc():
import gc
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache() | null |
7,986 | import argparse
import os
import platform
import shutil
from copy import deepcopy
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.trainer_utils import set_seed
def _clear_screen():
if platform.system() == "Windows":
os.system("cls")
else:
os.system("clear") | null |
7,987 | import argparse
import os
import platform
import shutil
from copy import deepcopy
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.trainer_utils import set_seed
def _print_history(history):
terminal_width = shutil.get_terminal_size()[0]
print(f'History ({len(history)})'.center(terminal_width, '='))
for index, (query, response) in enumerate(history):
print(f'User[{index}]: {query}')
print(f'QWen[{index}]: {response}')
print('=' * terminal_width) | null |
7,988 | import argparse
import os
import platform
import shutil
from copy import deepcopy
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.trainer_utils import set_seed
def _get_input() -> str:
while True:
try:
message = input('User> ').strip()
except UnicodeDecodeError:
print('[ERROR] Encoding error in input')
continue
except KeyboardInterrupt:
exit(1)
if message:
return message
print('[ERROR] Query is empty') | null |
7,989 | import torch
from transformers import AutoModelForCausalLM
from accelerate import dispatch_model
def _device_map(num_gpus, num_layers):
def load_model_on_gpus(model_name_or_path, num_gpus: int = 2):
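    # Single GPU: let transformers place the model with device_map='auto'.
    # Multiple GPUs: load the weights on CPU first, build a per-layer device map,
    # then dispatch the layers across the requested GPUs via accelerate.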
num_devices = torch.cuda.device_count()
if num_gpus == 1:
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map='auto',
trust_remote_code=True).eval()
elif 1 < num_gpus <= num_devices:
model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map='cpu',
trust_remote_code=True).eval()
num_layers = model.config.num_hidden_layers
device_map = _device_map(num_gpus, num_layers)
print(device_map)
model = dispatch_model(model, device_map=device_map)
else:
raise KeyError
return model | null |
7,990 | from transformers.generation import GenerationConfig
if (not args.regenerate) and os.path.exists(comments_path):
    print("use cache: ", comments_path)
    return
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--path', type=str, default='Qwen-7B/eval/evaluate_ceval.py')
    parser.add_argument('--regenerate', action='store_true', default=False)  # if comments have already been generated, do not regenerate them by default
args = parser.parse_args()
return args | null |
7,991 | from transformers.generation import GenerationConfig
class QWenChat():
def __init__(self):
self.tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
# use bf16
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, bf16=True).eval()
# use fp16
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True, fp16=True).eval()
# use cpu only
# model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="cpu", trust_remote_code=True).eval()
# use auto mode, automatically select precision based on the device.
self.model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen-7B-Chat", device_map="auto", trust_remote_code=True).eval()
# Specify hyperparameters for generation
self.model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
self.history = None
def chat(self, query, system = ""):
# use history
# response, history = self.model.chat(self.tokenizer, query, history=self.history)
        # history is not used by default
response, history = self.model.chat(self.tokenizer, query, history=None)
self.history = history
        return response
def code_comments(context, model = None, **kwargs):
prompt = "\n为以上代码生成细致的中文注释,注意使用合适的语法。要求必须在每个函数开头生成一段统一的函数功能注释。\n除了注释,请保证原始代码内容不变。不要返回除了注释和代码以外的其余信息,不要生成额外代码。\n"
return model.chat(context + prompt)
def deal_folder(model, path, args):
for fl in os.listdir(path):
now_path = os.path.join(path, fl)
if os.path.isfile(now_path):
if (now_path.split(".")[-1] in CodeFileType) and ("_comments" not in now_path):
deal_one_file(model, now_path, args)
elif os.path.isdir(now_path):
deal_folder(model, now_path, args)
else:
print("Please specify a correct path!")
def transfer(args):
model = QWenChat()
if os.path.isfile(args.path):
if (args.path.split(".")[-1] in CodeFileType) and ("_comments" not in args.path):
deal_one_file(model, args.path, args)
elif os.path.isdir(args.path):
deal_folder(model, args.path, args)
else:
print("Please specify a correct path!") | null |
7,992 | import json
import os
import json5
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def text_completion(input_text: str, stop_words) -> str:  # use the model as a plain text-completion (continuation) model
im_end = '<|im_end|>'
if im_end not in stop_words:
stop_words = stop_words + [im_end]
stop_words_ids = [tokenizer.encode(w) for w in stop_words]
    # TODO: add an example implementation of streaming output
input_ids = torch.tensor([tokenizer.encode(input_text)]).to(model.device)
output = model.generate(input_ids, stop_words_ids=stop_words_ids)
output = output.tolist()[0]
output = tokenizer.decode(output, errors="ignore")
assert output.startswith(input_text)
output = output[len(input_text) :].replace('<|endoftext|>', '').replace(im_end, '')
for stop_str in stop_words:
idx = output.find(stop_str)
if idx != -1:
output = output[: idx + len(stop_str)]
    return output  # the continuation of input_text, not including input_text itself
def parse_latest_plugin_call(text):
plugin_name, plugin_args = '', ''
i = text.rfind('\nAction:')
j = text.rfind('\nAction Input:')
k = text.rfind('\nObservation:')
if 0 <= i < j: # If the text has `Action` and `Action input`,
if k < j: # but does not contain `Observation`,
            # then it is likely that `Observation` is omitted by the LLM,
# because the output text may have discarded the stop word.
text = text.rstrip() + '\nObservation:' # Add it back.
k = text.rfind('\nObservation:')
plugin_name = text[i + len('\nAction:') : j].strip()
plugin_args = text[j + len('\nAction Input:') : k].strip()
text = text[:k]
return plugin_name, plugin_args, text
def call_plugin(plugin_name: str, plugin_args: str) -> str:
#
    # Developers should complete this part themselves; the reference implementation here is for demo purposes only, not for production use.
#
if plugin_name == 'google_search':
        # To use SerpAPI, fill in your SERPAPI_API_KEY here!
os.environ["SERPAPI_API_KEY"] = os.getenv("SERPAPI_API_KEY", default='')
from langchain import SerpAPIWrapper
return SerpAPIWrapper().run(json5.loads(plugin_args)['search_query'])
elif plugin_name == 'image_gen':
import urllib.parse
prompt = json5.loads(plugin_args)["prompt"]
prompt = urllib.parse.quote(prompt)
return json.dumps({'image_url': f'https://image.pollinations.ai/prompt/{prompt}'}, ensure_ascii=False)
else:
raise NotImplementedError
def llm_with_plugin(prompt: str, history, list_of_plugin_info=()):
chat_history = [(x['user'], x['bot']) for x in history] + [(prompt, '')]
    # the initial text that the model needs to continue
planning_prompt = build_input_text(chat_history, list_of_plugin_info)
text = ''
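    # ReAct loop: repeatedly ask the model to continue the plan; whenever it emits an
    # Action / Action Input pair, call that plugin and append the Observation, until
    # the model stops calling plugins and produces its final answer.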
while True:
output = text_completion(planning_prompt + text, stop_words=['Observation:', 'Observation:\n'])
action, action_input, output = parse_latest_plugin_call(output)
        if action:  # a plugin needs to be called
            # action / action_input are the plugin to call and its input arguments;
            # observation is the string result returned by the plugin
observation = call_plugin(action, action_input)
output += f'\nObservation: {observation}\nThought:'
text += output
        else:  # generation has finished and no further plugin calls are needed
text += output
break
new_history = []
new_history.extend(history)
new_history.append({'user': prompt, 'bot': text})
return text, new_history | null |
7,993 | import json
from pprint import pprint
import openai
openai.api_base = 'http://localhost:8000/v1'
openai.api_key = 'none'
def call_qwen(messages, functions=None):
print('input:')
pprint(messages, indent=2)
if functions:
response = openai.ChatCompletion.create(model='Qwen',
messages=messages,
functions=functions)
else:
response = openai.ChatCompletion.create(model='Qwen',
messages=messages)
response = response.choices[0]['message']
response = json.loads(json.dumps(response,
ensure_ascii=False)) # fix zh rendering
print('output:')
pprint(response, indent=2)
print()
return response
if __name__ == '__main__':
print('### Test Case 1 - No Function Calling (普通问答、无函数调用) ###')
test_1()
print('### Test Case 2 - Use Qwen-Style Functions (函数调用,千问格式) ###')
test_2()
print('### Test Case 3 - Use GPT-Style Functions (函数调用,GPT格式) ###')
test_3()
print('### Test Case 4 - Use LangChain (接入Langchain) ###')
test_4()
def test_1():
messages = [{'role': 'user', 'content': '你好'}]
call_qwen(messages)
messages.append({'role': 'assistant', 'content': '你好!很高兴为你提供帮助。'})
messages.append({
'role': 'user',
'content': '给我讲一个年轻人奋斗创业最终取得成功的故事。故事只能有一句话。'
})
call_qwen(messages)
messages.append({
'role':
'assistant',
'content':
'故事的主人公叫李明,他来自一个普通的家庭,父母都是普通的工人。李明想要成为一名成功的企业家。……',
})
messages.append({'role': 'user', 'content': '给这个故事起一个标题'})
call_qwen(messages) | null |
7,994 | import json
from pprint import pprint
import openai
openai.api_base = 'http://localhost:8000/v1'
openai.api_key = 'none'
def call_qwen(messages, functions=None):
print('input:')
pprint(messages, indent=2)
if functions:
response = openai.ChatCompletion.create(model='Qwen',
messages=messages,
functions=functions)
else:
response = openai.ChatCompletion.create(model='Qwen',
messages=messages)
response = response.choices[0]['message']
response = json.loads(json.dumps(response,
ensure_ascii=False)) # fix zh rendering
print('output:')
pprint(response, indent=2)
print()
return response
if __name__ == '__main__':
print('### Test Case 1 - No Function Calling (普通问答、无函数调用) ###')
test_1()
print('### Test Case 2 - Use Qwen-Style Functions (函数调用,千问格式) ###')
test_2()
print('### Test Case 3 - Use GPT-Style Functions (函数调用,GPT格式) ###')
test_3()
print('### Test Case 4 - Use LangChain (接入Langchain) ###')
test_4()
def test_2():
functions = [
{
'name_for_human':
'谷歌搜索',
'name_for_model':
'google_search',
'description_for_model':
'谷歌搜索是一个通用搜索引擎,可用于访问互联网、查询百科知识、了解时事新闻等。' +
' Format the arguments as a JSON object.',
'parameters': [{
'name': 'search_query',
'description': '搜索关键词或短语',
'required': True,
'schema': {
'type': 'string'
},
}],
},
{
'name_for_human':
'文生图',
'name_for_model':
'image_gen',
'description_for_model':
'文生图是一个AI绘画(图像生成)服务,输入文本描述,返回根据文本作画得到的图片的URL。' +
' Format the arguments as a JSON object.',
'parameters': [{
'name': 'prompt',
'description': '英文关键词,描述了希望图像具有什么内容',
'required': True,
'schema': {
'type': 'string'
},
}],
},
]
messages = [{'role': 'user', 'content': '(请不要调用工具)\n\n你好'}]
call_qwen(messages, functions)
messages.append({
'role': 'assistant',
'content': '你好!很高兴见到你。有什么我可以帮忙的吗?'
}, )
messages.append({'role': 'user', 'content': '搜索一下谁是周杰伦'})
call_qwen(messages, functions)
messages.append({
'role': 'assistant',
'content': '我应该使用Google搜索查找相关信息。',
'function_call': {
'name': 'google_search',
'arguments': '{"search_query": "周杰伦"}',
},
})
messages.append({
'role': 'function',
'name': 'google_search',
'content': 'Jay Chou is a Taiwanese singer.',
})
call_qwen(messages, functions)
messages.append(
{
'role': 'assistant',
'content': '周杰伦(Jay Chou)是一位来自台湾的歌手。',
}, )
messages.append({'role': 'user', 'content': '搜索一下他老婆是谁'})
call_qwen(messages, functions)
messages.append({
'role': 'assistant',
'content': '我应该使用Google搜索查找相关信息。',
'function_call': {
'name': 'google_search',
'arguments': '{"search_query": "周杰伦 老婆"}',
},
})
messages.append({
'role': 'function',
'name': 'google_search',
'content': 'Hannah Quinlivan'
})
call_qwen(messages, functions)
messages.append(
{
'role': 'assistant',
'content': '周杰伦的老婆是Hannah Quinlivan。',
}, )
messages.append({'role': 'user', 'content': '用文生图工具画个可爱的小猫吧,最好是黑猫'})
call_qwen(messages, functions)
messages.append({
'role': 'assistant',
'content': '我应该使用文生图API来生成一张可爱的小猫图片。',
'function_call': {
'name': 'image_gen',
'arguments': '{"prompt": "cute black cat"}',
},
})
messages.append({
'role':
'function',
'name':
'image_gen',
'content':
'{"image_url": "https://image.pollinations.ai/prompt/cute%20black%20cat"}',
})
call_qwen(messages, functions) | null |
7,995 | import json
from pprint import pprint
import openai
openai.api_base = 'http://localhost:8000/v1'
openai.api_key = 'none'
def call_qwen(messages, functions=None):
print('input:')
pprint(messages, indent=2)
if functions:
response = openai.ChatCompletion.create(model='Qwen',
messages=messages,
functions=functions)
else:
response = openai.ChatCompletion.create(model='Qwen',
messages=messages)
response = response.choices[0]['message']
response = json.loads(json.dumps(response,
ensure_ascii=False)) # fix zh rendering
print('output:')
pprint(response, indent=2)
print()
return response
if __name__ == '__main__':
print('### Test Case 1 - No Function Calling (普通问答、无函数调用) ###')
test_1()
print('### Test Case 2 - Use Qwen-Style Functions (函数调用,千问格式) ###')
test_2()
print('### Test Case 3 - Use GPT-Style Functions (函数调用,GPT格式) ###')
test_3()
print('### Test Case 4 - Use LangChain (接入Langchain) ###')
test_4()
def test_3():
functions = [{
'name': 'get_current_weather',
'description': 'Get the current weather in a given location.',
'parameters': {
'type': 'object',
'properties': {
'location': {
'type': 'string',
'description':
'The city and state, e.g. San Francisco, CA',
},
'unit': {
'type': 'string',
'enum': ['celsius', 'fahrenheit']
},
},
'required': ['location'],
},
}]
messages = [{
'role': 'user',
# Note: The current version of Qwen-7B-Chat (as of 2023.08) performs okay with Chinese tool-use prompts,
# but performs terribly when it comes to English tool-use prompts, due to a mistake in data collecting.
'content': '波士顿天气如何?',
}]
call_qwen(messages, functions)
messages.append(
{
'role': 'assistant',
'content': None,
'function_call': {
'name': 'get_current_weather',
'arguments': '{"location": "Boston, MA"}',
},
}, )
messages.append({
'role':
'function',
'name':
'get_current_weather',
'content':
'{"temperature": "22", "unit": "celsius", "description": "Sunny"}',
})
call_qwen(messages, functions) | null |
7,996 |
def test_4():
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(
model_name='Qwen',
openai_api_base='http://localhost:8000/v1',
openai_api_key='EMPTY',
streaming=False,
)
tools = load_tools(['arxiv'], )
agent_chain = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
# TODO: The performance is okay with Chinese prompts, but not so good when it comes to English.
agent_chain.run('查一下论文 1605.08386 的信息') | null |
7,997 | import json
def format_train_sample(messages):
#
# You do not need the `function` role, as Qwen's function calling is actually implemented via ReAct,
# not by adding a `function` role or `function_call` message. See openai_api.py for details.
#
# If you need the `system` role, you might need to modify `finetune.py` accordingly.
#
assert set(m["role"] for m in messages) == {"user", "assistant"}
sample = {
"conversations": [
{
"from": m["role"],
"value": m["content"],
}
for m in messages
]
}
return sample | null |
7,998 | import json
TOOL_DESC = """{name_for_model}: Call this tool to interact with the {name_for_human} API. What is the {name_for_human} API useful for? {description_for_model} Parameters: {parameters}"""
REACT_INSTRUCTION = """Answer the following questions as best you can. You have access to the following APIs:
{tools_text}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tools_name_text}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can be repeated zero or more times)
Thought: I now know the final answer
Final Answer: the final answer to the original input question
Begin!"""
def build_react_instruction(functions):
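    # Render each function spec as a TOOL_DESC line, then fill the ReAct prompt
    # template above with the tool descriptions and the list of tool names.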
tools_text = []
tools_name_text = []
for func_info in functions:
name = func_info.get("name", "")
name_m = func_info.get("name_for_model", name)
name_h = func_info.get("name_for_human", name)
desc = func_info.get("description", "")
desc_m = func_info.get("description_for_model", desc)
tool = TOOL_DESC.format(
name_for_model=name_m,
name_for_human=name_h,
description_for_model=desc_m,
parameters=json.dumps(func_info["parameters"], ensure_ascii=False),
)
tools_text.append(tool)
tools_name_text.append(name_m)
tools_text = "\n\n".join(tools_text)
tools_name_text = ", ".join(tools_name_text)
instruction = REACT_INSTRUCTION.format(
tools_text=tools_text,
tools_name_text=tools_name_text,
)
return instruction | null |
7,999 | from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
from typing import Optional, Callable, List, Tuple, Union
import copy
import torch
from transformers import AutoTokenizer
from transformers.generation.logits_process import LogitsProcessorList
from packaging import version
def get_stop_words_ids(chat_format, tokenizer):
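    # Stop generation at the start of the next "Human:" turn (raw format) or at the
    # ChatML <|im_end|> / <|im_start|> control tokens (chatml format).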
if chat_format == "raw":
stop_words_ids = [tokenizer.encode("Human:"), [tokenizer.eod_id]]
elif chat_format == "chatml":
stop_words_ids = [[tokenizer.im_end_id], [tokenizer.im_start_id]]
else:
raise NotImplementedError(f"Unknown chat format {chat_format!r}")
return stop_words_ids | null |
8,000 | from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
from typing import Optional, Callable, List, Tuple, Union
import copy
import torch
from transformers import AutoTokenizer
from transformers.generation.logits_process import LogitsProcessorList
from packaging import version
def make_context(
tokenizer: PreTrainedTokenizer,
query: str,
history: List[Tuple[str, str]] = None,
system: str = "",
max_window_size: int = 6144,
chat_format: str = "chatml",
):
if history is None:
history = []
if chat_format == "chatml":
im_start, im_end = "<|im_start|>", "<|im_end|>"
im_start_tokens = [tokenizer.im_start_id]
im_end_tokens = [tokenizer.im_end_id]
nl_tokens = tokenizer.encode("\n")
def _tokenize_str(role, content):
return f"{role}\n{content}", tokenizer.encode(
role, allowed_special=set()
) + nl_tokens + tokenizer.encode(content, allowed_special=set())
system_text, system_tokens_part = _tokenize_str("system", system)
system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
raw_text = ""
context_tokens = []
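        # Walk the history from the most recent turn backwards, prepending turns
        # until adding another one would exceed max_window_size tokens; older
        # turns beyond the window are dropped.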
for turn_query, turn_response in reversed(history):
query_text, query_tokens_part = _tokenize_str("user", turn_query)
query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
response_text, response_tokens_part = _tokenize_str(
"assistant", turn_response
)
response_tokens = im_start_tokens + response_tokens_part + im_end_tokens
next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
prev_chat = (
f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
)
current_context_size = (
len(system_tokens) + len(next_context_tokens) + len(context_tokens)
)
if current_context_size < max_window_size:
context_tokens = next_context_tokens + context_tokens
raw_text = prev_chat + raw_text
else:
break
context_tokens = system_tokens + context_tokens
raw_text = f"{im_start}{system_text}{im_end}" + raw_text
context_tokens += (
nl_tokens
+ im_start_tokens
+ _tokenize_str("user", query)[1]
+ im_end_tokens
+ nl_tokens
+ im_start_tokens
+ tokenizer.encode("assistant")
+ nl_tokens
)
raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
elif chat_format == "raw":
raw_text = query
context_tokens = tokenizer.encode(raw_text)
else:
raise NotImplementedError(f"Unknown chat format {chat_format!r}")
return raw_text, context_tokens | null |
8,001 | import argparse
import base64
import collections
import logging
import unicodedata
from pathlib import Path
import regex as re
from tqdm.contrib.logging import tqdm_logging_redirect
logger = logging.getLogger(__name__)
def load_tiktoken_bpe(tiktoken_bpe_file: str) -> "dict[bytes, int]":
def dump_tiktoken_bpe(bpe_ranks: "dict[bytes, int]", tiktoken_bpe_file: str) -> None:
def learn_bpe(
freqs: "dict[str,int]", existing: "dict[bytes, int]"
) -> "tuple[bytes, bytes]":
def load_expand_vocab(path: Path) -> "dict[str, int]":
def make_new_merges_by_bpe(
input_path: Path, output_path: Path, expand_path: Path, start_id: int
) -> None:
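    # Learn extra BPE merges for the expansion vocabulary on top of the existing
    # tiktoken ranks, then write them out with token ids starting at start_id.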
mergeable_ranks = load_tiktoken_bpe(input_path)
if not start_id or start_id == -1:
start_id = len(mergeable_ranks)
elif start_id < len(mergeable_ranks):
logger.warning(
f"start_id {start_id} is too small, existing merges will be overridden, DONOT DO THIS. changed to {len(mergeable_ranks)}"
)
start_id = len(mergeable_ranks)
else:
start_id = start_id
expand_vocab_freqs = load_expand_vocab(expand_path)
for word in list(expand_vocab_freqs):
token = word.encode("utf-8")
if token in mergeable_ranks:
logger.warning(f"word {word} is already a token {token}, skipping")
del expand_vocab_freqs[word]
logger.info(f"number of existing merges: {len(mergeable_ranks)}")
logger.info(f"number of words for expanding: {len(expand_vocab_freqs)}")
new_merges = learn_bpe(expand_vocab_freqs, mergeable_ranks)
logger.info(f"number of newly learned merges: {len(new_merges)}")
extra_merges = {p[0] + p[1]: i for i, p in enumerate(new_merges, start=start_id)}
dump_tiktoken_bpe(extra_merges, output_path) | null |
8,002 | from dataclasses import dataclass, field
import json
import math
import logging
import os
from typing import Dict, Optional, List
import torch
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
import transformers
from transformers import Trainer, GPTQConfig, deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from accelerate.utils import DistributedType
IGNORE_TOKEN_ID = LabelSmoother.ignore_index
def preprocess(
sources,
tokenizer: transformers.PreTrainedTokenizer,
max_len: int,
system_message: str = "You are a helpful assistant."
) -> Dict:
roles = {"user": "<|im_start|>user", "assistant": "<|im_start|>assistant"}
im_start = tokenizer.im_start_id
im_end = tokenizer.im_end_id
nl_tokens = tokenizer('\n').input_ids
_system = tokenizer('system').input_ids + nl_tokens
_user = tokenizer('user').input_ids + nl_tokens
_assistant = tokenizer('assistant').input_ids + nl_tokens
# Apply prompt templates
input_ids, targets = [], []
for i, source in enumerate(sources):
if roles[source[0]["from"]] != roles["user"]:
source = source[1:]
input_id, target = [], []
system = [im_start] + _system + tokenizer(system_message).input_ids + [im_end] + nl_tokens
input_id += system
target += [im_start] + [IGNORE_TOKEN_ID] * (len(system)-3) + [im_end] + nl_tokens
assert len(input_id) == len(target)
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
_input_id = tokenizer(role).input_ids + nl_tokens + \
tokenizer(sentence["value"]).input_ids + [im_end] + nl_tokens
input_id += _input_id
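            # Build the label sequence: the content of user turns is masked with
            # IGNORE_TOKEN_ID, so the loss is effectively computed only on the
            # assistant's reply tokens (and the surrounding ChatML markers).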
if role == '<|im_start|>user':
_target = [im_start] + [IGNORE_TOKEN_ID] * (len(_input_id)-3) + [im_end] + nl_tokens
elif role == '<|im_start|>assistant':
_target = [im_start] + [IGNORE_TOKEN_ID] * len(tokenizer(role).input_ids) + \
_input_id[len(tokenizer(role).input_ids)+1:-2] + [im_end] + nl_tokens
else:
raise NotImplementedError
target += _target
assert len(input_id) == len(target)
input_id += [tokenizer.pad_token_id] * (max_len - len(input_id))
target += [IGNORE_TOKEN_ID] * (max_len - len(target))
input_ids.append(input_id[:max_len])
targets.append(target[:max_len])
input_ids = torch.tensor(input_ids, dtype=torch.int)
targets = torch.tensor(targets, dtype=torch.int)
return dict(
input_ids=input_ids,
labels=targets,
attention_mask=input_ids.ne(tokenizer.pad_token_id),
) | null |
8,003 | from dataclasses import dataclass, field
import json
import math
import logging
import os
from typing import Dict, Optional, List
import torch
from torch.utils.data import Dataset
from deepspeed import zero
from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus
import transformers
from transformers import Trainer, GPTQConfig, deepspeed
from transformers.trainer_pt_utils import LabelSmoother
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from accelerate.utils import DistributedType
@dataclass
class ModelArguments:
model_name_or_path: Optional[str] = field(default="Qwen/Qwen-7B")
@dataclass
class DataArguments:
data_path: str = field(
default=None, metadata={"help": "Path to the training data."}
)
eval_data_path: str = field(
default=None, metadata={"help": "Path to the evaluation data."}
)
lazy_preprocess: bool = False
@dataclass
class TrainingArguments(transformers.TrainingArguments):
cache_dir: Optional[str] = field(default=None)
optim: str = field(default="adamw_torch")
model_max_length: int = field(
default=8192,
metadata={
"help": "Maximum sequence length. Sequences will be right padded (and possibly truncated)."
},
)
use_lora: bool = False
@dataclass
class LoraArguments:
lora_r: int = 64
lora_alpha: int = 16
lora_dropout: float = 0.05
lora_target_modules: List[str] = field(
default_factory=lambda: ["c_attn", "c_proj", "w1", "w2"]
)
lora_weight_path: str = ""
lora_bias: str = "none"
q_lora: bool = False
local_rank = None
def safe_save_model_for_hf_trainer(trainer: transformers.Trainer, output_dir: str, bias="none"):
"""Collects the state dict and dump to disk."""
# check if zero3 mode enabled
if deepspeed.is_deepspeed_zero3_enabled():
state_dict = trainer.model_wrapped._zero3_consolidated_16bit_state_dict()
else:
if trainer.args.use_lora:
state_dict = get_peft_state_maybe_zero_3(
trainer.model.named_parameters(), bias
)
else:
state_dict = trainer.model.state_dict()
if trainer.args.should_save and trainer.args.local_rank == 0:
trainer._save(output_dir, state_dict=state_dict)
def make_supervised_data_module(
tokenizer: transformers.PreTrainedTokenizer, data_args, max_len,
) -> Dict:
"""Make dataset and collator for supervised fine-tuning."""
dataset_cls = (
LazySupervisedDataset if data_args.lazy_preprocess else SupervisedDataset
)
rank0_print("Loading data...")
train_json = json.load(open(data_args.data_path, "r"))
train_dataset = dataset_cls(train_json, tokenizer=tokenizer, max_len=max_len)
if data_args.eval_data_path:
eval_json = json.load(open(data_args.eval_data_path, "r"))
eval_dataset = dataset_cls(eval_json, tokenizer=tokenizer, max_len=max_len)
else:
eval_dataset = None
return dict(train_dataset=train_dataset, eval_dataset=eval_dataset)
def train():
global local_rank
parser = transformers.HfArgumentParser(
(ModelArguments, DataArguments, TrainingArguments, LoraArguments)
)
(
model_args,
data_args,
training_args,
lora_args,
) = parser.parse_args_into_dataclasses()
# This serves for single-gpu qlora.
if getattr(training_args, 'deepspeed', None) and int(os.environ.get("WORLD_SIZE", 1))==1:
training_args.distributed_state.distributed_type = DistributedType.DEEPSPEED
local_rank = training_args.local_rank
device_map = None
world_size = int(os.environ.get("WORLD_SIZE", 1))
ddp = world_size != 1
if lora_args.q_lora:
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)} if ddp else "auto"
if len(training_args.fsdp) > 0 or deepspeed.is_deepspeed_zero3_enabled():
logging.warning(
"FSDP or ZeRO3 are incompatible with QLoRA."
)
is_chat_model = 'chat' in model_args.model_name_or_path.lower()
if (
training_args.use_lora
and not lora_args.q_lora
and deepspeed.is_deepspeed_zero3_enabled()
and not is_chat_model
):
raise RuntimeError("ZeRO3 is incompatible with LoRA when finetuning on base model.")
model_load_kwargs = {
'low_cpu_mem_usage': not deepspeed.is_deepspeed_zero3_enabled(),
}
# Set RoPE scaling factor
config = transformers.AutoConfig.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
trust_remote_code=True,
)
config.use_cache = False
# Load model and tokenizer
model = transformers.AutoModelForCausalLM.from_pretrained(
model_args.model_name_or_path,
config=config,
cache_dir=training_args.cache_dir,
device_map=device_map,
trust_remote_code=True,
quantization_config=GPTQConfig(
bits=4, disable_exllama=True
)
if training_args.use_lora and lora_args.q_lora
else None,
**model_load_kwargs,
)
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=training_args.cache_dir,
model_max_length=training_args.model_max_length,
padding_side="right",
use_fast=False,
trust_remote_code=True,
)
tokenizer.pad_token_id = tokenizer.eod_id
if training_args.use_lora:
if lora_args.q_lora or is_chat_model:
modules_to_save = None
else:
modules_to_save = ["wte", "lm_head"]
lora_config = LoraConfig(
r=lora_args.lora_r,
lora_alpha=lora_args.lora_alpha,
target_modules=lora_args.lora_target_modules,
lora_dropout=lora_args.lora_dropout,
bias=lora_args.lora_bias,
task_type="CAUSAL_LM",
modules_to_save=modules_to_save # This argument serves for adding new tokens.
)
if lora_args.q_lora:
model = prepare_model_for_kbit_training(
model, use_gradient_checkpointing=training_args.gradient_checkpointing
)
model = get_peft_model(model, lora_config)
# Print peft trainable params
model.print_trainable_parameters()
if training_args.gradient_checkpointing:
model.enable_input_require_grads()
# Load data
data_module = make_supervised_data_module(
tokenizer=tokenizer, data_args=data_args, max_len=training_args.model_max_length
)
    # Start trainer
trainer = Trainer(
model=model, tokenizer=tokenizer, args=training_args, **data_module
)
trainer.train()
trainer.save_state()
safe_save_model_for_hf_trainer(trainer=trainer, output_dir=training_args.output_dir, bias=lora_args.lora_bias) | null |
8,004 | import argparse
import json
from typing import Dict
import logging
import torch
import transformers
from transformers import AutoTokenizer
from transformers.trainer_pt_utils import LabelSmoother
from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig
IGNORE_TOKEN_ID = LabelSmoother.ignore_index
def preprocess(
sources,
tokenizer: transformers.PreTrainedTokenizer,
max_len: int,
system_message: str = "You are a helpful assistant."
) -> Dict:
roles = {"user": "<|im_start|>user", "assistant": "<|im_start|>assistant"}
im_start = tokenizer.im_start_id
im_end = tokenizer.im_end_id
nl_tokens = tokenizer('\n').input_ids
_system = tokenizer('system').input_ids + nl_tokens
_user = tokenizer('user').input_ids + nl_tokens
_assistant = tokenizer('assistant').input_ids + nl_tokens
# Apply prompt templates
data = []
# input_ids, targets = [], []
for i, source in enumerate(sources):
source = source["conversations"]
if roles[source[0]["from"]] != roles["user"]:
source = source[1:]
input_id, target = [], []
system = [im_start] + _system + tokenizer(system_message).input_ids + [im_end] + nl_tokens
input_id += system
target += [im_start] + [IGNORE_TOKEN_ID] * (len(system)-3) + [im_end] + nl_tokens
assert len(input_id) == len(target)
for j, sentence in enumerate(source):
role = roles[sentence["from"]]
_input_id = tokenizer(role).input_ids + nl_tokens + \
tokenizer(sentence["value"]).input_ids + [im_end] + nl_tokens
input_id += _input_id
if role == '<|im_start|>user':
_target = [im_start] + [IGNORE_TOKEN_ID] * (len(_input_id)-3) + [im_end] + nl_tokens
elif role == '<|im_start|>assistant':
_target = [im_start] + [IGNORE_TOKEN_ID] * len(tokenizer(role).input_ids) + \
_input_id[len(tokenizer(role).input_ids)+1:-2] + [im_end] + nl_tokens
else:
raise NotImplementedError
target += _target
assert len(input_id) == len(target)
input_id = torch.tensor(input_id[:max_len], dtype=torch.int)
target = torch.tensor(target[:max_len], dtype=torch.int)
data.append(dict(input_ids=input_id, attention_mask=input_id.ne(tokenizer.pad_token_id)))
return data | null |
8,006 | from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList
from typing import Optional, Callable, List, Tuple, Union
import copy
import torch
from transformers import AutoTokenizer
from transformers.generation.logits_process import LogitsProcessorList
from packaging import version
def make_context(
tokenizer: PreTrainedTokenizer,
query: str,
history: List[Tuple[str, str]] = None,
system: str = "",
max_window_size: int = 6144,
chat_format: str = "chatml",
):
if history is None:
history = []
if chat_format == "chatml":
im_start_tokens = [tokenizer.im_start_id]
im_end_tokens = [tokenizer.im_end_id]
im_start, im_end = tokenizer.decode(im_start_tokens, skip_special_tokens=False), tokenizer.decode(im_end_tokens, skip_special_tokens=False)
nl_tokens = tokenizer.encode("\n")
def _tokenize_str(role, content):
return f"{role}\n{content}", tokenizer.encode(
role, allowed_special=set()
) + nl_tokens + tokenizer.encode(content, allowed_special=set())
system_text, system_tokens_part = _tokenize_str("system", system)
system_tokens = im_start_tokens + system_tokens_part + im_end_tokens
raw_text = ""
context_tokens = []
for turn_query, turn_response in reversed(history):
query_text, query_tokens_part = _tokenize_str("user", turn_query)
query_tokens = im_start_tokens + query_tokens_part + im_end_tokens
response_text, response_tokens_part = _tokenize_str(
"assistant", turn_response
)
response_tokens = im_start_tokens + response_tokens_part + im_end_tokens
next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens
prev_chat = (
f"\n{im_start}{query_text}{im_end}\n{im_start}{response_text}{im_end}"
)
current_context_size = (
len(system_tokens) + len(next_context_tokens) + len(context_tokens)
)
if current_context_size < max_window_size:
context_tokens = next_context_tokens + context_tokens
raw_text = prev_chat + raw_text
else:
break
context_tokens = system_tokens + context_tokens
raw_text = f"{im_start}{system_text}{im_end}" + raw_text
context_tokens += (
nl_tokens
+ im_start_tokens
+ _tokenize_str("user", query)[1]
+ im_end_tokens
+ nl_tokens
+ im_start_tokens
+ tokenizer.encode("assistant")
+ nl_tokens
)
raw_text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
elif chat_format == "raw":
raw_text = query
context_tokens = tokenizer.encode(raw_text)
else:
raise NotImplementedError(f"Unknown chat format {chat_format!r}")
return raw_text, context_tokens | null |
8,007 | import base64
import copy
import json
import time
from argparse import ArgumentParser
from contextlib import asynccontextmanager
from pprint import pprint
from typing import Dict, List, Literal, Optional, Union
import torch
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import Response
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def _gc(forced: bool = False):
global args
if args.disable_gc and not forced:
return
import gc
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
async def lifespan(app: FastAPI): # collects GPU memory
yield
_gc(forced=True) | null |
8,008 | import base64
import copy
import json
import time
from argparse import ArgumentParser
from contextlib import asynccontextmanager
from pprint import pprint
from typing import Dict, List, Literal, Optional, Union
import torch
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import Response
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
class ModelCard(BaseModel):
id: str
object: str = 'model'
created: int = Field(default_factory=lambda: int(time.time()))
owned_by: str = 'owner'
root: Optional[str] = None
parent: Optional[str] = None
permission: Optional[list] = None
class ModelList(BaseModel):
object: str = 'list'
data: List[ModelCard] = []
async def list_models():
global model_args
model_card = ModelCard(id='gpt-3.5-turbo')
return ModelList(data=[model_card]) | null |
8,009 | import base64
import copy
import json
import time
from argparse import ArgumentParser
from contextlib import asynccontextmanager
from pprint import pprint
from typing import Dict, List, Literal, Optional, Union
import torch
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import Response
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def _gc(forced: bool = False):
global args
if args.disable_gc and not forced:
return
import gc
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
class ChatMessage(BaseModel):
role: Literal['user', 'assistant', 'system', 'function']
content: Optional[str]
function_call: Optional[Dict] = None
class ChatCompletionRequest(BaseModel):
model: str
messages: List[ChatMessage]
functions: Optional[List[Dict]] = None
temperature: Optional[float] = None
top_p: Optional[float] = None
top_k: Optional[int] = None
max_length: Optional[int] = None
stream: Optional[bool] = False
stop: Optional[List[str]] = None
class ChatCompletionResponseChoice(BaseModel):
index: int
message: Union[ChatMessage]
finish_reason: Literal['stop', 'length', 'function_call']
class ChatCompletionResponse(BaseModel):
model: str
object: Literal['chat.completion', 'chat.completion.chunk']
choices: List[Union[ChatCompletionResponseChoice,
ChatCompletionResponseStreamChoice]]
created: Optional[int] = Field(default_factory=lambda: int(time.time()))
def add_extra_stop_words(stop_words):
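    # Also register each stop word with its leading newlines stripped, since the
    # model may emit the stop word without the preceding blank line.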
if stop_words:
_stop_words = []
_stop_words.extend(stop_words)
for x in stop_words:
s = x.lstrip('\n')
if s and (s not in _stop_words):
_stop_words.append(s)
return _stop_words
return stop_words
def trim_stop_words(response, stop_words):
if stop_words:
for stop in stop_words:
idx = response.find(stop)
if idx != -1:
response = response[:idx]
return response
_TEXT_COMPLETION_CMD = object()
def parse_messages(messages, functions):
if all(m.role != 'user' for m in messages):
raise HTTPException(
status_code=400,
detail='Invalid request: Expecting at least one user message.',
)
messages = copy.deepcopy(messages)
if messages[0].role == 'system':
system = messages.pop(0).content.lstrip('\n').rstrip()
else:
system = 'You are a helpful assistant.'
if functions:
tools_text = []
tools_name_text = []
for func_info in functions:
name = func_info.get('name', '')
name_m = func_info.get('name_for_model', name)
name_h = func_info.get('name_for_human', name)
desc = func_info.get('description', '')
desc_m = func_info.get('description_for_model', desc)
tool = TOOL_DESC.format(
name_for_model=name_m,
name_for_human=name_h,
# Hint: You can add the following format requirements in description:
# "Format the arguments as a JSON object."
# "Enclose the code within triple backticks (`) at the beginning and end of the code."
description_for_model=desc_m,
parameters=json.dumps(func_info['parameters'],
ensure_ascii=False),
)
tools_text.append(tool)
tools_name_text.append(name_m)
tools_text = '\n\n'.join(tools_text)
tools_name_text = ', '.join(tools_name_text)
instruction = (REACT_INSTRUCTION.format(
tools_text=tools_text,
tools_name_text=tools_name_text,
).lstrip('\n').rstrip())
else:
instruction = ''
messages_with_fncall = messages
messages = []
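    # Fold the OpenAI-style message list into ReAct-format turns: results from the
    # 'function' role are appended to the previous assistant message as
    # "Observation: ..." lines, and assistant function_calls are rewritten as
    # "Thought / Action / Action Input" text.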
for m_idx, m in enumerate(messages_with_fncall):
role, content, func_call = m.role, m.content, m.function_call
content = content or ''
content = content.lstrip('\n').rstrip()
if role == 'function':
if (len(messages) == 0) or (messages[-1].role != 'assistant'):
raise HTTPException(
status_code=400,
detail=
'Invalid request: Expecting role assistant before role function.',
)
messages[-1].content += f'\nObservation: {content}'
if m_idx == len(messages_with_fncall) - 1:
# add a prefix for text completion
messages[-1].content += '\nThought:'
elif role == 'assistant':
if len(messages) == 0:
raise HTTPException(
status_code=400,
detail=
'Invalid request: Expecting role user before role assistant.',
)
if func_call is None:
if functions:
content = f'Thought: I now know the final answer.\nFinal Answer: {content}'
else:
f_name, f_args = func_call['name'], func_call['arguments']
if not content.startswith('Thought:'):
content = f'Thought: {content}'
content = f'{content}\nAction: {f_name}\nAction Input: {f_args}'
if messages[-1].role == 'user':
messages.append(
ChatMessage(role='assistant',
content=content.lstrip('\n').rstrip()))
else:
messages[-1].content += '\n' + content
elif role == 'user':
messages.append(
ChatMessage(role='user',
content=content.lstrip('\n').rstrip()))
else:
raise HTTPException(
status_code=400,
detail=f'Invalid request: Incorrect role {role}.')
query = _TEXT_COMPLETION_CMD
if messages[-1].role == 'user':
query = messages[-1].content
messages = messages[:-1]
if len(messages) % 2 != 0:
raise HTTPException(status_code=400, detail='Invalid request')
history = [] # [(Q1, A1), (Q2, A2), ..., (Q_last_turn, A_last_turn)]
for i in range(0, len(messages), 2):
if messages[i].role == 'user' and messages[i + 1].role == 'assistant':
usr_msg = messages[i].content.lstrip('\n').rstrip()
bot_msg = messages[i + 1].content.lstrip('\n').rstrip()
if instruction and (i == len(messages) - 2):
usr_msg = f'{instruction}\n\nQuestion: {usr_msg}'
instruction = ''
history.append([usr_msg, bot_msg])
else:
raise HTTPException(
status_code=400,
detail=
'Invalid request: Expecting exactly one user (or function) role before every assistant role.',
)
if instruction:
assert query is not _TEXT_COMPLETION_CMD
query = f'{instruction}\n\nQuestion: {query}'
return query, history, system
def parse_response(response):
func_name, func_args = '', ''
i = response.find('\nAction:')
j = response.find('\nAction Input:')
k = response.find('\nObservation:')
if 0 <= i < j: # If the text has `Action` and `Action input`,
if k < j: # but does not contain `Observation`,
# then it is likely that `Observation` is omitted by the LLM,
# because the output text may have discarded the stop word.
response = response.rstrip() + '\nObservation:' # Add it back.
k = response.find('\nObservation:')
func_name = response[i + len('\nAction:'):j].strip()
func_args = response[j + len('\nAction Input:'):k].strip()
if func_name:
response = response[:i]
t = response.find('Thought: ')
if t >= 0:
response = response[t + len('Thought: '):]
response = response.strip()
choice_data = ChatCompletionResponseChoice(
index=0,
message=ChatMessage(
role='assistant',
content=response,
function_call={
'name': func_name,
'arguments': func_args
},
),
finish_reason='function_call',
)
return choice_data
z = response.rfind('\nFinal Answer: ')
if z >= 0:
response = response[z + len('\nFinal Answer: '):]
choice_data = ChatCompletionResponseChoice(
index=0,
message=ChatMessage(role='assistant', content=response),
finish_reason='stop',
)
return choice_data
def text_complete_last_message(history, stop_words_ids, gen_kwargs, system):
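    # Rebuild the ChatML prompt from the system message and the chat history, then
    # drop the trailing <|im_end|> so the model continues (text-completes) the last
    # assistant message.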
im_start = '<|im_start|>'
im_end = '<|im_end|>'
prompt = f'{im_start}system\n{system}{im_end}'
for i, (query, response) in enumerate(history):
query = query.lstrip('\n').rstrip()
response = response.lstrip('\n').rstrip()
prompt += f'\n{im_start}user\n{query}{im_end}'
prompt += f'\n{im_start}assistant\n{response}{im_end}'
prompt = prompt[:-len(im_end)]
_stop_words_ids = [tokenizer.encode(im_end)]
if stop_words_ids:
for s in stop_words_ids:
_stop_words_ids.append(s)
stop_words_ids = _stop_words_ids
input_ids = torch.tensor([tokenizer.encode(prompt)]).to(model.device)
output = model.generate(input_ids,
stop_words_ids=stop_words_ids,
**gen_kwargs).tolist()[0]
output = tokenizer.decode(output, errors='ignore')
assert output.startswith(prompt)
output = output[len(prompt):]
output = trim_stop_words(output, ['<|endoftext|>', im_end])
print(f'<completion>\n{prompt}\n<!-- *** -->\n{output}\n</completion>')
return output
async def predict(
query: str,
history: List[List[str]],
model_id: str,
stop_words: List[str],
gen_kwargs: Dict,
system: str,
):
global model, tokenizer
choice_data = ChatCompletionResponseStreamChoice(
index=0, delta=DeltaMessage(role='assistant'), finish_reason=None)
chunk = ChatCompletionResponse(model=model_id,
choices=[choice_data],
object='chat.completion.chunk')
yield '{}'.format(_dump_json(chunk, exclude_unset=True))
current_length = 0
stop_words_ids = [tokenizer.encode(s)
for s in stop_words] if stop_words else None
delay_token_num = max([len(x) for x in stop_words]) if stop_words_ids else 0
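    # Hold back the trailing `delay_token_num` characters of each partial response so
    # that a stop word still being generated is never streamed to the client before
    # it can be trimmed off.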
response_generator = model.chat_stream(tokenizer,
query,
history=history,
stop_words_ids=stop_words_ids,
system=system,
**gen_kwargs)
for _new_response in response_generator:
if len(_new_response) <= delay_token_num:
continue
new_response = _new_response[:-delay_token_num] if delay_token_num else _new_response
if len(new_response) == current_length:
continue
new_text = new_response[current_length:]
current_length = len(new_response)
choice_data = ChatCompletionResponseStreamChoice(
index=0, delta=DeltaMessage(content=new_text), finish_reason=None)
chunk = ChatCompletionResponse(model=model_id,
choices=[choice_data],
object='chat.completion.chunk')
yield '{}'.format(_dump_json(chunk, exclude_unset=True))
if current_length != len(_new_response):
# Determine whether to print the delay tokens
delayed_text = _new_response[current_length:]
new_text = trim_stop_words(delayed_text, stop_words)
if len(new_text) > 0:
choice_data = ChatCompletionResponseStreamChoice(
index=0, delta=DeltaMessage(content=new_text), finish_reason=None)
chunk = ChatCompletionResponse(model=model_id,
choices=[choice_data],
object='chat.completion.chunk')
yield '{}'.format(_dump_json(chunk, exclude_unset=True))
choice_data = ChatCompletionResponseStreamChoice(index=0,
delta=DeltaMessage(),
finish_reason='stop')
chunk = ChatCompletionResponse(model=model_id,
choices=[choice_data],
object='chat.completion.chunk')
yield '{}'.format(_dump_json(chunk, exclude_unset=True))
yield '[DONE]'
_gc()
async def create_chat_completion(request: ChatCompletionRequest):
global model, tokenizer
gen_kwargs = {}
if request.top_k is not None:
gen_kwargs['top_k'] = request.top_k
if request.temperature is not None:
if request.temperature < 0.01:
gen_kwargs['top_k'] = 1 # greedy decoding
else:
# Not recommended. Please tune top_p instead.
gen_kwargs['temperature'] = request.temperature
if request.top_p is not None:
gen_kwargs['top_p'] = request.top_p
stop_words = add_extra_stop_words(request.stop)
if request.functions:
stop_words = stop_words or []
if 'Observation:' not in stop_words:
stop_words.append('Observation:')
query, history, system = parse_messages(request.messages,
request.functions)
if request.stream:
if request.functions:
raise HTTPException(
status_code=400,
detail=
'Invalid request: Function calling is not yet implemented for stream mode.',
)
generate = predict(query,
history,
request.model,
stop_words,
gen_kwargs,
system=system)
return EventSourceResponse(generate, media_type='text/event-stream')
stop_words_ids = [tokenizer.encode(s)
for s in stop_words] if stop_words else None
if query is _TEXT_COMPLETION_CMD:
response = text_complete_last_message(history,
stop_words_ids=stop_words_ids,
gen_kwargs=gen_kwargs,
system=system)
else:
response, _ = model.chat(
tokenizer,
query,
history=history,
system=system,
stop_words_ids=stop_words_ids,
**gen_kwargs,
)
print('<chat>')
pprint(history, indent=2)
print(f'{query}\n<!-- *** -->\n{response}\n</chat>')
_gc()
response = trim_stop_words(response, stop_words)
if request.functions:
choice_data = parse_response(response)
else:
choice_data = ChatCompletionResponseChoice(
index=0,
message=ChatMessage(role='assistant', content=response),
finish_reason='stop',
)
return ChatCompletionResponse(model=request.model,
choices=[choice_data],
object='chat.completion') | null |
8,010 | import base64
import copy
import json
import time
from argparse import ArgumentParser
from contextlib import asynccontextmanager
from pprint import pprint
from typing import Dict, List, Literal, Optional, Union
import torch
import uvicorn
from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from sse_starlette.sse import EventSourceResponse
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from starlette.responses import Response
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def _get_args():
parser = ArgumentParser()
parser.add_argument(
'-c',
'--checkpoint-path',
type=str,
default='Qwen/Qwen-7B-Chat',
help='Checkpoint name or path, default to %(default)r',
)
parser.add_argument('--api-auth', help='API authentication credentials')
parser.add_argument('--cpu-only',
action='store_true',
help='Run demo with CPU only')
parser.add_argument('--server-port',
type=int,
default=8000,
help='Demo server port.')
parser.add_argument(
'--server-name',
type=str,
default='127.0.0.1',
help=
'Demo server name. Default: 127.0.0.1, which is only visible from the local computer.'
' If you want other computers to access your server, use 0.0.0.0 instead.',
)
parser.add_argument(
'--disable-gc',
action='store_true',
help='Disable GC after each response generated.',
)
args = parser.parse_args()
return args | null |
import ctypes
import math
import os
import threading
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from copy import deepcopy
import platform
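# The declarations below tell ctypes the argument and return types of the fastllm
# shared-library entry points, so that Python values are marshalled correctly when
# calling into the native library.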
fastllm_lib.create_llm_model.argtypes = [ctypes.c_char_p]
fastllm_lib.create_llm_model.restype = ctypes.c_int
fastllm_lib.token_decode.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.token_decode.restype = ctypes.c_int
fastllm_lib.token_encode_string.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
fastllm_lib.token_encode_string.restype = ctypes.c_int
fastllm_lib.launch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_logits_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float)]
fastllm_lib.fetch_response_logits_llm_model.restype = ctypes.c_int
fastllm_lib.response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.launch_response_str_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_str_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_history_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p]
fastllm_lib.make_history_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_input_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.make_input_llm_model.restype = ctypes.c_char_p
fastllm_lib.add_tokenizer_word_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_float, ctypes.c_int]
fastllm_lib.set_device_map.argtype = [ctypes.c_int, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
fastllm_lib.get_llm_model_type.argtype = [ctypes.c_int]
fastllm_lib.get_llm_model_type.restype = ctypes.c_char_p
fastllm_lib.response_batch_str_llm_model.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_str_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
fastllm_lib.response_batch_tokens_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_tokens_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
def set_cpu_threads(threads: int):
    fastllm_lib.set_cpu_threads(threads)
import ctypes
import math
import os
import threading
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from copy import deepcopy
import platform
fastllm_lib.create_llm_model.argtypes = [ctypes.c_char_p]
fastllm_lib.create_llm_model.restype = ctypes.c_int
fastllm_lib.token_decode.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.token_decode.restype = ctypes.c_int
fastllm_lib.token_encode_string.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
fastllm_lib.token_encode_string.restype = ctypes.c_int
fastllm_lib.launch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_logits_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float)]
fastllm_lib.fetch_response_logits_llm_model.restype = ctypes.c_int
fastllm_lib.response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.launch_response_str_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_str_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_history_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p]
fastllm_lib.make_history_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_input_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.make_input_llm_model.restype = ctypes.c_char_p
fastllm_lib.add_tokenizer_word_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_float, ctypes.c_int]
fastllm_lib.set_device_map.argtype = [ctypes.c_int, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
fastllm_lib.get_llm_model_type.argtype = [ctypes.c_int]
fastllm_lib.get_llm_model_type.restype = ctypes.c_char_p
fastllm_lib.response_batch_str_llm_model.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_str_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
fastllm_lib.response_batch_tokens_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_tokens_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
def get_cpu_threads() -> int:
return fastllm_lib.get_cpu_threads(); | null |
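# Illustrative sketch: the bindings above assume a module-level `fastllm_lib` handle to the
# fastllm shared library. A minimal loader might look like the helper below; the library file
# names are assumptions and `_example_load_fastllm_lib` is a hypothetical helper, not part of
# the original row.
def _example_load_fastllm_lib():
    import ctypes, os, platform
    root_path = os.path.dirname(os.path.abspath(__file__))
    # assumed library names for Windows vs. Linux builds
    lib_name = "fastllm_tools.dll" if platform.system() == "Windows" else "libfastllm_tools.so"
    return ctypes.cdll.LoadLibrary(os.path.join(root_path, lib_name))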
8,013 | import ctypes
import math
import os
import threading
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from copy import deepcopy
import platform
fastllm_lib.create_llm_model.argtypes = [ctypes.c_char_p]
fastllm_lib.create_llm_model.restype = ctypes.c_int
fastllm_lib.token_decode.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.token_decode.restype = ctypes.c_int
fastllm_lib.token_encode_string.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
fastllm_lib.token_encode_string.restype = ctypes.c_int
fastllm_lib.launch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_logits_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float)]
fastllm_lib.fetch_response_logits_llm_model.restype = ctypes.c_int
fastllm_lib.response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.launch_response_str_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_str_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_history_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p]
fastllm_lib.make_history_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_input_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.make_input_llm_model.restype = ctypes.c_char_p
fastllm_lib.add_tokenizer_word_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_float, ctypes.c_int]
fastllm_lib.set_device_map.argtype = [ctypes.c_int, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
fastllm_lib.get_llm_model_type.argtype = [ctypes.c_int]
fastllm_lib.get_llm_model_type.restype = ctypes.c_char_p
fastllm_lib.response_batch_str_llm_model.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_str_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
fastllm_lib.response_batch_tokens_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_tokens_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
def print_ins_info():
fastllm_lib.print_cpu_ins(); | null |
8,014 | import ctypes
import math
import os
import threading
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from copy import deepcopy
import platform
fastllm_lib.create_llm_model.argtypes = [ctypes.c_char_p]
fastllm_lib.create_llm_model.restype = ctypes.c_int
fastllm_lib.token_decode.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.token_decode.restype = ctypes.c_int
fastllm_lib.token_encode_string.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
fastllm_lib.token_encode_string.restype = ctypes.c_int
fastllm_lib.launch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_logits_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float)]
fastllm_lib.fetch_response_logits_llm_model.restype = ctypes.c_int
fastllm_lib.response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.launch_response_str_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_str_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_history_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p]
fastllm_lib.make_history_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_input_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.make_input_llm_model.restype = ctypes.c_char_p
fastllm_lib.add_tokenizer_word_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_float, ctypes.c_int]
fastllm_lib.set_device_map.argtype = [ctypes.c_int, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
fastllm_lib.get_llm_model_type.argtype = [ctypes.c_int]
fastllm_lib.get_llm_model_type.restype = ctypes.c_char_p
fastllm_lib.response_batch_str_llm_model.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_str_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
fastllm_lib.response_batch_tokens_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_tokens_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
def set_cpu_kvcache(cpu_kvcache):
fastllm_lib.set_kvcache_in_cpu(ctypes.c_bool(cpu_kvcache)); | null |
8,015 | import ctypes
import math
import os
import threading
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from copy import deepcopy
import platform
fastllm_lib.create_llm_model.argtypes = [ctypes.c_char_p]
fastllm_lib.create_llm_model.restype = ctypes.c_int
fastllm_lib.token_decode.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.token_decode.restype = ctypes.c_int
fastllm_lib.token_encode_string.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
fastllm_lib.token_encode_string.restype = ctypes.c_int
fastllm_lib.launch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_logits_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float)]
fastllm_lib.fetch_response_logits_llm_model.restype = ctypes.c_int
fastllm_lib.response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.launch_response_str_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_str_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_history_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p]
fastllm_lib.make_history_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_input_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.make_input_llm_model.restype = ctypes.c_char_p
fastllm_lib.add_tokenizer_word_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_float, ctypes.c_int]
fastllm_lib.set_device_map.argtype = [ctypes.c_int, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
fastllm_lib.get_llm_model_type.argtype = [ctypes.c_int]
fastllm_lib.get_llm_model_type.restype = ctypes.c_char_p
fastllm_lib.response_batch_str_llm_model.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_str_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
fastllm_lib.response_batch_tokens_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_tokens_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
def get_cpu_kvcache():
return fastllm_lib.get_kvcache_in_cpu(); | null |
8,016 | import ctypes
import math
import os
import threading
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from copy import deepcopy
import platform
fastllm_lib.create_llm_model.argtypes = [ctypes.c_char_p]
fastllm_lib.create_llm_model.restype = ctypes.c_int
fastllm_lib.token_decode.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.token_decode.restype = ctypes.c_int
fastllm_lib.token_encode_string.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
fastllm_lib.token_encode_string.restype = ctypes.c_int
fastllm_lib.launch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_logits_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float)]
fastllm_lib.fetch_response_logits_llm_model.restype = ctypes.c_int
fastllm_lib.response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.launch_response_str_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_str_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_history_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p]
fastllm_lib.make_history_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_input_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.make_input_llm_model.restype = ctypes.c_char_p
fastllm_lib.add_tokenizer_word_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_float, ctypes.c_int]
fastllm_lib.set_device_map.argtype = [ctypes.c_int, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
fastllm_lib.get_llm_model_type.argtype = [ctypes.c_int]
fastllm_lib.get_llm_model_type.restype = ctypes.c_char_p
fastllm_lib.response_batch_str_llm_model.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_str_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
fastllm_lib.response_batch_tokens_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_tokens_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
def set_cpu_low_mem(low_mem):
fastllm_lib.set_cpu_low_mem(ctypes.c_bool(low_mem)); | null |
8,017 | import ctypes
import math
import os
import threading
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from copy import deepcopy
import platform
fastllm_lib.create_llm_model.argtypes = [ctypes.c_char_p]
fastllm_lib.create_llm_model.restype = ctypes.c_int
fastllm_lib.token_decode.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.token_decode.restype = ctypes.c_int
fastllm_lib.token_encode_string.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
fastllm_lib.token_encode_string.restype = ctypes.c_int
fastllm_lib.launch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_logits_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float)]
fastllm_lib.fetch_response_logits_llm_model.restype = ctypes.c_int
fastllm_lib.response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.launch_response_str_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_str_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_history_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p]
fastllm_lib.make_history_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_input_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.make_input_llm_model.restype = ctypes.c_char_p
fastllm_lib.add_tokenizer_word_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_float, ctypes.c_int]
fastllm_lib.set_device_map.argtype = [ctypes.c_int, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
fastllm_lib.get_llm_model_type.argtype = [ctypes.c_int]
fastllm_lib.get_llm_model_type.restype = ctypes.c_char_p
fastllm_lib.response_batch_str_llm_model.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_str_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
fastllm_lib.response_batch_tokens_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_tokens_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
def get_cpu_low_mem():
return fastllm_lib.get_cpu_low_mem(); | null |
8,018 | import ctypes
import math
import os
import threading
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from copy import deepcopy
import platform
fastllm_lib.create_llm_model.argtypes = [ctypes.c_char_p]
fastllm_lib.create_llm_model.restype = ctypes.c_int
fastllm_lib.token_decode.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.token_decode.restype = ctypes.c_int
fastllm_lib.token_encode_string.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_int)]
fastllm_lib.token_encode_string.restype = ctypes.c_int
fastllm_lib.launch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_logits_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_float)]
fastllm_lib.fetch_response_logits_llm_model.restype = ctypes.c_int
fastllm_lib.response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.launch_response_str_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.launch_response_str_llm_model.restype = ctypes.c_int
fastllm_lib.fetch_response_str_llm_model.argtypes = [ctypes.c_int, ctypes.c_int]
fastllm_lib.fetch_response_str_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_history_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_char_p]
fastllm_lib.make_history_llm_model.restype = ctypes.c_char_p
fastllm_lib.make_input_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p]
fastllm_lib.make_input_llm_model.restype = ctypes.c_char_p
fastllm_lib.add_tokenizer_word_llm_model.argtype = [ctypes.c_int, ctypes.c_char_p, ctypes.c_float, ctypes.c_int]
fastllm_lib.set_device_map.argtype = [ctypes.c_int, ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
fastllm_lib.get_llm_model_type.argtype = [ctypes.c_int]
fastllm_lib.get_llm_model_type.restype = ctypes.c_char_p
fastllm_lib.response_batch_str_llm_model.argtypes = [ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.c_int,
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_str_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
fastllm_lib.response_batch_tokens_llm_model.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int),
ctypes.c_int, ctypes.c_bool, ctypes.c_float, ctypes.c_int,
ctypes.c_float, ctypes.c_float, ctypes.c_bool]
fastllm_lib.response_batch_tokens_llm_model.restype = ctypes.POINTER(ctypes.c_char_p)
def set_device_map(device_map):
devices = [];
values = [];
if (isinstance(device_map, str)):
devices.append(device_map);
values.append(1);
elif (isinstance(device_map, list)):
devices = [str(x) for x in device_map];
values = [1 for x in device_map];
elif (isinstance(device_map, dict)):
devices = [str(x) for x in device_map.keys()];
values = [int(device_map[x]) for x in device_map.keys()];
else:
print("set_device_map error.");
return;
device_str = ''.join(devices);
device_len = [len(x) for x in devices];
fastllm_lib.set_device_map(len(device_len),
(ctypes.c_int * len(device_len))(*device_len),
device_str.encode(),
(ctypes.c_int * len(values))(*values)); | null |
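# Worked example (illustrative): set_device_map({"cuda:0": 2, "cpu": 1}) produces
#   devices    = ["cuda:0", "cpu"]   values     = [2, 1]
#   device_str = "cuda:0cpu"         device_len = [6, 3]
# i.e. the device names are concatenated into one string and the per-entry lengths let the
# C side split it back apart; the integer values are forwarded as per-device weights, with
# their exact interpretation left to the native library.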
8,019 | import ctypes
import math
import os
import threading
from typing import Optional, Tuple, Union, List, Callable, Dict, Any
from copy import deepcopy
import platform
def from_hf(model,
tokenizer = None,
dtype = "float16"):
from fastllm_pytools import hf_model;
return hf_model.create(model, tokenizer, dtype = dtype); | null |
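# Usage sketch (illustrative): assumes the fastllm_pytools package is importable and a HF
# Qwen checkpoint is available; the model name is a placeholder and `_example_convert_from_hf`
# is a hypothetical helper.
def _example_convert_from_hf():
    from transformers import AutoModelForCausalLM, AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
    hf_model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen-7B-Chat", trust_remote_code=True).float().eval()
    # convert the HF weights to a fastllm model in memory, quantized to int4
    return from_hf(hf_model, tokenizer, dtype="int4")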
8,020 | import struct
import numpy as np
import torch
def writeString(fo, s):
def writeKeyValue(fo, key, value):
fastllm_data_type_dict = {
"int4": 8,
"int8": 3,
"float16": 7,
"float32": 0,
}
fastllm_weight_type_dict = {
"linear": 1,
"embedding": 2
}
# module-level scratch check of the int8 mapping; the c_max/c_scale lines are assumed,
# mirroring write_int8 below, so the snippet runs as written
v = np.random.randint(-127, 127, [10, 20])
c_max = np.expand_dims(np.abs(v).max(axis = -1), -1)
c_scale = c_max / 127.0
v = (v / c_scale + 128.5).clip(1, 255).astype(np.uint8)
def write_int8(fo, v):
c_max = np.expand_dims(np.abs(v).max(axis = -1), -1).clip(0.1, 1e100)
c_scale = c_max / 127.0
v = (v / c_scale + 128.5).clip(1, 255).astype(np.uint8)
fo.write(struct.pack('i', 3))
fo.write(struct.pack('i', 0))
for i in range(c_max.shape[0]):
fo.write(struct.pack('f', -c_max[i][0]));
fo.write(struct.pack('f', c_max[i][0]));
fo.write(v.data)
def write_int4(fo, v):
# c_min = np.expand_dims(-np.abs(v).max(axis = -1), -1)
# c_max = np.expand_dims(np.abs(v).max(axis = -1), -1)
# c_scale = c_max / 7.0
# c_min = c_scale * -8.0
c_min = np.expand_dims(v.min(axis = -1), -1)
c_max = np.expand_dims(v.max(axis = -1), -1)
c_scale = (c_max - c_min) / 15.0
c_zero = np.round(0.0 - c_min / c_scale)
c_zero = c_zero.clip(0, 15)
c_min = -c_scale * c_zero
v = (v - c_min) / c_scale
v = (v + 0.5).astype(np.int8).clip(0, 15).astype(np.uint8)
v = v[:, 0::2] * 16 + v[:, 1::2]
fo.write(struct.pack('i', 8))
fo.write(struct.pack('i', 0))
for i in range(c_min.shape[0]):
fo.write(struct.pack('f', c_min[i][0]));
fo.write(struct.pack('f', c_max[i][0]));
fo.write(v.data)
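# Worked example (illustrative): for a weight row with min = -1.0 and max = 2.0,
#   c_scale = (2.0 - (-1.0)) / 15 = 0.2
#   c_zero  = round(0.0 - (-1.0) / 0.2) = 5, clipped to [0, 15]
#   c_min   = -0.2 * 5 = -1.0
# so quantized codes q in 0..15 dequantize as c_min + q * c_scale, covering -1.0..2.0,
# and two 4-bit codes are packed per output byte via v[:, 0::2] * 16 + v[:, 1::2].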
def tofile(exportPath,
model,
tokenizer = None,
pre_prompt = None,
user_role = None,
bot_role = None,
history_sep = None,
dtype = "float16"):
if (dtype not in fastllm_data_type_dict):
print("dtype should in ", list(fastllm_data_type_dict.keys()))
exit(0)
dict = model.state_dict()
fo = open(exportPath, "wb")
# 0. version id
fo.write(struct.pack('i', 2))
# 0.1 model info
if model.config.model_type == "chatglm" and model.config.transformers_version == "4.30.2":
model.config.model_type = "chatglm3"
modelInfo = model.config.__dict__
if model.generation_config is not None:
modelInfo.update(model.generation_config.__dict__)
if ("model_type" not in modelInfo):
print("unknown model_type.")
exit(0)
if (pre_prompt):
modelInfo["pre_prompt"] = pre_prompt
if (user_role):
modelInfo["user_role"] = user_role
if (bot_role):
modelInfo["bot_role"] = bot_role
if (history_sep):
modelInfo["history_sep"] = history_sep
if (modelInfo["model_type"] == "baichuan" and hasattr(model, "model") and hasattr(model.model, "get_alibi_mask")):
        # Baichuan 2 (second generation)
modelInfo["use_alibi"] = "1"
modelInfo["pre_prompt"] = ""
modelInfo["user_role"] = ("<FLM_FIX_TOKEN_" + str(model.generation_config.user_token_id) + ">") if hasattr(model.generation_config, "user_token_id") else "";
modelInfo["bot_role"] = ("<FLM_FIX_TOKEN_" + str(model.generation_config.assistant_token_id) + ">") if hasattr(model.generation_config, "assistant_token_id") else "";
modelInfo["history_sep"] = ""
if (modelInfo["model_type"] == "baichuan" and modelInfo["vocab_size"] == 125696):
        # Baichuan 2 (second generation), 7B
modelInfo["pre_prompt"] = ""
modelInfo["user_role"] = ("<FLM_FIX_TOKEN_" + str(model.generation_config.user_token_id) + ">") if hasattr(model.generation_config, "user_token_id") else "";
modelInfo["bot_role"] = ("<FLM_FIX_TOKEN_" + str(model.generation_config.assistant_token_id) + ">") if hasattr(model.generation_config, "assistant_token_id") else "";
modelInfo["history_sep"] = ""
if modelInfo["model_type"] == "qwen":
if modelInfo["chat_format"] == "chatml":
modelInfo["im_end_id"] = tokenizer.im_end_id
modelInfo["im_start_id"] = tokenizer.im_start_id
    modelInfo["tokenizer_use_score"] = "1" # tokenizer entries carry scores
if hasattr(model, "peft_config"):
adapter_size = len(model.peft_config)
modelInfo["peft_size"] = adapter_size
fo.write(struct.pack('i', len(modelInfo)))
for it in modelInfo.keys():
writeKeyValue(fo, str(it), str(modelInfo[it]))
if hasattr(model, "peft_config"):
for adapter_name in model.peft_config.keys():
adapter_dict = model.peft_config[adapter_name].__dict__
writeString(fo, adapter_name)
fo.write(struct.pack('i', len(adapter_dict)))
for it in adapter_dict.keys():
writeKeyValue(fo, str(it), str(adapter_dict[it]))
# 1. vocab
if (tokenizer):
if (hasattr(tokenizer, "tokenizer")):
if (modelInfo['model_type'] == "qwen"):
pass
else:
tokenizer = tokenizer.tokenizer
if (hasattr(tokenizer, "sp_model")):
piece_size = tokenizer.sp_model.piece_size()
fo.write(struct.pack('i', piece_size))
for i in range(piece_size):
s = tokenizer.sp_model.id_to_piece(i).encode()
fo.write(struct.pack('i', len(s)))
for c in s:
fo.write(struct.pack('i', c))
fo.write(struct.pack('i', i))
fo.write(struct.pack('f', float(tokenizer.sp_model.get_score(i))))
else:
vocab = tokenizer.get_vocab()
fo.write(struct.pack('i', len(vocab)))
for v in vocab.keys():
if (modelInfo['model_type'] == "qwen"):
s = v
elif (modelInfo["model_type"] == "moss"):
s = [(ord(c) if c not in tokenizer.byte_decoder else tokenizer.byte_decoder[c]) for c in v]
else:
s = v.encode()
fo.write(struct.pack('i', len(s)))
for c in s:
fo.write(struct.pack('i', c))
fo.write(struct.pack('i', vocab[v]))
fo.write(struct.pack('f', 1.0))
else:
fo.write(struct.pack('i', 0))
weight_type_dict = {}
module_dict = {}
for key, m in model.named_modules():
if (isinstance(m, torch.nn.Linear)):
weight_type_dict[key + ".weight"] = "linear"
module_dict[key + ".weight"] = m
if (isinstance(m, torch.nn.Embedding)):
weight_type_dict[key] = "embedding"
# 2. weight
fo.write(struct.pack('i', len(dict)))
tot = 0
for key in dict:
ori_data_type = 0
ori_np_data_type = np.float32
cur_weight_type = 0
if (key in weight_type_dict and weight_type_dict[key] in fastllm_weight_type_dict):
cur_weight_type = fastllm_weight_type_dict[weight_type_dict[key]]
to_data_type = 0
if (cur_weight_type == 1):
to_data_type = fastllm_data_type_dict[dtype]
if (to_data_type == 7):
ori_data_type = 7
ori_np_data_type = np.float16
cur = dict[key].numpy().astype(ori_np_data_type)
if hasattr(model, "peft_config"):
weight_name = key.replace('base_model.model.', '')
fo.write(struct.pack('i', len(weight_name)))
fo.write(weight_name.encode())
else:
fo.write(struct.pack('i', len(key)))
fo.write(key.encode())
fo.write(struct.pack('i', len(cur.shape)))
for i in cur.shape:
fo.write(struct.pack('i', i))
if (to_data_type == 3):
write_int8(fo, cur)
elif (to_data_type == 8):
write_int4(fo, cur)
else:
fo.write(struct.pack('i', to_data_type))
fo.write(cur.data)
tot += 1
print("output (", tot, "/", len(dict), end = " )\r")
print("\nfinish.")
fo.close() | null |
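# Usage sketch (illustrative): the model name and output path are placeholders and
# `_example_export_flm` is a hypothetical helper; the checkpoint is cast to float32 so the
# state_dict tensors convert cleanly to numpy before quantization.
def _example_export_flm():
    from transformers import AutoModelForCausalLM, AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen-7B-Chat", trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        "Qwen/Qwen-7B-Chat", trust_remote_code=True).float().eval()
    tofile("qwen-7b-chat-int4.flm", model, tokenizer, dtype="int4")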
8,021 | import argparse
from fastllm_pytools import llm
def args_parser():
parser = argparse.ArgumentParser(description = 'qwen_chat_demo')
    parser.add_argument('-p', '--path', type = str, required = True, default = '', help = 'Path to the model file')
args = parser.parse_args()
return args | null |
8,022 | import argparse
from fastllm_pytools import llm
import time
def args_parser():
parser = argparse.ArgumentParser(description = 'fastllm_chat_demo')
    parser.add_argument('-p', '--path', type = str, required = True, default = '', help = 'Path to the model file')
args = parser.parse_args()
return args | null |
8,023 | import streamlit as st
from streamlit_chat import message
from fastllm_pytools import llm
import sys
def get_model():
model = llm.model(sys.argv[1])
return model | null |
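# Example invocation (illustrative; the script file name is an assumption):
#   streamlit run web_demo_streamlit.py -- /path/to/model.flm
# arguments after the "--" separator reach the script's sys.argv, so sys.argv[1] is the
# fastllm model file handed to llm.model().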
8,024 | import os
from argparse import ArgumentParser
import gradio as gr
import mdtex2html
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
DEFAULT_CKPT_PATH = 'Qwen/Qwen-7B-Chat'
def _get_args():
parser = ArgumentParser()
parser.add_argument("-c", "--checkpoint-path", type=str, default=DEFAULT_CKPT_PATH,
help="Checkpoint name or path, default to %(default)r")
parser.add_argument("--cpu-only", action="store_true", help="Run demo with CPU only")
parser.add_argument("--share", action="store_true", default=False,
help="Create a publicly shareable link for the interface.")
parser.add_argument("--inbrowser", action="store_true", default=False,
help="Automatically launch the interface in a new tab on the default browser.")
parser.add_argument("--server-port", type=int, default=8000,
help="Demo server port.")
parser.add_argument("--server-name", type=str, default="127.0.0.1",
help="Demo server name.")
args = parser.parse_args()
return args | null |
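# Example invocation (illustrative; the script file name is an assumption):
#   python web_demo.py -c Qwen/Qwen-7B-Chat --server-name 0.0.0.0 --server-port 8000 --share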
8,025 | import os
from argparse import ArgumentParser
import gradio as gr
import mdtex2html
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def _load_model_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(
args.checkpoint_path, trust_remote_code=True, resume_download=True,
)
if args.cpu_only:
device_map = "cpu"
else:
device_map = "auto"
model = AutoModelForCausalLM.from_pretrained(
args.checkpoint_path,
device_map=device_map,
trust_remote_code=True,
resume_download=True,
).eval()
config = GenerationConfig.from_pretrained(
args.checkpoint_path, trust_remote_code=True, resume_download=True,
)
return model, tokenizer, config | null |
8,026 | import os
from argparse import ArgumentParser
import gradio as gr
import mdtex2html
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def postprocess(self, y):
if y is None:
return []
for i, (message, response) in enumerate(y):
y[i] = (
None if message is None else mdtex2html.convert(message),
None if response is None else mdtex2html.convert(response),
)
return y | null |
8,027 | import os
from argparse import ArgumentParser
import gradio as gr
import mdtex2html
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
gr.Chatbot.postprocess = postprocess
def _parse_text(text):
def _gc():
def _launch_demo(args, model, tokenizer, config):
def predict(_query, _chatbot, _task_history):
print(f"User: {_parse_text(_query)}")
_chatbot.append((_parse_text(_query), ""))
full_response = ""
for response in model.chat_stream(tokenizer, _query, history=_task_history, generation_config=config):
_chatbot[-1] = (_parse_text(_query), _parse_text(response))
yield _chatbot
full_response = _parse_text(response)
print(f"History: {_task_history}")
_task_history.append((_query, full_response))
print(f"Qwen-Chat: {_parse_text(full_response)}")
def regenerate(_chatbot, _task_history):
if not _task_history:
yield _chatbot
return
item = _task_history.pop(-1)
_chatbot.pop(-1)
yield from predict(item[0], _chatbot, _task_history)
def reset_user_input():
return gr.update(value="")
def reset_state(_chatbot, _task_history):
_task_history.clear()
_chatbot.clear()
_gc()
return _chatbot
with gr.Blocks() as demo:
gr.Markdown("""\
<p align="center"><img src="https://qianwen-res.oss-cn-beijing.aliyuncs.com/logo_qwen.jpg" style="height: 80px"/><p>""")
gr.Markdown("""<center><font size=8>Qwen-Chat Bot</center>""")
gr.Markdown(
"""\
<center><font size=3>This WebUI is based on Qwen-Chat, developed by Alibaba Cloud. \
(本WebUI基于Qwen-Chat打造,实现聊天机器人功能。)</center>""")
gr.Markdown("""\
<center><font size=4>
Qwen-7B <a href="https://modelscope.cn/models/qwen/Qwen-7B/summary">🤖 </a> |
<a href="https://huggingface.co/Qwen/Qwen-7B">🤗</a>  |
Qwen-7B-Chat <a href="https://modelscope.cn/models/qwen/Qwen-7B-Chat/summary">🤖 </a> |
<a href="https://huggingface.co/Qwen/Qwen-7B-Chat">🤗</a>  |
Qwen-14B <a href="https://modelscope.cn/models/qwen/Qwen-14B/summary">🤖 </a> |
<a href="https://huggingface.co/Qwen/Qwen-14B">🤗</a>  |
Qwen-14B-Chat <a href="https://modelscope.cn/models/qwen/Qwen-14B-Chat/summary">🤖 </a> |
<a href="https://huggingface.co/Qwen/Qwen-14B-Chat">🤗</a>  |
 <a href="https://github.com/QwenLM/Qwen">Github</a></center>""")
chatbot = gr.Chatbot(label='Qwen-Chat', elem_classes="control-height")
query = gr.Textbox(lines=2, label='Input')
task_history = gr.State([])
with gr.Row():
empty_btn = gr.Button("🧹 Clear History (清除历史)")
submit_btn = gr.Button("🚀 Submit (发送)")
regen_btn = gr.Button("🤔️ Regenerate (重试)")
submit_btn.click(predict, [query, chatbot, task_history], [chatbot], show_progress=True)
submit_btn.click(reset_user_input, [], [query])
empty_btn.click(reset_state, [chatbot, task_history], outputs=[chatbot], show_progress=True)
regen_btn.click(regenerate, [chatbot, task_history], [chatbot], show_progress=True)
gr.Markdown("""\
<font size=2>Note: This demo is governed by the original license of Qwen. \
We strongly advise users not to knowingly generate or allow others to knowingly generate harmful content, \
including hate speech, violence, pornography, deception, etc. \
(注:本演示受Qwen的许可协议限制。我们强烈建议,用户不应传播及不应允许他人传播以下内容,\
包括但不限于仇恨言论、暴力、色情、欺诈相关的有害信息。)""")
demo.queue().launch(
share=args.share,
inbrowser=args.inbrowser,
server_port=args.server_port,
server_name=args.server_name,
) | null |
8,028 | import os
import argparse
import re
import torch
import pandas as pd
from thefuzz import process
from tqdm import tqdm
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def load_models_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(
args.checkpoint_path, trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
args.checkpoint_path, device_map="auto", trust_remote_code=True
).eval()
model.generation_config = GenerationConfig.from_pretrained(
args.checkpoint_path, trust_remote_code=True
)
model.generation_config.do_sample = False # use greedy decoding
model.generation_config.repetition_penalty = 1.0 # disable repetition penalty
return model, tokenizer | null |
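# Usage sketch (illustrative): with do_sample=False and repetition_penalty=1.0 set above,
# model.chat() decodes greedily, so evaluation runs are deterministic. `_example_chat_once`
# is a hypothetical helper, not part of the evaluation script.
def _example_chat_once(args, question="1 + 1 = ?"):
    model, tokenizer = load_models_tokenizer(args)
    response, _ = model.chat(tokenizer, question, history=None)
    return response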
8,029 | import os
import argparse
import re
import torch
import pandas as pd
from thefuzz import process
from tqdm import tqdm
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def count_substr(gen, pattern):
return len(re.findall(pattern, gen)) | null |
8,030 | import os
import argparse
import re
import torch
import pandas as pd
from thefuzz import process
from tqdm import tqdm
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def format_example(line):
example = line["question"] + "\n\n"
for choice in choices:
example += f'{choice}. {line[f"{choice}"]}\n'
return example
def extract_answer(response, row):
prompt = row["question"]
gen = process_before_extraction(
response, prompt, {choice: row[choice] for choice in choices}
)
if not isinstance(prompt, str):
prompt = prompt[0]
pred = extract_choice(gen, prompt, [row[choice] for choice in choices])
return pred
def eval_subject(
model,
tokenizer,
subject_name,
test_df,
save_result_dir=None,
overwrite=False,
**kwargs
):
result_path = os.path.join(save_result_dir, f"{subject_name}_result.csv")
if not overwrite and os.path.exists(result_path):
print(f"{result_path} existed, skip!")
score = []
for (_, datarow), (_, resultrow) in zip(
test_df.iterrows(), pd.read_csv(result_path).iterrows()
):
pred = extract_answer(resultrow["model_response"], datarow)
correct = 1 if pred == datarow["answer"] else 0
score.append(correct)
correct_ratio = 100 * sum(score) / len(score)
return correct_ratio
responses = []
result = []
score = []
for _, row in tqdm(test_df.iterrows(), total=len(test_df)):
question = format_example(row)
response, _ = model.chat(
tokenizer,
question,
history=None,
)
print(question)
print(response)
pred = extract_answer(response, row)
print(pred)
print("======================")
if "answer" in row:
correct = 1 if pred == row["answer"] else 0
score.append(correct)
if args.debug:
print(f'{question} pred: {pred} ref: {row["answer"]}')
responses.append(response)
result.append(pred)
if score:
correct_ratio = 100 * sum(score) / len(score)
if args.debug:
print(subject_name, correct_ratio)
else:
correct_ratio = 0
if save_result_dir:
test_df["model_response"] = responses
test_df["model_output"] = result
if score:
test_df["correctness"] = score
os.makedirs(save_result_dir, exist_ok=True)
test_df.to_csv(result_path, encoding="utf-8", index=False)
return correct_ratio | null |
8,031 | import os
import argparse
import re
import torch
import pandas as pd
from thefuzz import process
from tqdm import tqdm
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
TASK_NAME_MAPPING = {
"computer_network": ["Computer Network", "\u8ba1\u7b97\u673a\u7f51\u7edc", "STEM"],
"operating_system": ["Operating System", "\u64cd\u4f5c\u7cfb\u7edf", "STEM"],
"computer_architecture": [
"Computer Architecture",
"\u8ba1\u7b97\u673a\u7ec4\u6210",
"STEM",
],
"college_programming": ["College Programming", "\u5927\u5b66\u7f16\u7a0b", "STEM"],
"college_physics": ["College Physics", "\u5927\u5b66\u7269\u7406", "STEM"],
"college_chemistry": ["College Chemistry", "\u5927\u5b66\u5316\u5b66", "STEM"],
"advanced_mathematics": [
"Advanced Mathematics",
"\u9ad8\u7b49\u6570\u5b66",
"STEM",
],
"probability_and_statistics": [
"Probability and Statistics",
"\u6982\u7387\u7edf\u8ba1",
"STEM",
],
"discrete_mathematics": [
"Discrete Mathematics",
"\u79bb\u6563\u6570\u5b66",
"STEM",
],
"electrical_engineer": [
"Electrical Engineer",
"\u6ce8\u518c\u7535\u6c14\u5de5\u7a0b\u5e08",
"STEM",
],
"metrology_engineer": [
"Metrology Engineer",
"\u6ce8\u518c\u8ba1\u91cf\u5e08",
"STEM",
],
"high_school_mathematics": [
"High School Mathematics",
"\u9ad8\u4e2d\u6570\u5b66",
"STEM",
],
"high_school_physics": ["High School Physics", "\u9ad8\u4e2d\u7269\u7406", "STEM"],
"high_school_chemistry": [
"High School Chemistry",
"\u9ad8\u4e2d\u5316\u5b66",
"STEM",
],
"high_school_biology": ["High School Biology", "\u9ad8\u4e2d\u751f\u7269", "STEM"],
"middle_school_mathematics": [
"Middle School Mathematics",
"\u521d\u4e2d\u6570\u5b66",
"STEM",
],
"middle_school_biology": [
"Middle School Biology",
"\u521d\u4e2d\u751f\u7269",
"STEM",
],
"middle_school_physics": [
"Middle School Physics",
"\u521d\u4e2d\u7269\u7406",
"STEM",
],
"middle_school_chemistry": [
"Middle School Chemistry",
"\u521d\u4e2d\u5316\u5b66",
"STEM",
],
"veterinary_medicine": ["Veterinary Medicine", "\u517d\u533b\u5b66", "STEM"],
"college_economics": [
"College Economics",
"\u5927\u5b66\u7ecf\u6d4e\u5b66",
"Social Science",
],
"business_administration": [
"Business Administration",
"\u5de5\u5546\u7ba1\u7406",
"Social Science",
],
"marxism": [
"Marxism",
"\u9a6c\u514b\u601d\u4e3b\u4e49\u57fa\u672c\u539f\u7406",
"Social Science",
],
"mao_zedong_thought": [
"Mao Zedong Thought",
"\u6bdb\u6cfd\u4e1c\u601d\u60f3\u548c\u4e2d\u56fd\u7279\u8272\u793e\u4f1a\u4e3b\u4e49\u7406\u8bba\u4f53\u7cfb\u6982\u8bba",
"Social Science",
],
"education_science": ["Education Science", "\u6559\u80b2\u5b66", "Social Science"],
"teacher_qualification": [
"Teacher Qualification",
"\u6559\u5e08\u8d44\u683c",
"Social Science",
],
"high_school_politics": [
"High School Politics",
"\u9ad8\u4e2d\u653f\u6cbb",
"Social Science",
],
"high_school_geography": [
"High School Geography",
"\u9ad8\u4e2d\u5730\u7406",
"Social Science",
],
"middle_school_politics": [
"Middle School Politics",
"\u521d\u4e2d\u653f\u6cbb",
"Social Science",
],
"middle_school_geography": [
"Middle School Geography",
"\u521d\u4e2d\u5730\u7406",
"Social Science",
],
"modern_chinese_history": [
"Modern Chinese History",
"\u8fd1\u4ee3\u53f2\u7eb2\u8981",
"Humanities",
],
"ideological_and_moral_cultivation": [
"Ideological and Moral Cultivation",
"\u601d\u60f3\u9053\u5fb7\u4fee\u517b\u4e0e\u6cd5\u5f8b\u57fa\u7840",
"Humanities",
],
"logic": ["Logic", "\u903b\u8f91\u5b66", "Humanities"],
"law": ["Law", "\u6cd5\u5b66", "Humanities"],
"chinese_language_and_literature": [
"Chinese Language and Literature",
"\u4e2d\u56fd\u8bed\u8a00\u6587\u5b66",
"Humanities",
],
"art_studies": ["Art Studies", "\u827a\u672f\u5b66", "Humanities"],
"professional_tour_guide": [
"Professional Tour Guide",
"\u5bfc\u6e38\u8d44\u683c",
"Humanities",
],
"legal_professional": [
"Legal Professional",
"\u6cd5\u5f8b\u804c\u4e1a\u8d44\u683c",
"Humanities",
],
"high_school_chinese": [
"High School Chinese",
"\u9ad8\u4e2d\u8bed\u6587",
"Humanities",
],
"high_school_history": [
"High School History",
"\u9ad8\u4e2d\u5386\u53f2",
"Humanities",
],
"middle_school_history": [
"Middle School History",
"\u521d\u4e2d\u5386\u53f2",
"Humanities",
],
"civil_servant": ["Civil Servant", "\u516c\u52a1\u5458", "Other"],
"sports_science": ["Sports Science", "\u4f53\u80b2\u5b66", "Other"],
"plant_protection": ["Plant Protection", "\u690d\u7269\u4fdd\u62a4", "Other"],
"basic_medicine": ["Basic Medicine", "\u57fa\u7840\u533b\u5b66", "Other"],
"clinical_medicine": ["Clinical Medicine", "\u4e34\u5e8a\u533b\u5b66", "Other"],
"urban_and_rural_planner": [
"Urban and Rural Planner",
"\u6ce8\u518c\u57ce\u4e61\u89c4\u5212\u5e08",
"Other",
],
"accountant": ["Accountant", "\u6ce8\u518c\u4f1a\u8ba1\u5e08", "Other"],
"fire_engineer": [
"Fire Engineer",
"\u6ce8\u518c\u6d88\u9632\u5de5\u7a0b\u5e08",
"Other",
],
"environmental_impact_assessment_engineer": [
"Environmental Impact Assessment Engineer",
"\u73af\u5883\u5f71\u54cd\u8bc4\u4ef7\u5de5\u7a0b\u5e08",
"Other",
],
"tax_accountant": ["Tax Accountant", "\u7a0e\u52a1\u5e08", "Other"],
"physician": ["Physician", "\u533b\u5e08\u8d44\u683c", "Other"],
}
hard_list = [
"advanced_mathematics",
"discrete_mathematics",
"probability_and_statistics",
"college_physics",
"college_chemistry",
"high_school_mathematics",
"high_school_physics",
"high_school_chemistry",
]
def cal_ceval(res):
acc_sum_dict = dict()
acc_norm_sum_dict = dict()
cnt_dict = dict()
acc_sum = 0.0
cnt = 0
hard_cnt = 0
hard_acc_sum = 0.0
for tt in res.keys():
name = tt.split("-")[-1]
acc_sum += float(res[tt])
cnt += 1
class_ = TASK_NAME_MAPPING[name][2]
if class_ not in acc_sum_dict:
acc_sum_dict[class_] = 0.0
acc_norm_sum_dict[class_] = 0.0
cnt_dict[class_] = 0.0
if name in hard_list:
hard_cnt += 1
hard_acc_sum += float(res[tt])
acc_sum_dict[class_] += float(res[tt])
cnt_dict[class_] += 1
print("\n\n\n")
for k in ["STEM", "Social Science", "Humanities", "Other"]:
if k in cnt_dict:
print("%s acc: %.2f " % (k, acc_sum_dict[k] / cnt_dict[k]))
if hard_cnt > 0:
print("Hard acc:%.2f " % (hard_acc_sum / hard_cnt))
print("AVERAGE acc:%.2f " % (acc_sum / cnt)) | null |
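# Worked example (illustrative): with res = {"val-logic": 80.0, "val-law": 60.0}, both
# subjects map to "Humanities", so cal_ceval reports a Humanities accuracy of
# (80.0 + 60.0) / 2 = 70.00 and the same overall average; neither subject is in hard_list,
# so the "Hard acc" line is skipped.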
8,032 | import os
import argparse
import re
import torch
import pandas as pd
from tqdm import tqdm
from thefuzz import process
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def load_models_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(
args.checkpoint_path, trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
args.checkpoint_path,
device_map="auto",
trust_remote_code=True,
bf16=True,
use_flash_attn=True,
).eval()
model.generation_config = GenerationConfig.from_pretrained(
args.checkpoint_path, trust_remote_code=True
)
model.generation_config.do_sample = False # use greedy decoding
model.generation_config.repetition_penalty = 1.0 # disable repetition penalty
return model, tokenizer | null |
8,033 | import os
import argparse
import re
import torch
import pandas as pd
from tqdm import tqdm
from thefuzz import process
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def format_example(line):
def extract_answer(response, row):
def eval_subject(
model,
tokenizer,
subject_name,
test_df,
save_result_dir=None,
overwrite=False,
**kwargs
):
result_path = os.path.join(save_result_dir, f"{subject_name}_result.csv")
if not overwrite and os.path.exists(result_path):
print(f"{result_path} existed, skip!")
score = []
for (_, datarow), (_, resultrow) in zip(
test_df.iterrows(), pd.read_csv(result_path).astype(str).iterrows()
):
# pred = extract_answer(resultrow['model_response'], datarow)
pred = resultrow["model_output"]
correct = 1 if pred == datarow["answer"] else 0
score.append(correct)
return score
result = []
score = []
for _, row in tqdm(test_df.iterrows(), total=len(test_df)):
question = format_example(row)
response, _ = model.chat(
tokenizer,
question,
history=None,
)
print(question)
print(response)
pred = extract_answer(response, row)
print(pred)
print("======================")
if "answer" in row:
correct = 1 if pred == row["answer"] else 0
score.append(correct)
if args.debug:
print(f'{question} pred: {pred} ref: {row["answer"]}')
result.append(pred)
if save_result_dir:
test_df["model_output"] = result
test_df["model_response"] = response
if score:
test_df["correctness"] = score
os.makedirs(save_result_dir, exist_ok=True)
test_df.to_csv(
os.path.join(save_result_dir, f"{subject_name}_result.csv"),
encoding="utf-8",
index=False,
)
return score | null |
8,034 | import os
import argparse
import re
import torch
import pandas as pd
from tqdm import tqdm
from thefuzz import process
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
TASK_NAME_MAPPING = {
"stem": [
"abstract_algebra",
"anatomy",
"astronomy",
"college_biology",
"college_chemistry",
"college_computer_science",
"college_mathematics",
"college_physics",
"computer_security",
"conceptual_physics",
"electrical_engineering",
"elementary_mathematics",
"high_school_biology",
"high_school_chemistry",
"high_school_computer_science",
"high_school_mathematics",
"high_school_physics",
"high_school_statistics",
"machine_learning",
],
"Humanities": [
"formal_logic",
"high_school_european_history",
"high_school_us_history",
"high_school_world_history",
"international_law",
"jurisprudence",
"logical_fallacies",
"moral_disputes",
"moral_scenarios",
"philosophy",
"prehistory",
"professional_law",
"world_religions",
],
"other": [
"business_ethics",
"college_medicine",
"human_aging",
"management",
"marketing",
"medical_genetics",
"miscellaneous",
"nutrition",
"professional_accounting",
"professional_medicine",
"virology",
"global_facts",
"clinical_knowledge",
],
"social": [
"econometrics",
"high_school_geography",
"high_school_government_and_politics",
"high_school_macroeconomics",
"high_school_microeconomics",
"high_school_psychology",
"human_sexuality",
"professional_psychology",
"public_relations",
"security_studies",
"sociology",
"us_foreign_policy",
],
}
def cal_mmlu(res):
acc_sum_dict = dict()
acc_norm_sum_dict = dict()
cnt_dict = dict()
acc_sum = 0.0
cnt = 0
for class_ in TASK_NAME_MAPPING.keys():
acc_sum_dict[class_] = 0.0
acc_norm_sum_dict[class_] = 0.0
cnt_dict[class_] = 0.0
for tt in TASK_NAME_MAPPING[class_]:
acc_sum += sum(res[tt])
cnt += len(res[tt])
acc_sum_dict[class_] += sum(res[tt])
cnt_dict[class_] += len(res[tt])
print("\n\n\n")
for k in TASK_NAME_MAPPING.keys():
if k in cnt_dict:
print("%s ACC: %.2f " % (k, acc_sum_dict[k] * 100 / cnt_dict[k]))
print("AVERAGE ACC:%.2f " % (acc_sum * 100 / cnt)) | null |
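# Worked example (illustrative): here res[subject] is a list of 0/1 correctness flags,
# e.g. res = {"anatomy": [1, 0, 1, 1], ...}; that subject contributes 3 correct out of 4
# to its "stem" bucket, and per-category and overall accuracies are reported as
# percentages (sum * 100 / count).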
8,035 | import os
import pandas as pd
import numpy as np
import argparse
import datasets
import torch
from collections import defaultdict
from typing import List
from tqdm import tqdm
from transformers.trainer_utils import set_seed
def load_models_tokenizer(args):
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
tokenizer = AutoTokenizer.from_pretrained(
args.checkpoint_path,
pad_token='<|extra_0|>',
eos_token='<|endoftext|>',
padding_side='left',
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
args.checkpoint_path,
pad_token_id=tokenizer.pad_token_id,
device_map="auto",
trust_remote_code=True
).eval()
model.generation_config = GenerationConfig.from_pretrained(
args.checkpoint_path,
pad_token_id=tokenizer.pad_token_id,
trust_remote_code=True
)
return model, tokenizer | null |
8,036 | import os
import pandas as pd
import numpy as np
import argparse
import datasets
import torch
from collections import defaultdict
from typing import List
from tqdm import tqdm
from transformers.trainer_utils import set_seed
def format_example(line, include_answer=True):
example = "问题:" + line["Question"]
for choice in choices:
example += f'\n{choice}. {line[f"{choice}"]}'
if include_answer:
example += "\n答案:" + line["Answer"] + "\n\n"
else:
example += "\n答案:"
return example
def generate_few_shot_prompt(k, subject, dev_df):
prompt = ""
if k == -1:
k = dev_df.shape[0]
for i in range(k):
prompt += format_example(
dev_df.iloc[i, :],
include_answer=True,
)
return prompt
def get_logits(tokenizer, model, inputs: List[str]):
input_ids = tokenizer(inputs, padding='longest')["input_ids"]
input_ids = torch.tensor(input_ids, device=model.device)
tokens = {"input_ids": input_ids}
attention_mask = input_ids.ne(tokenizer.pad_token_id)
outputs = model(input_ids, attention_mask=attention_mask)["logits"]
logits = outputs[:, -1, :]
log_probs = torch.nn.functional.softmax(logits, dim=-1)
return log_probs, {"tokens": tokens}
choices = ["A", "B", "C", "D"]
def eval_subject(
model,
tokenizer,
subject_name,
test_df,
k=5,
dev_df=None,
few_shot=False,
save_result_dir=None,
batch_size=1,
**kwargs,
):
result = []
score = []
few_shot_prompt = (
generate_few_shot_prompt(k, subject_name, dev_df) if few_shot else []
)
all_probs = {"prob_A": [], "prob_B": [], "prob_C": [], "prob_D": []}
if args.debug:
print(f"few_shot_prompt: {few_shot_prompt}")
choices_ids = torch.tensor(
tokenizer("A")["input_ids"] + tokenizer("B")["input_ids"] +
tokenizer("C")["input_ids"] + tokenizer("D")["input_ids"]
).unsqueeze(0).to(model.device)
idx_list = list(range(0, len(test_df), batch_size))
for i in tqdm(idx_list):
full_prompt_list = []
answer_list = []
for row in test_df.iloc[i:i+batch_size].to_dict(orient='records'):
question = format_example(row, include_answer=False)
full_prompt = few_shot_prompt + question
full_prompt_list.append(full_prompt)
if 'Answer' in row:
answer_list.append(row['Answer'])
logits, input_info = get_logits(tokenizer, model, full_prompt_list)
softval = logits.gather(1, choices_ids.expand(logits.size(0), -1)).softmax(1)
if softval.dtype in {torch.bfloat16, torch.float16}:
softval = softval.to(dtype=torch.float32)
probs = softval.detach().cpu().numpy()
for i in range(len(probs)):
for j, choice in enumerate(choices):
all_probs[f"prob_{choice}"].append(probs[i][j])
pred = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(probs[i])]
if answer_list != []:
correct = 1 if pred == answer_list[i] else 0
score.append(correct)
if args.debug:
print(f'{question} pred: {pred} ref: {answer_list[i]}')
result.append(pred)
if score:
correct_ratio = 100 * sum(score) / len(score)
if args.debug:
print(subject_name, correct_ratio)
else:
correct_ratio = 0
if save_result_dir:
test_df["model_output"] = result
for i, choice in enumerate(choices):
test_df[f"prob_{choice}"] = all_probs[f"prob_{choice}"]
if score:
test_df["correctness"] = score
os.makedirs(save_result_dir, exist_ok=True)
test_df.to_csv(
os.path.join(save_result_dir, f"{subject_name}_result.csv"),
encoding="utf-8",
index=False,
)
return correct_ratio | null |
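# Worked example (illustrative): if the renormalized probabilities over the four choice
# tokens for one item come out as [0.1, 0.2, 0.6, 0.1], np.argmax picks index 2, the
# prediction is "C", and it counts as correct only when that row's Answer is "C". Note
# that, despite its name, the value returned by get_logits above is already
# softmax-normalized over the vocabulary; eval_subject then renormalizes over just the
# four choice tokens.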
8,037 | import os
import pandas as pd
import numpy as np
import argparse
import datasets
import torch
from collections import defaultdict
from typing import List
from tqdm import tqdm
from transformers.trainer_utils import set_seed
TASK_NAME_MAPPING = defaultdict(list)
for k, v in categories.items():
for subject, subcat in subcategories.items():
for c in subcat:
if c in v:
TASK_NAME_MAPPING[k].append(subject)
def cal_cmmlu(res):
print("\n\n\n")
res = {k.split("-")[-1]: float(v) for k, v in res.items()}
for k, v in TASK_NAME_MAPPING.items():
avg_acc = np.mean(list(map(lambda x: res[x], v)))
print(f"{k} acc: {avg_acc:.2f}")
avg_all_acc = np.mean(list(res.values()))
print(f"AVERAGE acc: {avg_all_acc:.2f}") | null |
8,038 | import argparse
import json
import os
import pprint
import json5
import jsonlines
from rouge_score import rouge_scorer
from tqdm import tqdm
from transformers import Agent, AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.tools.evaluate_agent import evaluate_agent
from transformers.trainer_utils import set_seed
def is_callable(response, golden):
return response["action"].strip().lower() == golden["action"].strip().lower()
def process_res(response):
# parse response
response += "\n" # fix not-find bug
thought = response[: response.find("Action:")].strip()
action = response[
response.find("Action:") + len("Action:") : response.find("Action Input:")
].strip()
action_input = response[
response.find("Action Input:")
+ len("Action Input:") : response.find("Observation:")
].strip()
# TODO: This parsing result is incorrect if the response contains multiple Actions. To be fixed in the future.
observation = response[
response.find("Observation:") + len("Observation:") : response.rfind("Thought:")
].strip()
thought_last = response[
response.rfind("Thought:") + len("Thought:") : response.find("Final Answer:")
].strip()
final_answer = response[
response.find("Final Answer:") + len("Final Answer:") :
].strip()
try:
action_input = json.dumps(
json5.loads(action_input), ensure_ascii=False, sort_keys=True
)
except:
# print("JSON Load Error:", action_input)
action_input = ""
res_dict = {
"thought": thought,
"action": action,
"action_input": action_input,
"observation": observation,
"thought_last": thought_last,
"final_answer": final_answer,
}
return res_dict
def eval_action(job):
response = job["gen"][0]
golden = job["response"]
if "\nAction: " in response:
response, golden = process_res(response), process_res(golden)
if is_callable(response, golden):
return True
return False | null |
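# Illustrative example (the tool name and arguments are hypothetical): for a generation
#   "Thought: need the weather\nAction: get_weather\nAction Input: {\"city\": \"Beijing\"}\nObservation: ..."
# process_res extracts action == "get_weather", and eval_action marks the sample as
# callable when the golden response names the same action (compared case-insensitively).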
8,039 | import argparse
import json
import os
import pprint
import json5
import jsonlines
from rouge_score import rouge_scorer
from tqdm import tqdm
from transformers import Agent, AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.tools.evaluate_agent import evaluate_agent
from transformers.trainer_utils import set_seed
def process_res(response):
# parse response
response += "\n" # fix not-find bug
thought = response[: response.find("Action:")].strip()
action = response[
response.find("Action:") + len("Action:") : response.find("Action Input:")
].strip()
action_input = response[
response.find("Action Input:")
+ len("Action Input:") : response.find("Observation:")
].strip()
# TODO: This parsing result is incorrect if the response contains multiple Actions. To be fixed in the future.
observation = response[
response.find("Observation:") + len("Observation:") : response.rfind("Thought:")
].strip()
thought_last = response[
response.rfind("Thought:") + len("Thought:") : response.find("Final Answer:")
].strip()
final_answer = response[
response.find("Final Answer:") + len("Final Answer:") :
].strip()
try:
action_input = json.dumps(
json5.loads(action_input), ensure_ascii=False, sort_keys=True
)
except:
# print("JSON Load Error:", action_input)
action_input = ""
res_dict = {
"thought": thought,
"action": action,
"action_input": action_input,
"observation": observation,
"thought_last": thought_last,
"final_answer": final_answer,
}
return res_dict
class _DummyTokenizer:
def tokenize(self, text: str):
return text.split()
def _get_tokenized_string(tokenizer, text_list):
token_ids_list, tokenized_string_list = [], []
for text in text_list:
assert tokenizer is not None
token_ids = tokenizer.encode(text)
tokens_bytes = tokenizer.convert_ids_to_tokens(token_ids)
tokens = [token.decode("utf-8", errors="replace") for token in tokens_bytes]
tokenized_string = " ".join(tokens)
token_ids_list.append(token_ids)
tokenized_string_list.append(tokenized_string)
return token_ids_list, tokenized_string_list
def eval_action_input(job, tokenizer):
response = job["gen"][0]
golden = job["response"]
response, golden = process_res(response), process_res(golden)
query = job["prompt"]
job = {}
job["prompt"] = query
job["gen"] = response["action_input"]
job["response"] = golden["action_input"]
job["_gen_tok"], job["_gen_tok_str"] = _get_tokenized_string(
tokenizer, [response["action_input"]]
)
job["_reference_tok"], job["_reference_tok_str"] = _get_tokenized_string(
tokenizer, [golden["action_input"]]
)
scorer = rouge_scorer.RougeScorer(
["rouge1", "rouge2", "rougeL"], tokenizer=_DummyTokenizer()
)
score = scorer.score(job["_reference_tok_str"][0], job["_gen_tok_str"][0])
rouge = score["rougeL"].fmeasure
return rouge | null |
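A small illustration of the whitespace-tokenized ROUGE-L scoring used by eval_action_input above; the two strings are made up:

scorer = rouge_scorer.RougeScorer(["rougeL"], tokenizer=_DummyTokenizer())
score = scorer.score("get weather in Paris", "get weather in London")
print(score["rougeL"].fmeasure)  # 0.75: three of the four whitespace tokens match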
8,040 | import argparse
import json
import os
import pprint
import json5
import jsonlines
from rouge_score import rouge_scorer
from tqdm import tqdm
from transformers import Agent, AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.tools.evaluate_agent import evaluate_agent
from transformers.trainer_utils import set_seed
def load_models_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(
args.checkpoint_path, trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
args.checkpoint_path,
device_map="auto",
trust_remote_code=True,
bf16=True,
use_flash_attn=True,
).eval()
model.generation_config = GenerationConfig.from_pretrained(
args.checkpoint_path, trust_remote_code=True
)
model.generation_config.do_sample = False # use greedy decoding
return model, tokenizer | null |
8,041 | import argparse
import json
import os
import pprint
import json5
import jsonlines
from rouge_score import rouge_scorer
from tqdm import tqdm
from transformers import Agent, AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
from transformers.tools.evaluate_agent import evaluate_agent
from transformers.trainer_utils import set_seed
data_root_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
def load_jobs(filename):
    # The body is elided in the source; this is a minimal sketch that reads
    # one JSON object per line from a JSONL file under data_root_path.
    jobs = []
    with jsonlines.open(os.path.join(data_root_path, filename), mode="r") as reader:
        for job in reader:
            jobs.append(job)
    return jobs
def react_inference(filename, model, tokenizer):
filename_cache = filename + ".cache"
if os.path.exists(os.path.join(data_root_path, filename_cache)):
jobs = load_jobs(filename=filename_cache)
print("Loaded from", filename_cache)
else:
with open(os.path.join(data_root_path, filename_cache), "w") as f:
jobs = load_jobs(filename=filename)
print("Inference:", filename)
for job in tqdm(jobs):
response, history = model.chat(tokenizer, job["prompt"], history=None)
job["gen"] = [response]
f.writelines(json.dumps(job, ensure_ascii=False) + "\n")
print(filename_cache, "is saved.")
return jobs | null |
8,042 | import os
from typing import List
import argparse
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def load_models_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(
args.checkpoint_path,
pad_token='<|extra_0|>',
eos_token='<|endoftext|>',
padding_side='left',
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
args.checkpoint_path,
pad_token_id=tokenizer.pad_token_id,
device_map="auto",
trust_remote_code=True
).eval()
model.generation_config = GenerationConfig.from_pretrained(
args.checkpoint_path,
pad_token_id=tokenizer.pad_token_id,
trust_remote_code=True
)
return model, tokenizer | null |
8,043 | import os
from typing import List
import argparse
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def format_example(line, include_answer=True):
example = "问题:" + line["question"]
for choice in choices:
example += f'\n{choice}. {line[f"{choice}"]}'
if include_answer:
example += "\n答案:" + line["answer"] + "\n\n"
else:
example += "\n答案:"
return example
def generate_few_shot_prompt(k, subject, dev_df):
prompt = ""
if k == -1:
k = dev_df.shape[0]
for i in range(k):
prompt += format_example(
dev_df.iloc[i, :],
include_answer=True,
)
return prompt
def get_logits(tokenizer, model, inputs: List[str]):
input_ids = tokenizer(inputs, padding='longest')["input_ids"]
input_ids = torch.tensor(input_ids, device=model.device)
tokens = {"input_ids": input_ids}
attention_mask = input_ids.ne(tokenizer.pad_token_id)
outputs = model(input_ids, attention_mask=attention_mask)["logits"]
logits = outputs[:, -1, :]
log_probs = torch.nn.functional.softmax(logits, dim=-1)
return log_probs, {"tokens": tokens}
choices = ["A", "B", "C", "D"]
def eval_subject(
model,
tokenizer,
subject_name,
test_df,
k=5,
dev_df=None,
few_shot=False,
save_result_dir=None,
batch_size=1,
**kwargs,
):
result = []
score = []
few_shot_prompt = (
generate_few_shot_prompt(k, subject_name, dev_df) if few_shot else ""
)
all_probs = {"prob_A": [], "prob_B": [], "prob_C": [], "prob_D": []}
if args.debug:
print(f"few_shot_prompt: {few_shot_prompt}")
choices_ids = torch.tensor(
tokenizer("A")["input_ids"] + tokenizer("B")["input_ids"] +
tokenizer("C")["input_ids"] + tokenizer("D")["input_ids"]
).unsqueeze(0).to(model.device)
idx_list = list(range(0, len(test_df), batch_size))
for i in tqdm(idx_list):
full_prompt_list = []
answer_list = []
for row in test_df.iloc[i:i+batch_size].to_dict(orient='records'):
question = format_example(row, include_answer=False)
full_prompt = few_shot_prompt + question
full_prompt_list.append(full_prompt)
if 'answer' in row:
answer_list.append(row['answer'])
logits, input_info = get_logits(tokenizer, model, full_prompt_list)
softval = logits.gather(1, choices_ids.expand(logits.size(0), -1)).softmax(1)
if softval.dtype in {torch.bfloat16, torch.float16}:
softval = softval.to(dtype=torch.float32)
probs = softval.detach().cpu().numpy()
for i in range(len(probs)):
for j, choice in enumerate(choices):
all_probs[f"prob_{choice}"].append(probs[i][j])
pred = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(probs[i])]
if answer_list != []:
correct = 1 if pred == answer_list[i] else 0
score.append(correct)
if args.debug:
print(f'{question} pred: {pred} ref: {answer_list[i]}')
result.append(pred)
if score:
correct_ratio = 100 * sum(score) / len(score)
if args.debug:
print(subject_name, correct_ratio)
else:
correct_ratio = 0
if save_result_dir:
test_df["model_output"] = result
for i, choice in enumerate(choices):
test_df[f"prob_{choice}"] = all_probs[f"prob_{choice}"]
if score:
test_df["correctness"] = score
os.makedirs(save_result_dir, exist_ok=True)
test_df.to_csv(
os.path.join(save_result_dir, f"{subject_name}_result.csv"),
encoding="utf-8",
index=False,
)
return correct_ratio | null |
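The choice-scoring trick in eval_subject gathers the last-position logits at the token ids of the four option letters and softmaxes over just those entries. A tiny self-contained sketch with made-up logits and ids:

import torch

logits = torch.tensor([[0.1, 2.0, 0.3, 1.2, 0.05]])  # (batch=1, vocab=5), invented
choices_ids = torch.tensor([[1, 2, 3, 4]])           # pretend token ids of "A".."D"
softval = logits.gather(1, choices_ids.expand(logits.size(0), -1)).softmax(1)
pred = "ABCD"[softval.argmax(dim=1).item()]
print(pred)  # "A", since index 1 holds the largest gathered logit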
8,044 | import os
from typing import List
import argparse
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
TASK_NAME_MAPPING = {
    "computer_network": ["Computer Network", "计算机网络", "STEM"],
    "operating_system": ["Operating System", "操作系统", "STEM"],
    "computer_architecture": ["Computer Architecture", "计算机组成", "STEM"],
    "college_programming": ["College Programming", "大学编程", "STEM"],
    "college_physics": ["College Physics", "大学物理", "STEM"],
    "college_chemistry": ["College Chemistry", "大学化学", "STEM"],
    "advanced_mathematics": ["Advanced Mathematics", "高等数学", "STEM"],
    "probability_and_statistics": ["Probability and Statistics", "概率统计", "STEM"],
    "discrete_mathematics": ["Discrete Mathematics", "离散数学", "STEM"],
    "electrical_engineer": ["Electrical Engineer", "注册电气工程师", "STEM"],
    "metrology_engineer": ["Metrology Engineer", "注册计量师", "STEM"],
    "high_school_mathematics": ["High School Mathematics", "高中数学", "STEM"],
    "high_school_physics": ["High School Physics", "高中物理", "STEM"],
    "high_school_chemistry": ["High School Chemistry", "高中化学", "STEM"],
    "high_school_biology": ["High School Biology", "高中生物", "STEM"],
    "middle_school_mathematics": ["Middle School Mathematics", "初中数学", "STEM"],
    "middle_school_biology": ["Middle School Biology", "初中生物", "STEM"],
    "middle_school_physics": ["Middle School Physics", "初中物理", "STEM"],
    "middle_school_chemistry": ["Middle School Chemistry", "初中化学", "STEM"],
    "veterinary_medicine": ["Veterinary Medicine", "兽医学", "STEM"],
    "college_economics": ["College Economics", "大学经济学", "Social Science"],
    "business_administration": ["Business Administration", "工商管理", "Social Science"],
    "marxism": ["Marxism", "马克思主义基本原理", "Social Science"],
    "mao_zedong_thought": ["Mao Zedong Thought", "毛泽东思想和中国特色社会主义理论体系概论", "Social Science"],
    "education_science": ["Education Science", "教育学", "Social Science"],
    "teacher_qualification": ["Teacher Qualification", "教师资格", "Social Science"],
    "high_school_politics": ["High School Politics", "高中政治", "Social Science"],
    "high_school_geography": ["High School Geography", "高中地理", "Social Science"],
    "middle_school_politics": ["Middle School Politics", "初中政治", "Social Science"],
    "middle_school_geography": ["Middle School Geography", "初中地理", "Social Science"],
    "modern_chinese_history": ["Modern Chinese History", "近代史纲要", "Humanities"],
    "ideological_and_moral_cultivation": ["Ideological and Moral Cultivation", "思想道德修养与法律基础", "Humanities"],
    "logic": ["Logic", "逻辑学", "Humanities"],
    "law": ["Law", "法学", "Humanities"],
    "chinese_language_and_literature": ["Chinese Language and Literature", "中国语言文学", "Humanities"],
    "art_studies": ["Art Studies", "艺术学", "Humanities"],
    "professional_tour_guide": ["Professional Tour Guide", "导游资格", "Humanities"],
    "legal_professional": ["Legal Professional", "法律职业资格", "Humanities"],
    "high_school_chinese": ["High School Chinese", "高中语文", "Humanities"],
    "high_school_history": ["High School History", "高中历史", "Humanities"],
    "middle_school_history": ["Middle School History", "初中历史", "Humanities"],
    "civil_servant": ["Civil Servant", "公务员", "Other"],
    "sports_science": ["Sports Science", "体育学", "Other"],
    "plant_protection": ["Plant Protection", "植物保护", "Other"],
    "basic_medicine": ["Basic Medicine", "基础医学", "Other"],
    "clinical_medicine": ["Clinical Medicine", "临床医学", "Other"],
    "urban_and_rural_planner": ["Urban and Rural Planner", "注册城乡规划师", "Other"],
    "accountant": ["Accountant", "注册会计师", "Other"],
    "fire_engineer": ["Fire Engineer", "注册消防工程师", "Other"],
    "environmental_impact_assessment_engineer": ["Environmental Impact Assessment Engineer", "环境影响评价工程师", "Other"],
    "tax_accountant": ["Tax Accountant", "税务师", "Other"],
    "physician": ["Physician", "医师资格", "Other"],
}
hard_list = [
"advanced_mathematics",
"discrete_mathematics",
"probability_and_statistics",
"college_physics",
"college_chemistry",
"high_school_mathematics",
"high_school_physics",
"high_school_chemistry",
]
def cal_ceval(res):
acc_sum_dict = dict()
acc_norm_sum_dict = dict()
cnt_dict = dict()
acc_sum = 0.0
cnt = 0
hard_cnt = 0
hard_acc_sum = 0.0
for tt in res.keys():
name = tt.split("-")[-1]
acc_sum += float(res[tt])
cnt += 1
class_ = TASK_NAME_MAPPING[name][2]
if class_ not in acc_sum_dict:
acc_sum_dict[class_] = 0.0
acc_norm_sum_dict[class_] = 0.0
cnt_dict[class_] = 0.0
if name in hard_list:
hard_cnt += 1
hard_acc_sum += float(res[tt])
acc_sum_dict[class_] += float(res[tt])
cnt_dict[class_] += 1
print("\n\n\n")
for k in ["STEM", "Social Science", "Humanities", "Other"]:
if k in cnt_dict:
print("%s acc: %.2f " % (k, acc_sum_dict[k] / cnt_dict[k]))
    if hard_cnt > 0:
        print("Hard acc: %.2f" % (hard_acc_sum / hard_cnt))
    print("AVERAGE acc: %.2f" % (acc_sum / cnt))
8,045 | import argparse
import tqdm
import torch
import jsonlines
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def decode(tokens_list, tokenizer, raw_text_len):
sents = []
# print(len(tokens_list))
for tokens in tokens_list:
tokens = tokens.cpu().numpy().tolist()
sent = tokenizer.tokenizer.decode(tokens[raw_text_len:])
sent = sent.split("<|endoftext|>")[0]
sent = sent.split("\n\n\n")[0]
sent = sent.split("\n\n")[0]
sent = sent.split("def ")[0]
sents.append(sent)
return sents
def generate_sample(model, tokenizer, input_txt):
input_ids = tokenizer.tokenizer.encode(input_txt)
raw_text_len = len(input_ids)
context_enc = torch.tensor([input_ids]).to(model.device)
print(f"Input text: {input_txt}\n")
outputs = model.generate(context_enc)
output_text = decode(outputs, tokenizer, raw_text_len)[0]
print(f"\nOutput text: \n{output_text}\n")
return output_text | null |
8,046 | import json
import re
from pathlib import Path
import argparse
import requests
import math
import numpy as np
import tqdm
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def doc_to_text(doc, use_fewshot):
if use_fewshot:
context = (
"Question: Angelo and Melanie want to plan how many hours over the next week they should study together for their test next week. They have 2 chapters of their textbook to study and 4 worksheets to memorize. They figure out that they should dedicate 3 hours to each chapter of their textbook and 1.5 hours for each worksheet. If they plan to study no more than 4 hours each day, how many days should they plan to study total over the next week if they take a 10-minute break every hour, include 3 10-minute snack breaks each day, and 30 minutes for lunch each day?\nLet's think step by step\n"
"Angelo and Melanie think they should dedicate 3 hours to each of the 2 chapters, 3 hours x 2 chapters = 6 hours total.\nFor the worksheets they plan to dedicate 1.5 hours for each worksheet, 1.5 hours x 4 worksheets = 6 hours total.\nAngelo and Melanie need to start with planning 12 hours to study, at 4 hours a day, 12 / 4 = 3 days.\nHowever, they need to include time for breaks and lunch. Every hour they want to include a 10-minute break, so 12 total hours x 10 minutes = 120 extra minutes for breaks.\nThey also want to include 3 10-minute snack breaks, 3 x 10 minutes = 30 minutes.\nAnd they want to include 30 minutes for lunch each day, so 120 minutes for breaks + 30 minutes for snack breaks + 30 minutes for lunch = 180 minutes, or 180 / 60 minutes per hour = 3 extra hours.\nSo Angelo and Melanie want to plan 12 hours to study + 3 hours of breaks = 15 hours total.\nThey want to study no more than 4 hours each day, 15 hours / 4 hours each day = 3.75\nThey will need to plan to study 4 days to allow for all the time they need.\nThe answer is 4\n\n"
"Question: Mark's basketball team scores 25 2 pointers, 8 3 pointers and 10 free throws. Their opponents score double the 2 pointers but half the 3 pointers and free throws. What's the total number of points scored by both teams added together?\nLet's think step by step\n"
"Mark's team scores 25 2 pointers, meaning they scored 25*2= 50 points in 2 pointers.\nHis team also scores 6 3 pointers, meaning they scored 8*3= 24 points in 3 pointers\nThey scored 10 free throws, and free throws count as one point so they scored 10*1=10 points in free throws.\nAll together his team scored 50+24+10= 84 points\nMark's opponents scored double his team's number of 2 pointers, meaning they scored 50*2=100 points in 2 pointers.\nHis opponents scored half his team's number of 3 pointers, meaning they scored 24/2= 12 points in 3 pointers.\nThey also scored half Mark's team's points in free throws, meaning they scored 10/2=5 points in free throws.\nAll together Mark's opponents scored 100+12+5=117 points\nThe total score for the game is both team's scores added together, so it is 84+117=201 points\nThe answer is 201\n\n"
"Question: Bella has two times as many marbles as frisbees. She also has 20 more frisbees than deck cards. If she buys 2/5 times more of each item, what would be the total number of the items she will have if she currently has 60 marbles?\nLet's think step by step\n"
"When Bella buys 2/5 times more marbles, she'll have increased the number of marbles by 2/5*60 = 24\nThe total number of marbles she'll have is 60+24 = 84\nIf Bella currently has 60 marbles, and she has two times as many marbles as frisbees, she has 60/2 = 30 frisbees.\nIf Bella buys 2/5 times more frisbees, she'll have 2/5*30 = 12 more frisbees.\nThe total number of frisbees she'll have will increase to 30+12 = 42\nBella also has 20 more frisbees than deck cards, meaning she has 30-20 = 10 deck cards\nIf she buys 2/5 times more deck cards, she'll have 2/5*10 = 4 more deck cards.\nThe total number of deck cards she'll have is 10+4 = 14\nTogether, Bella will have a total of 14+42+84 = 140 items\nThe answer is 140\n\n"
"Question: A group of 4 fruit baskets contains 9 apples, 15 oranges, and 14 bananas in the first three baskets and 2 less of each fruit in the fourth basket. How many fruits are there?\nLet's think step by step\n"
"For the first three baskets, the number of apples and oranges in one basket is 9+15=24\nIn total, together with bananas, the number of fruits in one basket is 24+14=38 for the first three baskets.\nSince there are three baskets each having 38 fruits, there are 3*38=114 fruits in the first three baskets.\nThe number of apples in the fourth basket is 9-2=7\nThere are also 15-2=13 oranges in the fourth basket\nThe combined number of oranges and apples in the fourth basket is 13+7=20\nThe fourth basket also contains 14-2=12 bananas.\nIn total, the fourth basket has 20+12=32 fruits.\nThe four baskets together have 32+114=146 fruits.\nThe answer is 146\n\n"
f"Question: {doc['question']}\nLet's think step by step"
)
else:
context = doc["question"]
return context | null |
8,047 | import json
import re
from pathlib import Path
import argparse
import requests
import math
import numpy as np
import tqdm
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def generate_sample(model, tokenizer, question):
response, _ = model.chat(
tokenizer,
question,
history=None,
)
print(question)
print("-------------")
print(response)
print("=============")
return response | null |
8,048 | import json
import re
from pathlib import Path
import argparse
import requests
import math
import numpy as np
import tqdm
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def extract_answer(s):
_PAT_LAST_DIGIT = re.compile(
r"([+-])?(?=([0-9]|\.[0-9]))(0|([1-9](\d{0,2}(,\d{3})*)|\d*))?(\.\d*)?(?=\D|$)"
)
match = list(_PAT_LAST_DIGIT.finditer(s))
if match:
last_digit = match[-1].group().replace(",", "").replace("+", "").strip()
# print(f"The last digit in {s} is {last_digit}")
else:
last_digit = None
print(f"No digits found in {s!r}", flush=True)
return last_digit
def is_correct(completion, answer):
gold = extract_answer(answer)
assert gold is not None, "No ground truth answer found in the document."
def number_equal(answer, pred):
if pred is None:
return False
try:
return math.isclose(eval(answer), eval(pred), rel_tol=0, abs_tol=1e-4)
        except Exception:
print(
f"cannot compare two numbers: answer={answer}, pred={pred}", flush=True
)
return False
return number_equal(gold, extract_answer(completion)) | null |
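Quick sanity checks for the numeric matcher above (invented strings):

print(extract_answer("So the total cost is 1,234.5 dollars."))  # 1234.5
print(is_correct(
    "Adding them up gives 42. The answer is 42",
    "She pays 6*7=42 dollars. The answer is 42",
))  # True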
8,049 | import re
import textwrap
import argparse
from pathlib import Path
import tqdm
import jsonlines
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def extract_code(text, entry_point):
    # Regex to match a fenced code block that defines the target function
code_block_pattern = re.compile(
rf"```(?:[Pp]ython\n)?.*?def\s+{entry_point}.*?:\n(.*?)\n```", re.DOTALL
)
code_block = code_block_pattern.search(text)
if code_block is None:
code_block_pattern = re.compile(
rf"def\s+{entry_point}.*?:\n(.*?)(?:\n(?!\n*(?: |\t))|$)", re.DOTALL
)
code_block = code_block_pattern.search(text)
if code_block is None:
code_block_pattern = re.compile(
r"def.*?:\n(.*?)(?:\n(?!\n*(?: |\t))|$)", re.DOTALL
)
code_block = code_block_pattern.search(text)
if code_block is not None:
return code_block.group(1)
# if no code block is found, assume the LM is simply filling the code
return textwrap.indent(text, " " * 4)
def generate_sample(model, tokenizer, question, entry_point):
response, _ = model.chat(
tokenizer,
question,
history=None,
)
print(question)
print(response)
answer = extract_code(response, entry_point)
return answer, response | null |
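A made-up completion showing the first extraction path above (a fenced block that defines the entry point):

text = "```python\ndef add(a, b):\n    return a + b\n```"
print(extract_code(text, "add"))  # "    return a + b"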
8,050 | import os
from typing import List
import pandas as pd
import numpy as np
import argparse
import torch
from tqdm import tqdm
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def load_models_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(
args.checkpoint_path,
pad_token='<|extra_0|>',
eos_token='<|endoftext|>',
padding_side='left',
trust_remote_code=True
)
model = AutoModelForCausalLM.from_pretrained(
args.checkpoint_path,
pad_token_id=tokenizer.pad_token_id,
device_map="auto",
trust_remote_code=True
).eval()
model.generation_config = GenerationConfig.from_pretrained(
args.checkpoint_path,
pad_token_id=tokenizer.pad_token_id,
trust_remote_code=True
)
return model, tokenizer | null |
8,051 | import os
from typing import List
import pandas as pd
import numpy as np
import argparse
import torch
from tqdm import tqdm
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def format_example(line, include_answer=True):
    # The body is elided in the source; a hedged sketch mirroring the C-Eval
    # variant above: format one question with its A-D choices, optionally
    # appending the gold answer (the English wording is an assumption).
    example = "Question: " + line["question"]
    for choice in choices:
        example += f'\n{choice}. {line[f"{choice}"]}'
    if include_answer:
        example += "\nAnswer: " + line["answer"] + "\n\n"
    else:
        example += "\nAnswer:"
    return example
def generate_few_shot_prompt(k, subject, dev_df):
    # Elided in the source; sketch: concatenate k dev examples into one prompt.
    prompt = ""
    if k == -1:
        k = dev_df.shape[0]
    for i in range(k):
        prompt += format_example(dev_df.iloc[i, :], include_answer=True)
    return prompt
def get_logits(tokenizer, model, inputs: List[str]):
    # Elided in the source; sketch: batch-encode the prompts and return the
    # softmax distribution over the vocabulary at the last position.
    input_ids = tokenizer(inputs, padding='longest')["input_ids"]
    input_ids = torch.tensor(input_ids, device=model.device)
    tokens = {"input_ids": input_ids}
    attention_mask = input_ids.ne(tokenizer.pad_token_id)
    outputs = model(input_ids, attention_mask=attention_mask)["logits"]
    logits = outputs[:, -1, :]
    probs = torch.nn.functional.softmax(logits, dim=-1)
    return probs, {"tokens": tokens}
choices = ["A", "B", "C", "D"]
def eval_subject(
model,
tokenizer,
subject_name,
test_df,
k=5,
dev_df=None,
few_shot=False,
save_result_dir=None,
batch_size=1,
**kwargs,
):
result = []
score = []
    few_shot_prompt = (
        generate_few_shot_prompt(k, subject_name, dev_df) if few_shot else ""
    )
all_probs = {"prob_A": [], "prob_B": [], "prob_C": [], "prob_D": []}
if args.debug:
print(f"few_shot_prompt: {few_shot_prompt}")
choices_ids = torch.tensor(
tokenizer(" A")["input_ids"] + tokenizer(" B")["input_ids"] +
tokenizer(" C")["input_ids"] + tokenizer(" D")["input_ids"]
).unsqueeze(0).to(model.device)
idx_list = list(range(0, len(test_df), batch_size))
for i in tqdm(idx_list):
full_prompt_list = []
answer_list = []
for row in test_df.iloc[i:i+batch_size].to_dict(orient='records'):
question = format_example(row, include_answer=False)
full_prompt = few_shot_prompt + question
full_prompt_list.append(full_prompt)
if 'answer' in row:
answer_list.append(row['answer'])
logits, input_info = get_logits(tokenizer, model, full_prompt_list)
softval = logits.gather(1, choices_ids.expand(logits.size(0), -1)).softmax(1)
if softval.dtype in {torch.bfloat16, torch.float16}:
softval = softval.to(dtype=torch.float32)
probs = softval.detach().cpu().numpy()
for i in range(len(probs)):
for j, choice in enumerate(choices):
all_probs[f"prob_{choice}"].append(probs[i][j])
pred = {0: "A", 1: "B", 2: "C", 3: "D"}[np.argmax(probs[i])]
if answer_list != []:
correct = 1 if pred == answer_list[i] else 0
score.append(correct)
if args.debug:
print(f'{question} pred: {pred} ref: {answer_list[i]}')
result.append(pred)
if save_result_dir:
test_df["model_output"] = result
for i, choice in enumerate(choices):
test_df[f"prob_{choice}"] = all_probs[f"prob_{choice}"]
if score:
test_df["correctness"] = score
os.makedirs(save_result_dir, exist_ok=True)
test_df.to_csv(
os.path.join(save_result_dir, f"{subject_name}_result.csv"),
encoding="utf-8",
index=False,
)
return score | null |
8,052 | import os
from typing import List
import pandas as pd
import numpy as np
import argparse
import torch
from tqdm import tqdm
from transformers.trainer_utils import set_seed
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
TASK_NAME_MAPPING = {
"stem": [
"abstract_algebra",
"anatomy",
"astronomy",
"college_biology",
"college_chemistry",
"college_computer_science",
"college_mathematics",
"college_physics",
"computer_security",
"conceptual_physics",
"electrical_engineering",
"elementary_mathematics",
"high_school_biology",
"high_school_chemistry",
"high_school_computer_science",
"high_school_mathematics",
"high_school_physics",
"high_school_statistics",
"machine_learning",
],
"Humanities": [
"formal_logic",
"high_school_european_history",
"high_school_us_history",
"high_school_world_history",
"international_law",
"jurisprudence",
"logical_fallacies",
"moral_disputes",
"moral_scenarios",
"philosophy",
"prehistory",
"professional_law",
"world_religions",
],
"other": [
"business_ethics",
"college_medicine",
"human_aging",
"management",
"marketing",
"medical_genetics",
"miscellaneous",
"nutrition",
"professional_accounting",
"professional_medicine",
"virology",
"global_facts",
"clinical_knowledge",
],
"social": [
"econometrics",
"high_school_geography",
"high_school_government_and_politics",
"high_school_macroeconomics",
"high_school_microeconomics",
"high_school_psychology",
"human_sexuality",
"professional_psychology",
"public_relations",
"security_studies",
"sociology",
"us_foreign_policy",
],
}
def cal_mmlu(res):
acc_sum_dict = dict()
acc_norm_sum_dict = dict()
cnt_dict = dict()
acc_sum = 0.0
cnt = 0
hard_cnt = 0
hard_acc_sum = 0.0
for class_ in TASK_NAME_MAPPING.keys():
acc_sum_dict[class_] = 0.0
acc_norm_sum_dict[class_] = 0.0
cnt_dict[class_] = 0.0
for tt in TASK_NAME_MAPPING[class_]:
acc_sum += sum(res[tt])
cnt += len(res[tt])
acc_sum_dict[class_] += sum(res[tt])
cnt_dict[class_] += len(res[tt])
print("\n\n\n", "total cnt:", cnt, "\n")
for k in TASK_NAME_MAPPING.keys():
if k in cnt_dict:
print("%s ACC: %.2f " % (k, acc_sum_dict[k] / cnt_dict[k] * 100))
print("AVERAGE ACC:%.2f " % (acc_sum / cnt * 100)) | null |
8,053 | import re
import torch
import argparse
import jsonlines
import numpy as np
import datasets
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def doc_to_text(doc):
return (
fewshot_prompt
+ "\nQuestion: "
+ doc["question"]
+ "\nLet's think step by step\n"
) | null |
8,054 | import re
import torch
import argparse
import jsonlines
import numpy as np
import datasets
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
def decode(tokens_list, tokenizer, raw_text_len):
sents = []
# print(len(tokens_list))
for tokens in tokens_list:
tokens = tokens.cpu().numpy().tolist()
sent = tokenizer.tokenizer.decode(tokens[raw_text_len:])
sent = sent.split("<|endoftext|>")[0]
sent = sent.split("\n\n\n")[0]
sent = sent.split("\n\n")[0]
sent = sent.split("Question:")[0]
sents.append(sent)
return sents
def generate_sample(model, tokenizer, input_txt):
input_ids = tokenizer.tokenizer.encode(input_txt)
raw_text_len = len(input_ids)
context_enc = torch.tensor([input_ids]).to(model.device)
print(f"Input text: {input_txt}\n")
outputs = model.generate(context_enc)
output_text = decode(outputs, tokenizer, raw_text_len)[0]
print(f"\nOutput text: {output_text}\n")
return output_text | null |
8,055 | import re
import torch
import argparse
import jsonlines
import numpy as np
import datasets
from datasets import load_from_disk, load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import GenerationConfig
# ANS_RE is used below but not defined in this snippet; reconstructed here as
# the standard GSM8K pattern for "#### <answer>" reference lines.
ANS_RE = re.compile(r"#### (\-?[0-9\.\,]+)")
INVALID_ANS = "[invalid]"
def extract_answer_hf(completion):
match = ANS_RE.search(completion)
if match:
match_str = match.group(1).strip()
match_str = match_str.replace(",", "")
return eval(match_str)
else:
return INVALID_ANS
def extract_answer(completion):
try:
last_number = re.findall(r"\d+", completion)[-1]
return eval(last_number)
    except Exception:
return INVALID_ANS
def is_correct(completion, answer):
gold = extract_answer_hf(answer)
assert gold != INVALID_ANS, "No ground truth answer found in the document."
return extract_answer(completion) == gold | null |
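Sanity checks for the two extractors (invented strings; is_correct assumes the GSM8K convention that reference answers end with "#### <number>"):

print(extract_answer("6 + 7 = 13. The answer is 13"))  # 13
print(is_correct("...so she has 13 apples. The answer is 13", "6 + 7 = 13\n#### 13"))  # True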
8,056 | from http import HTTPStatus
import numpy as np
from albumentations.pytorch.transforms import ToTensorV2
from fastapi import FastAPI, File, UploadFile
from PIL import Image
from image_to_latex.lit_models import LitResNetTransformer
async def load_model():
global lit_model
global transform
lit_model = LitResNetTransformer.load_from_checkpoint("artifacts/model.pt")
lit_model.freeze()
transform = ToTensorV2() | null |
8,057 | from http import HTTPStatus
import numpy as np
from albumentations.pytorch.transforms import ToTensorV2
from fastapi import FastAPI, File, UploadFile
from PIL import Image
from image_to_latex.lit_models import LitResNetTransformer
The provided code snippet includes necessary dependencies for implementing the `read_root` function. Write a Python function `def read_root()` to solve the following problem:
Health check.
Here is the function:
def read_root():
"""Health check."""
response = {
"message": HTTPStatus.OK.phrase,
"status-code": HTTPStatus.OK,
"data": {},
}
return response | Health check. |
8,058 | from http import HTTPStatus
import numpy as np
from albumentations.pytorch.transforms import ToTensorV2
from fastapi import FastAPI, File, UploadFile
from PIL import Image
from image_to_latex.lit_models import LitResNetTransformer
def predict(file: UploadFile = File(...)):
image = Image.open(file.file).convert("L")
image_tensor = transform(image=np.array(image))["image"] # type: ignore
pred = lit_model.model.predict(image_tensor.unsqueeze(0).float())[0] # type: ignore
decoded = lit_model.tokenizer.decode(pred.tolist()) # type: ignore
decoded_str = " ".join(decoded)
response = {
"message": HTTPStatus.OK.phrase,
"status-code": HTTPStatus.OK,
"data": {"pred": decoded_str},
}
return response | null |
8,059 | import json
import tarfile
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Union
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from tqdm import tqdm
class TqdmUpTo(tqdm):
"""From https://github.com/tqdm/tqdm/blob/master/examples/tqdm_wget.py."""
def update_to(self, blocks=1, bsize=1, tsize=None) -> None:
"""Inform the progress bar how many data have been downloaded.
Args:
blocks: Number of blocks transferred so far.
bsize: Size of each block (in tqdm units).
tsize: Total size (in tqdm units).
"""
if tsize is not None:
self.total = tsize
self.update(blocks * bsize - self.n)
The provided code snippet includes necessary dependencies for implementing the `download_url` function. Write a Python function `def download_url(url: str, filename: str) -> None` to solve the following problem:
Download a file from url to filename, with a progress bar.
Here is the function:
def download_url(url: str, filename: str) -> None:
"""Download a file from url to filename, with a progress bar."""
with TqdmUpTo(unit="B", unit_scale=True, unit_divisor=1024, miniters=1) as t:
t.set_description(filename)
urlretrieve(url, filename, reporthook=t.update_to, data=None) | Download a file from url to filename, with a progress bar. |
8,060 | import json
import tarfile
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Union
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `extract_tar_file` function. Write a Python function `def extract_tar_file(filename: str) -> None` to solve the following problem:
Extract a .tar or .tar.gz file.
Here is the function:
def extract_tar_file(filename: str) -> None:
"""Extract a .tar or .tar.gz file."""
print(f"Extracting {filename}...")
with tarfile.open(filename, "r") as f:
f.extractall() | Extract a .tar or .tar.gz file. |
8,061 | import json
import tarfile
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Union
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `get_all_formulas` function. Write a Python function `def get_all_formulas(filename: Path) -> List[List[str]]` to solve the following problem:
Returns all the formulas in the formula file.
Here is the function:
def get_all_formulas(filename: Path) -> List[List[str]]:
"""Returns all the formulas in the formula file."""
with open(filename) as f:
all_formulas = [formula.strip("\n").split() for formula in f.readlines()]
return all_formulas | Returns all the formulas in the formula file. |
8,062 | import json
import tarfile
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Union
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from tqdm import tqdm
def get_split(
all_formulas: List[List[str]],
filename: Path,
) -> Tuple[List[str], List[List[str]]]:
image_names = []
formulas = []
with open(filename) as f:
for line in f:
img_name, formula_idx = line.strip("\n").split()
image_names.append(img_name)
formulas.append(all_formulas[int(formula_idx)])
return image_names, formulas | null |
8,063 | import json
import tarfile
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple, Union
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from tqdm import tqdm
def pil_loader(fp: Path, mode: str) -> Image.Image:
with open(fp, "rb") as f:
img = Image.open(f)
return img.convert(mode)
def first_and_last_nonzeros(arr):
for i in range(len(arr)):
if arr[i] != 0:
break
left = i
for i in reversed(range(len(arr))):
if arr[i] != 0:
break
right = i
return left, right
def crop(filename: Path, padding: int = 8) -> Optional[Image.Image]:
image = pil_loader(filename, mode="RGBA")
# Replace the transparency layer with a white background
new_image = Image.new("RGBA", image.size, "WHITE")
new_image.paste(image, (0, 0), image)
new_image = new_image.convert("L")
# Invert the color to have a black background and white text
arr = 255 - np.array(new_image)
# Area that has text should have nonzero pixel values
row_sums = np.sum(arr, axis=1)
col_sums = np.sum(arr, axis=0)
y_start, y_end = first_and_last_nonzeros(row_sums)
x_start, x_end = first_and_last_nonzeros(col_sums)
# Some images have no text
if y_start >= y_end or x_start >= x_end:
print(f"{filename.name} is ignored because it does not contain any text")
return None
# Cropping
cropped = arr[y_start : y_end + 1, x_start : x_end + 1]
H, W = cropped.shape
# Add paddings
new_arr = np.zeros((H + padding * 2, W + padding * 2))
new_arr[padding : H + padding, padding : W + padding] = cropped
# Invert the color back to have a white background and black text
new_arr = 255 - new_arr
return Image.fromarray(new_arr).convert("L") | null |
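first_and_last_nonzeros drives the cropping above by locating the text region in the row/column pixel sums. A tiny demo on a made-up profile:

print(first_and_last_nonzeros([0, 0, 5, 3, 0, 2, 0]))  # (2, 5)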
8,064 | import math
from typing import Union
import torch
import torch.nn as nn
import torchvision.models
from torch import Tensor
from .positional_encoding import PositionalEncoding1D, PositionalEncoding2D
The provided code snippet includes necessary dependencies for implementing the `generate_square_subsequent_mask` function. Write a Python function `def generate_square_subsequent_mask(size: int) -> Tensor` to solve the following problem:
Generate a triangular (size, size) mask.
Here is the function:
def generate_square_subsequent_mask(size: int) -> Tensor:
"""Generate a triangular (size, size) mask."""
mask = (torch.triu(torch.ones(size, size)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float("-inf")).masked_fill(mask == 1, float(0.0))
return mask | Generate a triangular (size, size) mask. |
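For size 3 the mask lets each position attend only to itself and earlier positions (0.0 = allowed, -inf = blocked):

print(generate_square_subsequent_mask(3))
# tensor([[0., -inf, -inf],
#         [0., 0., -inf],
#         [0., 0., 0.]])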
8,065 | import math
from typing import Union
import torch
import torch.nn as nn
import torchvision.models
from torch import Tensor
from .positional_encoding import PositionalEncoding1D, PositionalEncoding2D
The provided code snippet includes necessary dependencies for implementing the `find_first` function. Write a Python function `def find_first(x: Tensor, element: Union[int, float], dim: int = 1) -> Tensor` to solve the following problem:
Find the first occurrence of element in x along a given dimension. Args: x: The input tensor to be searched. element: The number to look for. dim: The dimension to reduce. Returns: Indices of the first occurrence of the element in x. If not found, return the length of x along dim. Usage: >>> find_first(Tensor([[1, 2, 3], [2, 3, 3], [1, 1, 1]]), 3) tensor([2, 1, 3]) Reference: https://discuss.pytorch.org/t/first-nonzero-index/24769/9 I fixed an edge case where the element we are looking for is at index 0. The original algorithm will return the length of x instead of 0.
Here is the function:
def find_first(x: Tensor, element: Union[int, float], dim: int = 1) -> Tensor:
"""Find the first occurence of element in x along a given dimension.
Args:
x: The input tensor to be searched.
element: The number to look for.
dim: The dimension to reduce.
Returns:
        Indices of the first occurrence of the element in x. If not found, return the
length of x along dim.
Usage:
        >>> find_first(Tensor([[1, 2, 3], [2, 3, 3], [1, 1, 1]]), 3)
tensor([2, 1, 3])
Reference:
https://discuss.pytorch.org/t/first-nonzero-index/24769/9
I fixed an edge case where the element we are looking for is at index 0. The
original algorithm will return the length of x instead of 0.
"""
mask = x == element
found, indices = ((mask.cumsum(dim) == 1) & mask).max(dim)
indices[(~found) & (indices == 0)] = x.shape[dim]
    return indices | Find the first occurrence of element in x along a given dimension. Args: x: The input tensor to be searched. element: The number to look for. dim: The dimension to reduce. Returns: Indices of the first occurrence of the element in x. If not found, return the length of x along dim. Usage: >>> find_first(Tensor([[1, 2, 3], [2, 3, 3], [1, 1, 1]]), 3) tensor([2, 1, 3]) Reference: https://discuss.pytorch.org/t/first-nonzero-index/24769/9 I fixed an edge case where the element we are looking for is at index 0. The original algorithm will return the length of x instead of 0.
8,066 | import argparse
import shutil
import tempfile
from pathlib import Path
import wandb
The provided code snippet includes necessary dependencies for implementing the `download_checkpoint` function. Write a Python function `def download_checkpoint(run_path: str) -> None` to solve the following problem:
Download model checkpoint from Weights & Biases. Args: run_path: The run path for a run, in the format of '<entity>/<project>/<run_id>'. To find the run path for a run, go to the Overview tab in wandb dashboard.
Here is the function:
def download_checkpoint(run_path: str) -> None:
"""Download model checkpoint from Weights & Biases.
Args:
run_path: The run path for a run, in the format of
'<entity>/<project>/<run_id>'. To find the run path for a run, go
to the Overview tab in wandb dashboard.
"""
artifacts_dirname = Path(__file__).parents[1].resolve() / "artifacts"
artifacts_dirname.mkdir(parents=True, exist_ok=True)
api = wandb.Api()
wandb_run = api.run(f"{run_path}")
checkpoint_wandb_files = [file for file in wandb_run.files() if file.name.endswith("ckpt")]
if not checkpoint_wandb_files:
print("Model checkpoint not found.")
return
wandb_file = checkpoint_wandb_files[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
print("Downloading model checkpoint...")
wandb_file.download(root=tmp_dirname, replace=True)
checkpoint_filename = f"{tmp_dirname}/{wandb_file.name}"
shutil.copyfile(src=checkpoint_filename, dst=artifacts_dirname / "model.pt")
print(f"Model checkpoint downloaded to {str(artifacts_dirname / 'model.pt')}.") | Download model checkpoint from Weights & Biases. Args: run_path: The run path for a run, in the format of '<entity>/<project>/<run_id>'. To find the run path for a run, go to the Overview tab in wandb dashboard. |
8,067 | import os
import re
import sys
from setuptools import find_packages, setup
pwd = os.path.dirname(__file__)
def readme():
with open(os.path.join(pwd, 'README.md'), encoding='utf-8') as f:
content = f.read()
return content | null |
8,068 | import os
import re
import sys
from setuptools import find_packages, setup
pwd = os.path.dirname(__file__)
version_file = 'lmdeploy/version.py'
def get_version():
with open(os.path.join(pwd, version_file), 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__'] | null |
8,069 | import os
import re
import sys
from setuptools import find_packages, setup
pwd = os.path.dirname(__file__)
def check_ext_modules():
if os.path.exists(os.path.join(pwd, 'lmdeploy', 'lib')):
return True
return False | null |
8,070 | import os
import re
import sys
from setuptools import find_packages, setup
def get_cuda_pkgs():
arg_name = '--cuda='
arg_value = None
for arg in sys.argv[1:]:
if arg.startswith(arg_name):
arg_value = arg[len(arg_name):]
sys.argv.remove(arg)
break
cuda_pkgs = []
if arg_value == '11':
cuda_pkgs = [
'nvidia-nccl-cu11', 'nvidia-cuda-runtime-cu11',
'nvidia-cublas-cu11'
]
elif arg_value == '12':
cuda_pkgs = [
'nvidia-nccl-cu12', 'nvidia-cuda-runtime-cu12',
'nvidia-cublas-cu12'
]
    return cuda_pkgs

# Call the helper after its definition so the snippet runs top-to-bottom.
cuda_pkgs = get_cuda_pkgs() | null
8,071 | import os
import re
import sys
from setuptools import find_packages, setup
cuda_pkgs = get_cuda_pkgs()
The provided code snippet includes necessary dependencies for implementing the `parse_requirements` function. Write a Python function `def parse_requirements(fname='requirements.txt', with_version=True)` to solve the following problem:
Parse the package dependencies listed in a file but strips specific versioning information. Args: fname (str): path to the file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())"
Here is the function:
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a file but strips specific
versioning information.
Args:
fname (str): path to the file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if os.path.exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
packages += cuda_pkgs
return packages | Parse the package dependencies listed in a file but strips specific versioning information. Args: fname (str): path to the file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())" |
8,072 | from typing import Tuple
The provided code snippet includes necessary dependencies for implementing the `parse_version_info` function. Write a Python function `def parse_version_info(version_str: str) -> Tuple` to solve the following problem:
Parse version from a string. Args: version_str (str): A string represents a version info. Returns: tuple: A sequence of integer and string represents version.
Here is the function:
def parse_version_info(version_str: str) -> Tuple:
"""Parse version from a string.
Args:
version_str (str): A string represents a version info.
Returns:
tuple: A sequence of integer and string represents version.
"""
_version_info = []
for x in version_str.split('.'):
if x.isdigit():
_version_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
_version_info.append(int(patch_version[0]))
_version_info.append(f'rc{patch_version[1]}')
return tuple(_version_info) | Parse version from a string. Args: version_str (str): A string represents a version info. Returns: tuple: A sequence of integer and string represents version. |
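Two examples of the parsing behavior, inferred from the function body above:

print(parse_version_info("0.2.1"))     # (0, 2, 1)
print(parse_version_info("0.3.0rc1"))  # (0, 3, 0, 'rc1')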
8,073 | import os
import dataclasses
from dataclasses import dataclass
from typing import Dict, List, Literal, Optional, Union
from .archs import autoget_backend_config
from .messages import PytorchEngineConfig, TurbomindEngineConfig
from .model import ChatTemplateConfig
def serve(model_path: str,
model_name: Optional[str] = None,
backend: Literal['turbomind', 'pytorch'] = 'turbomind',
backend_config: Optional[Union[TurbomindEngineConfig,
PytorchEngineConfig]] = None,
chat_template_config: Optional[ChatTemplateConfig] = None,
server_name: str = '0.0.0.0',
server_port: int = 23333,
log_level: str = 'ERROR',
api_keys: Optional[Union[List[str], str]] = None,
ssl: bool = False,
**kwargs):
"""This will run the api_server in a subprocess.
Args:
model_path (str): the path of a model.
It could be one of the following options:
- i) A local directory path of a turbomind model which is
converted by `lmdeploy convert` command or download from
ii) and iii).
- ii) The model_id of a lmdeploy-quantized model hosted
inside a model repo on huggingface.co, such as
"InternLM/internlm-chat-20b-4bit",
"lmdeploy/llama2-chat-70b-4bit", etc.
- iii) The model_id of a model hosted inside a model repo
on huggingface.co, such as "internlm/internlm-chat-7b",
"Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat"
and so on.
model_name (str): needed when model_path is a pytorch model on
huggingface.co, such as "internlm/internlm-chat-7b",
"Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat" and so on.
backend (str): either `turbomind` or `pytorch` backend. Default to
`turbomind` backend.
backend_config (TurbomindEngineConfig | PytorchEngineConfig): backend
config instance. Default to none.
chat_template_config (ChatTemplateConfig): chat template configuration.
Default to None.
server_name (str): host ip for serving
server_port (int): server port
log_level(str): set log level whose value among [CRITICAL, ERROR, WARNING, INFO, DEBUG]
api_keys (List[str] | str | None): Optional list of API keys. Accepts string type as
a single api_key. Default to None, which means no api key applied.
ssl (bool): Enable SSL. Requires OS Environment variables 'SSL_KEYFILE' and 'SSL_CERTFILE'.
Return:
APIClient: A client chatbot for LLaMA series models.
Examples:
>>> import lmdeploy
>>> client = lmdeploy.serve('internlm/internlm-chat-7b', 'internlm-chat-7b')
>>> for output in client.chat('hi', 1):
... print(output)
""" # noqa E501
import time
from multiprocessing import Process
from lmdeploy.serve.openai.api_client import APIClient
from lmdeploy.serve.openai.api_server import serve
if type(backend_config) is not PytorchEngineConfig:
# set auto backend mode
backend_config = autoget_backend_config(model_path, backend_config)
backend = 'pytorch' if type(
backend_config) is PytorchEngineConfig else 'turbomind'
if 'tp' in kwargs:
tp = kwargs['tp']
kwargs.pop('tp')
else:
tp = 1 if backend_config is None else backend_config.tp
task = Process(target=serve,
args=(model_path, ),
kwargs=dict(model_name=model_name,
backend=backend,
backend_config=backend_config,
chat_template_config=chat_template_config,
server_name=server_name,
server_port=server_port,
tp=tp,
log_level=log_level,
api_keys=api_keys,
ssl=ssl,
**kwargs),
daemon=True)
task.start()
client = APIClient(f'http://{server_name}:{server_port}')
while True:
time.sleep(1)
try:
client.available_models
print(
f'Launched the api_server in process {task.pid}, user can '
f'kill the server by:\nimport os,signal\nos.kill({task.pid}, '
'signal.SIGKILL)')
return client
except: # noqa
pass
def autoget_backend_config(
model_path: str,
backend_config: Optional[Union[PytorchEngineConfig,
TurbomindEngineConfig]] = None
) -> Union[PytorchEngineConfig, TurbomindEngineConfig]:
"""Get backend config automatically.
Args:
model_path (str): The input model path.
backend_config (TurbomindEngineConfig | PytorchEngineConfig): The
input backend config. Default to None.
Returns:
(PytorchEngineConfig | TurbomindEngineConfig): The auto-determined
backend engine config.
"""
from dataclasses import asdict
backend = autoget_backend(model_path)
if backend == 'pytorch':
config = PytorchEngineConfig()
else:
config = TurbomindEngineConfig()
if backend_config is not None:
data = asdict(backend_config)
for k, v in data.items():
if v and hasattr(config, k):
setattr(config, k, v)
return config
@dataclass
class TurbomindEngineConfig:
"""TurboMind Engine config.
Args:
model_name (str): the name of the deployed model, deprecated and has no effect when version > 0.2.1
model_format (str): the layout of the deployed model. It can be one of the following values [hf, llama, awq], `hf` meaning `hf_llama`, `llama` meaning `meta_llama`, `awq` meaning the quantized model by AWQ.
tp (int): the number of GPU cards used in tensor parallelism, default to 1
session_len (int): the max session length of a sequence, default to None
max_batch_size (int): the max batch size during inference, default to 128
cache_max_entry_count (float): the percentage of gpu memory occupied by the k/v cache.
For versions of lmdeploy between `v0.2.0` and `v0.2.1`, it defaults to 0.5, depicting the percentage of TOTAL GPU memory to be allocated to the k/v cache.
For lmdeploy versions greater than `v0.2.1`, it defaults to 0.8, signifying the percentage of FREE GPU memory to be reserved for the k/v cache
        quant_policy (int): default to 0. When k/v is quantized into 8 bit, set it to 4
rope_scaling_factor (int): scaling factor used for dynamic ntk, default to 0. TurboMind follows the implementation of transformer LlamaAttention
use_logn_attn (bool): whether or not to use log attn: default to False
download_dir (str): Directory to download and load the weights, default to the default cache directory of huggingface.
revision (str): The specific model version to use. It can be a branch name, a tag name, or a commit id. If unspecified, will use the default version.
        max_prefill_token_num (int): the number of tokens each iteration during prefill, default to 8192
""" # noqa: E501
model_name: Optional[str] = None
model_format: Optional[str] = None
tp: int = 1
session_len: Optional[int] = None
max_batch_size: int = 128
cache_max_entry_count: float = 0.8
quant_policy: int = 0
rope_scaling_factor: float = 0.0
use_logn_attn: bool = False
download_dir: Optional[str] = None
revision: Optional[str] = None
max_prefill_token_num: int = 8192
@dataclass
class PytorchEngineConfig:
"""PyTorch Engine Config.
Args:
model_name (str): name of the given model.
tp (int): Tensor Parallelism. default 1.
session_len (int): Max session length. Default None.
max_batch_size (int): Max batch size. Default 128.
cache_max_entry_count (float): the percentage of gpu memory occupied
by the k/v cache. For lmdeploy versions greater than `v0.2.1`,
it defaults to 0.8, signifying the percentage of FREE GPU memory
to be reserved for the k/v cache
eviction_type (str): What action to perform when kv cache
is full, ['recompute', 'copy'], Default 'recompute'.
prefill_interval (int): Interval to perform prefill,
Default 16.
block_size (int): paging cache block size, default 64.
        num_cpu_blocks (int): Num cpu blocks. If num is 0, cache
            would be allocated according to current environment.
        num_gpu_blocks (int): Num gpu blocks. If num is 0, cache
            would be allocated according to current environment.
adapters (dict): The path configs to lora adapters.
max_prefill_token_num (int): tokens per iteration.
thread_safe (bool): thread safe engine instance.
download_dir (str): Directory to download and load the weights,
default to the default cache directory of huggingface.
revision (str): The specific model version to use.
It can be a branch name, a tag name, or a commit id.
If unspecified, will use the default version.
"""
model_name: str = ''
tp: int = 1
session_len: int = None
max_batch_size: int = 128
cache_max_entry_count: float = 0.8
eviction_type: str = 'recompute'
prefill_interval: int = 16
block_size: int = 64
num_cpu_blocks: int = 0
num_gpu_blocks: int = 0
adapters: Dict[str, str] = None
max_prefill_token_num: int = 8192
thread_safe: bool = False
download_dir: str = None
revision: str = None
@dataclass
class ChatTemplateConfig:
"""Parameters for chat template.
Args:
model_name (str): the name of the deployed model. Determine which chat template will be applied.
All the chat template names: `lmdeploy list`
system (str | None): begin of the system prompt
meta_instruction (str | None): system prompt
eosys (str | None): end of the system prompt
user (str | None): begin of the user prompt
eoh (str | None): end of the user prompt
assistant (str | None): begin of the assistant prompt
eoa (str | None): end of the assistant prompt
capability: ('completion' | 'infilling' | 'chat' | 'python') = None
""" # noqa: E501
model_name: str
system: Optional[str] = None
meta_instruction: Optional[str] = None
eosys: Optional[str] = None
user: Optional[str] = None
eoh: Optional[str] = None
assistant: Optional[str] = None
eoa: Optional[str] = None
separator: Optional[str] = None
capability: Optional[Literal['completion', 'infilling', 'chat',
'python']] = None
def chat_template(self):
attrs = {
key: value
for key, value in dataclasses.asdict(self).items()
if value is not None
}
if self.model_name in MODELS.module_dict.keys():
model: BaseModel = MODELS.get(self.model_name)(**attrs)
else:
logger.warning(
f'Could not find {self.model_name} in registered models. '
f'Register {self.model_name} using the BaseChatTemplate.')
model = BaseChatTemplate(**attrs)
return model
class AsyncEngine:
"""Async inference engine. Maintaining a bunch of tm_model instances.
Args:
model_path (str): the path of a model.
It could be one of the following options:
- i) A local directory path of a turbomind model which is
converted by `lmdeploy convert` command or download from
ii) and iii).
- ii) The model_id of a lmdeploy-quantized model hosted
inside a model repo on huggingface.co, such as
"InternLM/internlm-chat-20b-4bit",
"lmdeploy/llama2-chat-70b-4bit", etc.
- iii) The model_id of a model hosted inside a model repo
on huggingface.co, such as "internlm/internlm-chat-7b",
"Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat"
and so on.
model_name (str): needed when model_path is a pytorch model on
huggingface.co, such as "internlm/internlm-chat-7b",
"Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat" and so on.
backend (str): either `turbomind` or `pytorch` backend. Default to
`turbomind` backend.
backend_config (TurbomindEngineConfig | PytorchEngineConfig): backend
config instance. Default to None.
chat_template_config (ChatTemplateConfig): chat template configuration.
Default to None.
tp (int): tensor parallel
"""
def __init__(self,
model_path: str,
model_name: Optional[str] = None,
backend: Literal['turbomind', 'pytorch'] = 'turbomind',
backend_config: Optional[Union[TurbomindEngineConfig,
PytorchEngineConfig]] = None,
chat_template_config: Optional[ChatTemplateConfig] = None,
tp: int = 1,
**kwargs) -> None:
logger.info(
f'input backend={backend}, backend_config={backend_config}')
logger.info(f'input chat_template_config={chat_template_config}')
self.model_name = deduce_a_name(model_path, model_name, backend_config,
chat_template_config)
# build chat template config
if chat_template_config is None:
chat_template_config = ChatTemplateConfig(self.model_name)
elif chat_template_config.model_name is None:
chat_template_config.model_name = self.model_name
self.chat_template = chat_template_config.chat_template
# handle deprecated kwargs for backward compatibility
for k in list(kwargs.keys()):
if hasattr(chat_template_config, k):
logger.warning(f'{k} is deprecated. Please set it via '
'chat_template_config instead')
v = kwargs.pop(k)
setattr(chat_template_config, k, v)
logger.info(f'updated chat_template_config={chat_template_config}')
# build backend engine
if backend == 'turbomind':
self._build_turbomind(model_path=model_path,
backend_config=backend_config,
chat_template_config=chat_template_config,
tp=tp,
**kwargs)
elif backend == 'pytorch':
self._build_pytorch(model_path=model_path,
backend_config=backend_config,
**kwargs)
else:
raise ValueError(f'unsupported backend {backend}')
logger.info(f'updated backend_config={self.backend_config}')
# parameters for member functions
self.session_len = self.backend_config.session_len
self.stop_words = _stop_words(self.chat_template.stop_words,
self.engine.tokenizer)
if self.stop_words is not None:
self.stop_words = self.stop_words[0][0].tolist()
self.backend = backend
self.instance_num = self.backend_config.max_batch_size
self.tokenizer = self.engine.tokenizer
self.id2step = {}
self.id2generator = {}
self.loop = asyncio.get_event_loop()
self.running_session_ids = set()
self.gens_set = set()
for i in range(self.instance_num):
self.gens_set.add(self.engine.create_instance())
def _build_turbomind(
self,
model_path: str,
backend_config: Optional[Union[TurbomindEngineConfig,
PytorchEngineConfig]] = None,
chat_template_config: Optional[ChatTemplateConfig] = None,
tp: int = 1,
**kwargs):
"""Innter build method for turbomind backend."""
if backend_config is None:
backend_config = TurbomindEngineConfig(model_name=self.model_name,
tp=tp)
assert isinstance(backend_config, TurbomindEngineConfig), 'Please'\
' use TurbomindEngineConfig imported from lmdeploy.messages for ' \
'turbomind backend'
if backend_config.session_len is None:
backend_config.session_len = self.chat_template.session_len
from lmdeploy import turbomind as tm
self.engine = tm.TurboMind.from_pretrained(
model_path,
engine_config=backend_config,
chat_template_config=chat_template_config,
**kwargs)
self.backend_config = backend_config
def _build_pytorch(
self,
model_path: str,
backend_config: Optional[Union[TurbomindEngineConfig,
PytorchEngineConfig]] = None,
**kwargs):
"""Innter build method for pytorch backend."""
from lmdeploy.pytorch.engine import Engine
if backend_config is None:
backend_config = PytorchEngineConfig(self.model_name)
assert isinstance(backend_config, PytorchEngineConfig), 'Please '\
'use PytorchEngineConfig imported from lmdeploy.messages for ' \
'pytorch backend'
if backend_config.session_len is None:
backend_config.session_len = self.chat_template.session_len
self.engine = Engine(model_path=model_path,
engine_config=backend_config)
self.backend_config = backend_config
def __call__(self,
prompts: Union[List[str], str, List[Dict], List[List[Dict]]],
gen_config: Optional[GenerationConfig] = None,
request_output_len=512,
top_k: int = 40,
top_p: float = 0.8,
temperature: float = 0.8,
repetition_penalty: float = 1.0,
ignore_eos: bool = False,
do_preprocess: bool = True,
**kwargs):
"""Inference a batch of prompts.
Args:
prompts (List[str] | str | List[Dict] | List[List[Dict]]): a batch of
prompts. It accepts: a string prompt, a list of string prompts,
a chat history in OpenAI format or a list of chat histories.
gen_config (GenerationConfig | None): an instance of
GenerationConfig. Default to None.
request_output_len (int): the maximum number of output tokens
top_k (int): The number of the highest probability vocabulary
tokens to keep for top-k-filtering
top_p (float): If set to float < 1, only the smallest set of most
probable tokens with probabilities that add up to top_p or higher
are kept for generation.
temperature (float): to modulate the next token probability
repetition_penalty (float): The parameter for repetition penalty.
1.0 means no penalty
ignore_eos (bool): indicator for ignoring eos
do_preprocess (bool): whether pre-process the messages. Default to
True, which means chat_template will be applied.
"""
if gen_config is None:
gen_config = GenerationConfig(
max_new_tokens=request_output_len,
top_k=top_k,
top_p=top_p,
temperature=temperature,
repetition_penalty=repetition_penalty,
ignore_eos=ignore_eos)
return self.batch_infer(prompts,
gen_config=gen_config,
do_preprocess=do_preprocess,
**kwargs)
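A hedged usage sketch for __call__: when gen_config is None, the legacy sampling kwargs above are folded into a GenerationConfig. Model name and values are examples only:

# Illustrative only: batch inference through the engine's __call__.
engine = AsyncEngine('internlm/internlm-chat-7b')
responses = engine(['hi', 'say this is a test'],
                   request_output_len=256,
                   top_p=0.9,
                   temperature=0.7)
for r in responses:
    print(r.text)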
async def stop_session(self, session_id: int):
"""Stop a session by a session_id."""
if str(session_id) in self.id2generator:
await self.id2generator[str(session_id)].async_cancel(session_id)
self.gens_set.add(self.id2generator[str(session_id)])
self.running_session_ids.discard(session_id)
async def end_session(self, session_id: int):
"""Clear a session by a session_id."""
if str(session_id) in self.id2generator:
await self.id2generator[str(session_id)].async_end(session_id)
self.id2step[str(session_id)] = 0
self.gens_set.add(self.id2generator[str(session_id)])
self.running_session_ids.discard(session_id)
@asynccontextmanager  # from contextlib
async def safe_run(self, session_id: Optional[int] = None):
"""An async context manager that stops the failing session so the
server keeps running safely."""
try:
yield
except (Exception, asyncio.CancelledError) as e: # noqa
await self.stop_session(session_id)
raise e
if str(session_id) in self.id2generator:
self.gens_set.add(self.id2generator[str(session_id)])
self.running_session_ids.discard(session_id)
async def get_generator(self, stop: bool, session_id: int):
"""Only return the model instance if it is available."""
if stop:
return self.engine.create_instance()
# wait until a generator is available and this session_id is not running
while self.gens_set == set() or session_id in self.running_session_ids:
await asyncio.sleep(0.1)
generator = self.gens_set.pop()
self.id2generator[str(session_id)] = generator
self.running_session_ids.add(session_id)
return generator
def batch_infer(self,
prompts: Union[List[str], str, List[Dict],
List[List[Dict]]],
gen_config: Optional[Union[GenerationConfig,
EngineGenerationConfig]] = None,
do_preprocess: bool = True,
**kwargs):
"""Inference a batch of prompts.
Args:
prompts (List[str] | str | List[Dict] | List[List[Dict]]): a batch of
prompts. It accepts: a string prompt, a list of string prompts,
a chat history in OpenAI format or a list of chat histories.
gen_config (GenerationConfig | None): an instance of
GenerationConfig. Default to None.
do_preprocess (bool): whether pre-process the messages. Default to
True, which means chat_template will be applied.
"""
need_list_wrap = isinstance(prompts, str) or isinstance(
prompts[0], Dict)
prompts = [prompts] if need_list_wrap else prompts
assert isinstance(prompts, List), 'prompts should be a list'
if gen_config is None:
gen_config = GenerationConfig()
if type(gen_config) is GenerationConfig:
gen_config = EngineGenerationConfig.From(gen_config,
self.tokenizer)
# set random seed if it is not set
if gen_config.random_seed is None:
gen_config.random_seed = random.getrandbits(64)
prompt_num = len(prompts)
outputs = [Response('', 0, 0, i) for i in range(prompt_num)]
for j in range(0, prompt_num, self.instance_num):
batch_prompts = prompts[j:j + self.instance_num]
generators = []
for i, prompt in enumerate(batch_prompts):
generators.append(
self.generate(prompt,
i,
gen_config=gen_config,
stream_response=True,
sequence_start=True,
sequence_end=True,
do_preprocess=do_preprocess,
**kwargs))
async def _inner_call(i, generator):
async for out in generator:
outputs[i + j].text += out.response
outputs[i + j].generate_token_len = out.generate_token_len
outputs[i + j].input_token_len = out.input_token_len
outputs[i + j].finish_reason = out.finish_reason
async def gather():
await asyncio.gather(*[
_inner_call(i, generators[i])
for i in range(len(batch_prompts))
])
self.loop.run_until_complete(gather())
outputs = outputs[0] if need_list_wrap else outputs
return outputs
def stream_infer(
self,
prompts: Union[List[str], str, List[Dict], List[List[Dict]]],
gen_config: Optional[Union[GenerationConfig,
EngineGenerationConfig]] = None,
do_preprocess: bool = True,
**kwargs):
"""Inference a batch of prompts with stream mode.
Args:
prompts (List[str] | str | List[Dict] | List[List[Dict]]): a batch of
prompts. It accepts: a string prompt, a list of string prompts,
a chat history in OpenAI format or a list of chat histories.
gen_config (GenerationConfig | None): an instance of
GenerationConfig. Default to None.
do_preprocess (bool): whether pre-process the messages. Default to
True, which means chat_template will be applied.
"""
need_list_wrap = isinstance(prompts, str) or isinstance(
prompts[0], Dict)
prompts = [prompts] if need_list_wrap else prompts
assert isinstance(prompts, List), 'prompts should be a list'
if gen_config is None:
gen_config = GenerationConfig()
if type(gen_config) is GenerationConfig:
gen_config = EngineGenerationConfig.From(gen_config,
self.tokenizer)
# set random seed if it is not set
if gen_config.random_seed is None:
gen_config.random_seed = random.getrandbits(64)
prompt_num = len(prompts)
outputs = Queue()
generators = []
for j in range(0, prompt_num, self.instance_num):
batch_prompts = prompts[j:j + self.instance_num]
generators = []
for i, prompt in enumerate(batch_prompts):
generators.append(
self.generate(prompt,
i,
gen_config=gen_config,
stream_response=True,
sequence_start=True,
sequence_end=True,
do_preprocess=do_preprocess,
**kwargs))
async def _inner_call(i, generator):
async for out in generator:
outputs.put(
Response(out.response, out.generate_token_len,
out.input_token_len, i + j,
out.finish_reason))
async def gather():
await asyncio.gather(*[
_inner_call(i, generators[i])
for i in range(len(batch_prompts))
])
outputs.put(None)
proc = Thread(
target=lambda: self.loop.run_until_complete(gather()))
proc.start()
while True:
try:
out = outputs.get(timeout=0.001)
if out is None:
break
yield out
except Empty:
pass
proc.join()
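A sketch of consuming stream_infer, assuming `engine` is an AsyncEngine instance as in the earlier sketch; a background thread drains the asyncio loop into the Queue, so iteration here is plain synchronous Python:

# Illustrative only: chunks from different prompts may interleave.
for out in engine.stream_infer(['hi', 'tell me a joke']):
    print(out.text, end='', flush=True)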
async def generate(
self,
messages,
session_id: int,
gen_config: Optional[Union[GenerationConfig,
EngineGenerationConfig]] = None,
stream_response: bool = True,
sequence_start: bool = True,
sequence_end: bool = True, # no interactive mode by default
step: int = 0,
do_preprocess: bool = True,
**kwargs):
"""Generate responses.
Args:
messages (str | List): chat history or prompt
session_id (int): the session id
gen_config (GenerationConfig | None): an instance of
GenerationConfig. Default to None.
stream_response (bool): whether to stream the responses
sequence_start (bool): indicator for starting a sequence
sequence_end (bool): indicator for ending a sequence
step (int): the offset of the k/v cache
do_preprocess (bool): whether pre-process the messages. Default to
True, which means chat_template will be applied.
"""
if str(session_id) not in self.id2step:
self.id2step[str(session_id)] = 0
if step != 0:
self.id2step[str(session_id)] = step
if gen_config is None:
gen_config = GenerationConfig()
if type(gen_config) is GenerationConfig:
gen_config = EngineGenerationConfig.From(gen_config,
self.tokenizer)
if gen_config.stop_words is None:
gen_config.stop_words = self.stop_words
# set random seed if it is not set and sequence_start is True
if gen_config.random_seed is None and sequence_start:
gen_config.random_seed = random.getrandbits(64)
prompt = messages
if do_preprocess:
prompt = self.chat_template.messages2prompt(prompt, sequence_start)
input_ids = self.tokenizer.encode(prompt, add_bos=sequence_start)
if gen_config.max_new_tokens is None:
# for interactive endpoint, will try maximum possible token num
gen_config.max_new_tokens = max(
128, self.session_len - self.id2step[str(session_id)] -
len(input_ids))
finish_reason = None
if self.id2step[str(session_id)] + len(
input_ids) + gen_config.max_new_tokens > self.session_len:
finish_reason = 'length'
yield GenOut('', self.id2step[str(session_id)], len(input_ids), 0,
finish_reason)
if sequence_end is True and sequence_start is False:
await self.end_session(session_id)
else:
generator = await self.get_generator(False, session_id)
async with self.safe_run(session_id):
state = DetokenizeState()
async for outputs in generator.async_stream_infer(
session_id=session_id,
input_ids=input_ids,
gen_config=gen_config,
stream_output=stream_response,
sequence_start=sequence_start,
sequence_end=sequence_end,
step=self.id2step[str(session_id)]):
_, res, tokens = outputs
# decode res
response, state = self.tokenizer.detokenize_incrementally(
res,
state,
skip_special_tokens=gen_config.skip_special_tokens)
# response, history token len,
# input token len, gen token len
yield GenOut(response, self.id2step[str(session_id)],
len(input_ids), tokens, finish_reason)
finish_reason = 'length' \
if tokens >= gen_config.max_new_tokens else 'stop'
# a trailing replacement char (U+FFFD) marks a potentially
# unfinished utf-8 byte sequence
if not response.endswith('�'):
response = ''  # avoid returning the last response twice
yield GenOut(response, self.id2step[str(session_id)],
len(input_ids), tokens, finish_reason)
# update step
self.id2step[str(session_id)] += len(input_ids) + tokens
if sequence_end:
self.id2step[str(session_id)] = 0
# manually end pytorch session
# TODO modify pytorch or turbomind api
if self.backend == 'pytorch' and sequence_end:
await self.end_session(session_id)
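For completeness, a hedged sketch of driving the low-level generate() coroutine directly for one session; the batch wrappers above do this internally:

# Illustrative only: stream one interactive turn for session 0.
import asyncio

async def _demo(engine):
    async for out in engine.generate('hello', session_id=0,
                                     sequence_start=True,
                                     sequence_end=True):
        print(out.response, end='', flush=True)

asyncio.get_event_loop().run_until_complete(_demo(engine))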
def get_logger(
name: Optional[str] = None,
log_file: Optional[str] = None,
log_level: int = logging.INFO,
file_mode: str = 'w',
log_formatter: str = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
) -> Logger:
"""Initialize and get a logger by name.
If the logger has not been initialized, this method will initialize the
logger by adding one or two handlers, otherwise the initialized logger will
be directly returned. During initialization, a StreamHandler will always be
added. If `log_file` is specified, a FileHandler will also be added.
Args:
name (str): Logger name.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the logger.
log_level (int): The logger level.
file_mode (str): The file mode used in opening log file.
Defaults to 'w'.
log_formatter (str): The logger output format.
Returns:
logging.Logger: The expected logger.
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
# handle hierarchical names
# e.g., logger "a" is initialized, then logger "a.b" will skip the
# initialization since it is a child of "a".
for logger_name in logger_initialized:
if name.startswith(logger_name):
return logger
# handle duplicate logs to the console
for handler in logger.root.handlers:
if type(handler) is logging.StreamHandler:
handler.setLevel(logging.ERROR)
stream_handler = logging.StreamHandler(stream=sys.stdout)
handlers = [stream_handler]
if log_file is not None:
# The default file mode of the stdlib FileHandler is 'a'; we expose
# `file_mode` (default 'w') so callers can change that behaviour.
file_handler = logging.FileHandler(log_file, file_mode)
handlers.append(file_handler)
formatter = ColorFormatter(log_formatter)
for handler in handlers:
handler.setFormatter(formatter)
handler.setLevel(log_level)
handler.addFilter(FilterDuplicateWarning(name))
logger.addHandler(handler)
logger.setLevel(log_level)
logger.propagate = False
logger_initialized[name] = True
return logger
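A minimal usage sketch of get_logger; the file name is an example:

import logging

# Console + file logging with the shared ColorFormatter configured above.
logger = get_logger('lmdeploy.demo', log_file='demo.log',
                    log_level=logging.DEBUG)
logger.debug('written to stdout and demo.log')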
def serve(model_path: str,
model_name: Optional[str] = None,
backend: Literal['turbomind', 'pytorch'] = 'turbomind',
backend_config: Optional[Union[PytorchEngineConfig,
TurbomindEngineConfig]] = None,
chat_template_config: Optional[ChatTemplateConfig] = None,
server_name: str = '0.0.0.0',
server_port: int = 23333,
tp: int = 1,
allow_origins: List[str] = ['*'],
allow_credentials: bool = True,
allow_methods: List[str] = ['*'],
allow_headers: List[str] = ['*'],
log_level: str = 'ERROR',
api_keys: Optional[Union[List[str], str]] = None,
ssl: bool = False,
qos_config_path: str = '',
**kwargs):
"""An example to perform model inference through the command line
interface.
Args:
model_path (str): the path of a model.
It could be one of the following options:
- i) A local directory path of a turbomind model which is
converted by `lmdeploy convert` command or download from
ii) and iii).
- ii) The model_id of a lmdeploy-quantized model hosted
inside a model repo on huggingface.co, such as
"InternLM/internlm-chat-20b-4bit",
"lmdeploy/llama2-chat-70b-4bit", etc.
- iii) The model_id of a model hosted inside a model repo
on huggingface.co, such as "internlm/internlm-chat-7b",
"Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat"
and so on.
model_name (str): needed when model_path is a pytorch model on
huggingface.co, such as "InternLM/internlm-chat-7b"
backend (str): either `turbomind` or `pytorch` backend. Default to
`turbomind` backend.
backend_config (TurbomindEngineConfig | PytorchEngineConfig): backend
config instance. Default to None.
chat_template_config (ChatTemplateConfig): chat template configuration.
Default to None.
server_name (str): host ip for serving
server_port (int): server port
tp (int): tensor parallel
allow_origins (List[str]): a list of allowed origins for CORS
allow_credentials (bool): whether to allow credentials for CORS
allow_methods (List[str]): a list of allowed HTTP methods for CORS
allow_headers (List[str]): a list of allowed HTTP headers for CORS
log_level (str): log level, one of [CRITICAL, ERROR, WARNING, INFO, DEBUG]
api_keys (List[str] | str | None): Optional list of API keys. Accepts string type as
a single api_key. Default to None, which means no api key applied.
ssl (bool): Enable SSL. Requires OS Environment variables 'SSL_KEYFILE' and 'SSL_CERTFILE'.
qos_config_path (str): qos policy config path
""" # noqa E501
if os.getenv('TM_LOG_LEVEL') is None:
os.environ['TM_LOG_LEVEL'] = log_level
if allow_origins:
app.add_middleware(
CORSMiddleware,
allow_origins=allow_origins,
allow_credentials=allow_credentials,
allow_methods=allow_methods,
allow_headers=allow_headers,
)
if api_keys is not None:
if isinstance(api_keys, str):
api_keys = api_keys.split(',')
VariableInterface.api_keys = api_keys
ssl_keyfile, ssl_certfile, http_or_https = None, None, 'http'
if ssl:
ssl_keyfile = os.environ['SSL_KEYFILE']
ssl_certfile = os.environ['SSL_CERTFILE']
http_or_https = 'https'
VariableInterface.async_engine = AsyncEngine(
model_path=model_path,
model_name=model_name,
backend=backend,
backend_config=backend_config,
chat_template_config=chat_template_config,
tp=tp,
**kwargs)
if qos_config_path:
try:
with open(qos_config_path, 'r') as file:
qos_config_str = file.read()
VariableInterface.qos_engine = QosEngine(
qos_tag=qos_config_str,
engine=VariableInterface.async_engine,
**kwargs)
VariableInterface.qos_engine.start()
except FileNotFoundError:
VariableInterface.qos_engine = None
else:
# hide qos functions if not applied
for i in range(len(app.router.routes)):
if 'qos' in app.router.routes[i].path:
app.router.routes[i].include_in_schema = False
for i in range(3):
print(
f'HINT: Please open \033[93m\033[1m{http_or_https}://'
f'{server_name}:{server_port}\033[0m in a browser for detailed api'
' usage!!!')
uvicorn.run(app=app,
host=server_name,
port=server_port,
log_level='info',
ssl_keyfile=ssl_keyfile,
ssl_certfile=ssl_certfile)
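An illustrative launch of the server above; host, port and key are examples, and a single api key may be passed as a plain string, as the parsing above shows:

# Illustrative only: serve an OpenAI-compatible API on port 23333.
serve('internlm/internlm-chat-7b',
      server_name='0.0.0.0',
      server_port=23333,
      api_keys='my-secret-key')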
The provided code snippet includes necessary dependencies for implementing the `pipeline` function. Write a Python function `def pipeline(model_path: str, model_name: Optional[str] = None, backend_config: Optional[Union[TurbomindEngineConfig, PytorchEngineConfig]] = None, chat_template_config: Optional[ChatTemplateConfig] = None, log_level='ERROR', **kwargs)` to solve the following problem:
Args: model_path (str): the path of a model. It could be one of the following options: - i) A local directory path of a turbomind model which is converted by `lmdeploy convert` command or download from ii) and iii). - ii) The model_id of a lmdeploy-quantized model hosted inside a model repo on huggingface.co, such as "InternLM/internlm-chat-20b-4bit", "lmdeploy/llama2-chat-70b-4bit", etc. - iii) The model_id of a model hosted inside a model repo on huggingface.co, such as "internlm/internlm-chat-7b", "Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat" and so on. model_name (str): needed when model_path is a pytorch model on huggingface.co, such as "internlm/internlm-chat-7b", "Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat" and so on. backend_config (TurbomindEngineConfig | PytorchEngineConfig): backend config instance. Default to None. chat_template_config (ChatTemplateConfig): chat template configuration. Default to None. log_level(str): set log level whose value among [CRITICAL, ERROR, WARNING, INFO, DEBUG] Examples: >>> import lmdeploy >>> pipe = lmdeploy.pipeline('internlm/internlm-chat-7b') >>> response = pipe(['hi','say this is a test']) >>> print(response)
Here is the function:
def pipeline(model_path: str,
model_name: Optional[str] = None,
backend_config: Optional[Union[TurbomindEngineConfig,
PytorchEngineConfig]] = None,
chat_template_config: Optional[ChatTemplateConfig] = None,
log_level='ERROR',
**kwargs):
"""
Args:
model_path (str): the path of a model.
It could be one of the following options:
- i) A local directory path of a turbomind model which is
converted by `lmdeploy convert` command or download from
ii) and iii).
- ii) The model_id of a lmdeploy-quantized model hosted
inside a model repo on huggingface.co, such as
"InternLM/internlm-chat-20b-4bit",
"lmdeploy/llama2-chat-70b-4bit", etc.
- iii) The model_id of a model hosted inside a model repo
on huggingface.co, such as "internlm/internlm-chat-7b",
"Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat"
and so on.
model_name (str): needed when model_path is a pytorch model on
huggingface.co, such as "internlm/internlm-chat-7b",
"Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat" and so on.
backend_config (TurbomindEngineConfig | PytorchEngineConfig): backend
config instance. Default to None.
chat_template_config (ChatTemplateConfig): chat template configuration.
Default to None.
log_level(str): set log level whose value among [CRITICAL, ERROR, WARNING, INFO, DEBUG]
Examples:
>>> import lmdeploy
>>> pipe = lmdeploy.pipeline('internlm/internlm-chat-7b')
>>> response = pipe(['hi','say this is a test'])
>>> print(response)
""" # noqa E501
from lmdeploy.serve.async_engine import AsyncEngine
if os.getenv('TM_LOG_LEVEL') is None:
os.environ['TM_LOG_LEVEL'] = log_level
from lmdeploy.utils import get_logger
logger = get_logger('lmdeploy')
logger.setLevel(log_level)
if type(backend_config) is not PytorchEngineConfig:
# set auto backend mode
backend_config = autoget_backend_config(model_path, backend_config)
backend = 'pytorch' if type(
backend_config) is PytorchEngineConfig else 'turbomind'
logger.info(f'Using {backend} engine')
if 'tp' in kwargs:
logger.warning(
'The argument "tp" is deprecated and will be removed soon. '
'Please set "tp" in "backend_config"')
tp = kwargs['tp']
kwargs.pop('tp')
else:
tp = 1 if backend_config is None else backend_config.tp
return AsyncEngine(model_path,
model_name=model_name,
backend=backend,
backend_config=backend_config,
chat_template_config=chat_template_config,
tp=tp,
**kwargs) | Args: model_path (str): the path of a model. It could be one of the following options: - i) A local directory path of a turbomind model which is converted by `lmdeploy convert` command or download from ii) and iii). - ii) The model_id of a lmdeploy-quantized model hosted inside a model repo on huggingface.co, such as "InternLM/internlm-chat-20b-4bit", "lmdeploy/llama2-chat-70b-4bit", etc. - iii) The model_id of a model hosted inside a model repo on huggingface.co, such as "internlm/internlm-chat-7b", "Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat" and so on. model_name (str): needed when model_path is a pytorch model on huggingface.co, such as "internlm/internlm-chat-7b", "Qwen/Qwen-7B-Chat ", "baichuan-inc/Baichuan2-7B-Chat" and so on. backend_config (TurbomindEngineConfig | PytorchEngineConfig): backend config instance. Default to None. chat_template_config (ChatTemplateConfig): chat template configuration. Default to None. log_level(str): set log level whose value among [CRITICAL, ERROR, WARNING, INFO, DEBUG] Examples: >>> import lmdeploy >>> pipe = lmdeploy.pipeline('internlm/internlm-chat-7b') >>> response = pipe(['hi','say this is a test']) >>> print(response) |
8,074 | import os
import random
from lmdeploy.messages import EngineGenerationConfig
from lmdeploy.model import ChatTemplateConfig
from lmdeploy.tokenizer import DetokenizeState
The provided code snippet includes necessary dependencies for implementing the `input_prompt` function. Write a Python function `def input_prompt(model_name)` to solve the following problem:
Input a prompt in the console interface.
Here is the function:
def input_prompt(model_name):
"""Input a prompt in the consolo interface."""
if model_name == 'codellama':
print('\nenter !! to end the input >>>\n', end='')
sentinel = '!!'
else:
print('\ndouble enter to end input >>> ', end='')
sentinel = '' # ends when this string is seen
return '\n'.join(iter(input, sentinel)) | Input a prompt in the console interface. |
8,075 | import json
import os
from huggingface_hub import hf_hub_download
from transformers.utils import ExplicitEnum
from lmdeploy.utils import get_logger
class ModelSource(ExplicitEnum):
"""Turbomind model source."""
WORKSPACE = 'workspace'
HF_MODEL = 'hf_model'
def get_hf_config_content(pretrained_model_name_or_path, **kwargs) -> dict:
"""Get config content of a hf model."""
config_path = get_hf_config_path(pretrained_model_name_or_path, **kwargs)
with open(config_path, 'r') as f:
config = json.load(f)
return config
def get_model_source(pretrained_model_name_or_path: str,
**kwargs) -> ModelSource:
"""Get model source."""
triton_model_path = os.path.join(pretrained_model_name_or_path,
'triton_models')
if os.path.exists(triton_model_path):
return ModelSource.WORKSPACE
return ModelSource.HF_MODEL
The provided code snippet includes necessary dependencies for implementing the `check_tm_model_input` function. Write a Python function `def check_tm_model_input(pretrained_model_name_or_path, **kwargs)` to solve the following problem:
Check if single input pretrained_model_name_or_path is enough to use.
Here is the function:
def check_tm_model_input(pretrained_model_name_or_path, **kwargs):
"""Check if single input pretrained_model_name_or_path is enough to use."""
if kwargs.get('model_name', None):
return
model_source = get_model_source(pretrained_model_name_or_path, **kwargs)
if model_source == ModelSource.WORKSPACE:
return
config = get_hf_config_content(pretrained_model_name_or_path, **kwargs)
if 'turbomind' in config and config['turbomind']['model_name'] != '':
return
assert (0), '\nCan not get model name from input model, '\
'please supply model name with arg --model-name, ' \
'you can list supported models by `lmdeploy list`' | Check if single input pretrained_model_name_or_path is enough to use. |
8,076 | import json
import os
from huggingface_hub import hf_hub_download
from transformers.utils import ExplicitEnum
from lmdeploy.utils import get_logger
def get_model_from_config(model_dir: str):
import json
config_file = os.path.join(model_dir, 'config.json')
default = 'llama'
if not os.path.exists(config_file):
return default
with open(config_file) as f:
config = json.load(f)
ARCH_MAP = {
'LlamaForCausalLM': default,
'InternLM2ForCausalLM': 'internlm2',
'InternLMForCausalLM': default,
'BaiChuanForCausalLM': 'baichuan', # Baichuan-7B
'BaichuanForCausalLM': 'baichuan2', # not right for Baichuan-13B-Chat
'QWenLMHeadModel': 'qwen',
}
arch = 'LlamaForCausalLM'
if 'auto_map' in config:
arch = config['auto_map']['AutoModelForCausalLM'].split('.')[-1]
elif 'architectures' in config:
arch = config['architectures'][0]
return ARCH_MAP[arch] | null |
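A self-contained check of the architecture mapping above, using a throwaway directory with a minimal config.json:

import json
import os
import tempfile

with tempfile.TemporaryDirectory() as d:
    with open(os.path.join(d, 'config.json'), 'w') as f:
        json.dump({'architectures': ['QWenLMHeadModel']}, f)
    assert get_model_from_config(d) == 'qwen'
# a directory without config.json falls back to 'llama'
assert get_model_from_config(tempfile.mkdtemp()) == 'llama'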
8,077 | from typing import List
import torch
from ..source_model.base import BaseInputModel, BaseReader
from .base import (OUTPUT_MODELS, BaseOutputModel, TurbomindModelConfig,
merge_qkv, permute)
The provided code snippet includes necessary dependencies for implementing the `transpose_tensor` function. Write a Python function `def transpose_tensor(input: List[torch.Tensor])` to solve the following problem:
Transpose tensor.
Here is the function:
def transpose_tensor(input: List[torch.Tensor]):
"""Transpose tensor."""
output = [x.cuda().t() for x in input]
return output | Transpose tensor. |
8,078 | import configparser
import copy
import inspect
import io
import json
import os.path as osp
from abc import ABC, abstractmethod
from configparser import ConfigParser
import torch
import tqdm
from mmengine import Registry
from pydantic.dataclasses import dataclass
from lmdeploy.messages import TurbomindEngineConfig
from lmdeploy.model import MODELS
from ..source_model.base import BaseInputModel, BaseReader
def tprint(*args, **kwargs):
to_file = kwargs.pop('to_file', False)
if not to_file:
return
from io import StringIO
s = StringIO()
print(*args, **kwargs, file=s, end='')
tqdm.tqdm.write(s.getvalue()) | null |
8,079 | import configparser
import copy
import inspect
import io
import json
import os.path as osp
from abc import ABC, abstractmethod
from configparser import ConfigParser
import torch
import tqdm
from mmengine import Registry
from pydantic.dataclasses import dataclass
from lmdeploy.messages import TurbomindEngineConfig
from lmdeploy.model import MODELS
from ..source_model.base import BaseInputModel, BaseReader
The provided code snippet includes necessary dependencies for implementing the `_weight_dtype_map` function. Write a Python function `def _weight_dtype_map(weight_type: str, default=None)` to solve the following problem:
get weight dtype map.
Here is the function:
def _weight_dtype_map(weight_type: str, default=None):
"""get weight dtype map."""
_WEIGHT_DTYPE_MAP = dict(
int4=torch.float16,
fp16=torch.float16,
fp32=torch.float16,
bf16=torch.bfloat16
if torch.cuda.is_bf16_supported() else torch.float16,
)
return _WEIGHT_DTYPE_MAP.get(weight_type, default) | get weight dtype map. |
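Quick sanity checks of the mapping above; note 'bf16' resolves differently depending on hardware support:

assert _weight_dtype_map('fp16') == torch.float16
assert _weight_dtype_map('int4') == torch.float16
assert _weight_dtype_map('unknown', default=torch.float16) == torch.float16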
8,080 | import configparser
import copy
import inspect
import io
import json
import os.path as osp
from abc import ABC, abstractmethod
from configparser import ConfigParser
import torch
import tqdm
from mmengine import Registry
from pydantic.dataclasses import dataclass
from lmdeploy.messages import TurbomindEngineConfig
from lmdeploy.model import MODELS
from ..source_model.base import BaseInputModel, BaseReader
def merge_qkv(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, tp: int,
dim: int):
def reshape(x):
return x.view(x.size(0), tp, -1) if dim == 2 else x.view(tp, -1)
qkv = torch.cat((reshape(q), reshape(k), reshape(v)), dim=-1)
# (input_dim, head_num + 2 * kv_head_num)
return qkv.view(q.size(0), -1) | null |
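A shape check for merge_qkv with dim=2: q/k/v of shape (input_dim, heads * head_dim) are split per tensor-parallel rank, concatenated, and flattened back to 2-D:

import torch

input_dim, tp = 16, 2
q = torch.randn(input_dim, 8)  # e.g. 2 query heads of dim 4
k = torch.randn(input_dim, 4)  # 1 kv head of dim 4
v = torch.randn(input_dim, 4)
qkv = merge_qkv(q, k, v, tp=tp, dim=2)
assert qkv.shape == (input_dim, 16)  # q + k + v columns, interleaved per rank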
8,081 | import os.path as osp
import sys
import torch
import lmdeploy
from ..source_model.base import BaseInputModel, BaseReader
from .base import (OUTPUT_MODELS, BaseOutputModel, TurbomindModelConfig,
merge_qkv, permute)
import _turbomind as _tm
def transpose_qk_s4(src: torch.Tensor, group_size):
assert src.is_contiguous()
dst = torch.zeros_like(src)
_tm.transpose_qk_s4_k_m8(src, dst,
src.size(-1) * 8, src.size(0), group_size)
return dst | null |
8,082 | import os.path as osp
import sys
import torch
import lmdeploy
from ..source_model.base import BaseInputModel, BaseReader
from .base import (OUTPUT_MODELS, BaseOutputModel, TurbomindModelConfig,
merge_qkv, permute)
import _turbomind as _tm
def permute(x: torch.Tensor, size_per_head: int = 128):
if x.shape[-1] > 1:
dim = x.shape[-1]
n_heads = dim // size_per_head
return x.view(-1, n_heads, 2,
dim // n_heads // 2).transpose(2, 3).reshape(-1, dim)
else: # scales, zeros
dim = x.shape[0]
n_heads = dim // size_per_head
return x.view(n_heads, 2, dim // n_heads // 2,
1).transpose(1, 2).reshape(dim, 1)
def fuse_w1_w3_s4(w1_qw: torch.Tensor, w1_qz: torch.Tensor, w1_s: torch.Tensor,
w3_qw: torch.Tensor, w3_qz: torch.Tensor,
w3_s: torch.Tensor):
def fuse(a: torch.Tensor, b: torch.Tensor):
ab = torch.cat((a, b)).contiguous()
_ab = torch.zeros_like(ab)
_tm.fuse_w1_w3_s4_k_m8(ab, _ab, a.size(-1) * 8, a.size(0))
return _ab.view(a.size(0), -1)
w13_qw = fuse(w1_qw, w3_qw)
w13_qz = fuse(w1_qz, w3_qz)
w13_s = torch.cat((w1_s, w3_s)).view(2, w1_s.size(0), -1)
w13_s = w13_s.permute(1, 2, 0).contiguous().view(w1_s.size(0), -1)
return w13_qw, w13_qz, w13_s | null |
8,083 | import os.path as osp
import sys
import torch
import lmdeploy
from ..source_model.base import BaseInputModel, BaseReader
from .base import (OUTPUT_MODELS, BaseOutputModel, TurbomindModelConfig,
merge_qkv, permute)
import _turbomind as _tm
def convert_s4(qw: torch.Tensor, qz: torch.Tensor, s: torch.Tensor,
group_size: int):
assert qw.is_contiguous()
assert qz.is_contiguous()
assert s.is_contiguous()
_qw = torch.zeros_like(qw)
_sz = torch.zeros_like(s, dtype=torch.int32) # half2
_ws = torch.zeros_like(s)
_tm.convert_s4_k_m8(_qw, _sz, _ws, qw, s, qz,
qw.size(-1) * 8, qw.size(0), group_size)
return _qw, _sz | null |