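"""
Bridge to the jittorllms "pangualpha" model. The model is loaded inside a
daemon subprocess (GetGLMHandle) and queried over a multiprocessing Pipe; two
entry points, predict_no_ui_long_connection (multithreaded) and predict
(single-threaded), mirror the interface described in request_llm/bridge_all.py.
"""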
from transformers import AutoModel, AutoTokenizer
import time
import threading
import importlib
from toolbox import update_ui, get_conf
from multiprocessing import Process, Pipe

load_message = "jittorllms is not loaded yet; loading takes a while. Note: avoid mixing multiple jittor models, otherwise GPU memory may overflow and cause freezes. Depending on the configuration in `config.py`, jittorllms consumes a large amount of memory (CPU) or VRAM (GPU) and may freeze low-end machines ..."


class GetGLMHandle(Process):
    def __init__(self):
        super().__init__(daemon=True)
        self.parent, self.child = Pipe()    # pipe for talking to the child process
        self.jittorllms_model = None        # the model object exists only in the child process
        self.info = ""
        self.local_history = []
        self.success = True
        self.check_dependency()
        self.start()                        # launch the daemon child process right away
        self.threadLock = threading.Lock()  # serializes concurrent stream_chat callers

    def check_dependency(self):
        try:
            import pandas  # probe: present only if the jittorllms requirements were installed
            self.info = "Dependency check passed"
            self.success = True
        except Exception:
            from toolbox import trimmed_format_exc
            self.info = r"Missing jittorllms dependencies. To use jittorllms, besides the basic pip dependencies, " + \
                        r"you must also run `pip install -r request_llm/requirements_jittorllms.txt -i https://pypi.jittor.org/simple -I` " + \
                        r"and `git clone https://gitlink.org.cn/jittor/JittorLLMs.git --depth 1 request_llm/jittorllms` " + \
                        r"(run both commands from the project root). " + \
                        r"Warning: installing the jittorllms dependencies will completely break the existing pytorch environment; a docker environment is recommended! " + trimmed_format_exc()
            self.success = False

    def ready(self):
        return self.jittorllms_model is not None

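    # Pipe protocol between the processes: the main process sends a kwargs dict
    # (containing at least 'query' and 'history'); the child streams back partial
    # response strings and terminates each request with the '[Finish]' sentinel.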
    def run(self):
        # Child-process entry point: fix the working directory, load the model,
        # then serve requests from the pipe forever.
        def validate_path():
            # validate_path must run first: everything below assumes the
            # project-root-relative layout of request_llm/jittorllms.
            import os, sys
            root_dir_assume = os.path.abspath(os.path.dirname(__file__) + '/..')
            env = os.environ.get("PATH", "")
            # drop '/cuda/bin' from PATH (presumably to keep jittor from probing the system CUDA)
            os.environ["PATH"] = env.replace('/cuda/bin', '/x/bin')
            os.chdir(root_dir_assume + '/request_llm/jittorllms')
            sys.path.append(root_dir_assume + '/request_llm/jittorllms')
        validate_path()

        def load_model():
            import types
            try:
                if self.jittorllms_model is None:
                    device, = get_conf('LOCAL_MODEL_DEVICE')
                    from .jittorllms.models import get_model
                    # select the pangualpha variant of jittorllms
                    args_dict = {'model': 'pangualpha'}
                    print('self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))')
                    self.jittorllms_model = get_model(types.SimpleNamespace(**args_dict))
                    print('done get model')
            except Exception:
                self.child.send('[Local Message] Call jittorllms fail: could not load the jittorllms parameters.')
                raise RuntimeError("Could not load the jittorllms parameters!")
        print('load_model')
        load_model()

        # Enter the task-serving loop
        print('Entering task-waiting state')
        while True:
            # Wait for the next request from the main process
            kwargs = self.child.recv()
            query = kwargs['query']
            history = kwargs['history']
            # An empty incoming history after earlier turns signals a conversation reset
            if len(self.local_history) > 0 and len(history) == 0:
                print('Reset triggered')
                self.jittorllms_model.reset()
            self.local_history.append(query)

            print('Message received, starting request')
            try:
                for response in self.jittorllms_model.stream_chat(query, history):
                    print(response)
                    self.child.send(response)
            except Exception:
                from toolbox import trimmed_format_exc
                print(trimmed_format_exc())
                self.child.send('[Local Message] Call jittorllms fail.')
            # Request finished; tell the main process with the sentinel
            self.child.send('[Finish]')

    def stream_chat(self, **kwargs):
        # Main-process side: forward the request, then yield partial replies
        # until the '[Finish]' sentinel arrives.
        self.threadLock.acquire()
        self.parent.send(kwargs)
        while True:
            res = self.parent.recv()
            if res != '[Finish]':
                yield res
            else:
                break
        self.threadLock.release()
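
# A single module-level handle is shared by both entry points below; the lock
# inside stream_chat serializes concurrent callers onto the one child process.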
global pangu_glm_handle
pangu_glm_handle = None

def predict_no_ui_long_connection(inputs, llm_kwargs, history=[], sys_prompt="", observe_window=[], console_slience=False):
    """
    Multithreaded method.
    For this function's documentation, see request_llm/bridge_all.py
    """
    global pangu_glm_handle
    if pangu_glm_handle is None:
        pangu_glm_handle = GetGLMHandle()
        if len(observe_window) >= 1: observe_window[0] = load_message + "\n\n" + pangu_glm_handle.info
        if not pangu_glm_handle.success:
            error = pangu_glm_handle.info
            pangu_glm_handle = None
            raise RuntimeError(error)

    # jittorllms takes history as [[q1, a1], [q2, a2], ...]; fold the flat list into pairs
    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])
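    # e.g. (hypothetical values) history = ['q1', 'a1', 'q2', 'a2'] becomes
    # history_feedin = [['q1', 'a1'], ['q2', 'a2']]; a trailing unpaired entry
    # is dropped by the integer division.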

    # Start receiving jittorllms replies; observe_window[1] is a watchdog timestamp
    # that the caller must keep refreshing, otherwise generation is aborted
    watch_dog_patience = 5  # watchdog patience, in seconds
    response = ""
    for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=sys_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        print(response)
        if len(observe_window) >= 1: observe_window[0] = response
        if len(observe_window) >= 2:
            if (time.time()-observe_window[1]) > watch_dog_patience:
                raise RuntimeError("Program terminated.")
    return response


def predict(inputs, llm_kwargs, plugin_kwargs, chatbot, history=[], system_prompt='', stream=True, additional_fn=None):
    """
    Single-threaded method.
    For this function's documentation, see request_llm/bridge_all.py
    """
    chatbot.append((inputs, ""))

    global pangu_glm_handle
    if pangu_glm_handle is None:
        pangu_glm_handle = GetGLMHandle()
        chatbot[-1] = (inputs, load_message + "\n\n" + pangu_glm_handle.info)
        yield from update_ui(chatbot=chatbot, history=[])
        if not pangu_glm_handle.success:
            pangu_glm_handle = None
            return

    if additional_fn is not None:
        import core_functional
        importlib.reload(core_functional)    # hot-reload so edits to core_functional.py take effect without a restart
        core_functional = core_functional.get_core_functions()
        if "PreProcess" in core_functional[additional_fn]: inputs = core_functional[additional_fn]["PreProcess"](inputs)  # apply optional input preprocessing
        inputs = core_functional[additional_fn]["Prefix"] + inputs + core_functional[additional_fn]["Suffix"]

    # Same flat-to-paired history conversion as in predict_no_ui_long_connection above
    history_feedin = []
    for i in range(len(history)//2):
        history_feedin.append([history[2*i], history[2*i+1]])

    # Start receiving jittorllms replies
    response = "[Local Message]: Waiting for jittorllms response ..."
    for response in pangu_glm_handle.stream_chat(query=inputs, history=history_feedin, system_prompt=system_prompt, max_length=llm_kwargs['max_length'], top_p=llm_kwargs['top_p'], temperature=llm_kwargs['temperature']):
        chatbot[-1] = (inputs, response)
        yield from update_ui(chatbot=chatbot, history=history)

    # Summary: if the placeholder was never replaced, the model produced no output
    if response == "[Local Message]: Waiting for jittorllms response ...":
        response = "[Local Message]: jittorllms response error ..."
    history.extend([inputs, response])
    yield from update_ui(chatbot=chatbot, history=history)
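

# A minimal smoke test, illustrative only: this __main__ block is not part of
# the original module, and the llm_kwargs values below are assumptions; only
# the keys match what the functions above actually read.
if __name__ == '__main__':
    llm_kwargs = {'max_length': 2048, 'top_p': 0.8, 'temperature': 0.95}
    # observe_window[1] is the watchdog timestamp; a real caller refreshes it
    # periodically, otherwise generation aborts after watch_dog_patience seconds.
    reply = predict_no_ui_long_connection("Hello", llm_kwargs, history=[],
                                          sys_prompt="", observe_window=["", time.time()])
    print(reply)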