guangliang.yin committed on
Commit · 5fb9ac1 · Parent(s): a4d283e
Switch the Q&A bot to Tsinghua Zhipu (ZhipuAI)
Browse files:
- app.py (+2, -1)
- project/llm/__init__.py (+0, -0)
- project/llm/self_llm.py (+47, -0)
- project/llm/zhipuai_llm.py (+229, -0)
- requirements.txt (+2, -1)
app.py
CHANGED
@@ -8,6 +8,7 @@ from langchain.text_splitter import CharacterTextSplitter
 from langchain.chains import RetrievalQAWithSourcesChain
 from langchain.llms import OpenAI
 import uuid
+from project.llm.zhipuai_llm import ZhipuAILLM
 
 chain: Optional[Callable] = None
 
@@ -44,7 +45,7 @@ def web_loader(url_list, openai_key, zilliz_uri, user, password):
 
     global chain
     chain = RetrievalQAWithSourcesChain.from_chain_type(
-
+        ZhipuAILLM(model="glm-3-turbo", temperature=0.1, zhipuai_api_key=openai_key),
        chain_type="map_reduce",
        retriever=docsearch.as_retriever(),
    )
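
For context, a minimal sketch of how a RetrievalQAWithSourcesChain built this way is typically queried; the question text and result handling below are illustrative, not part of this commit:

    # Query the chain built in web_loader(); the chain answers from the
    # indexed pages and reports which source URLs it drew on.
    result = chain({"question": "What does the indexed page say about Milvus?"})
    print(result["answer"])   # answer synthesized by the ZhipuAI model
    print(result["sources"])  # source URLs the answer was drawn from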
|
project/llm/__init__.py
ADDED
File without changes (empty file)
project/llm/self_llm.py
ADDED
@@ -0,0 +1,47 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   self_llm.py
@Time    :   2023/10/16 18:48:08
@Author  :   Logan Zou
@Version :   1.0
@Contact :   loganzou0421@163.com
@License :   (C)Copyright 2017-2018, Liugroup-NLPR-CASIA
@Desc    :   Project class built on top of the LangChain LLM base class,
             unifying calls to the GPT, Wenxin, Xunfei and Zhipu APIs
'''

from langchain.llms.base import LLM
from typing import Dict, Any, Mapping
from pydantic import Field


class Self_LLM(LLM):
    # Custom LLM base class, inheriting from langchain.llms.base.LLM.
    # Native API endpoint URL
    url: str = None
    # Defaults to the GPT-3.5 model
    model_name: str = "gpt-3.5-turbo"
    # Upper bound on request latency
    request_timeout: float = None
    # Temperature coefficient
    temperature: float = 0.1
    # API key
    api_key: str = None
    # Extra optional model parameters
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)

    # Method that returns the default call parameters
    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for API calls."""
        normal_params = {
            "temperature": self.temperature,
            "request_timeout": self.request_timeout,
        }
        return {**normal_params}

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
        """Get the identifying parameters."""
        return {**{"model_name": self.model_name}, **self._default_params}
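
The base class leaves LangChain's two abstract members unimplemented, so a concrete model only has to supply `_llm_type` and `_call`. A minimal sketch with a hypothetical EchoLLM (illustration only, not part of the commit):

    from typing import Any, List, Optional

    class EchoLLM(Self_LLM):
        # Toy subclass: echoes the prompt back instead of calling a real API.
        @property
        def _llm_type(self) -> str:
            return "echo"

        def _call(self, prompt: str, stop: Optional[List[str]] = None,
                  run_manager: Any = None, **kwargs: Any) -> str:
            # A real subclass would call its provider's HTTP API here,
            # using self.api_key, self.url and self._default_params.
            return prompt

    # EchoLLM()("hello")  -> "hello"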
project/llm/zhipuai_llm.py
ADDED
@@ -0,0 +1,229 @@
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   zhipuai_llm.py
@Time    :   2023/10/16 22:06:26
@Author  :   0-yy-0
@Version :   1.0
@Contact :   310484121@qq.com
@License :   (C)Copyright 2017-2018, Liugroup-NLPR-CASIA
@Desc    :   Custom LLM class built on the Zhipu AI large models
'''

from __future__ import annotations

import logging
from typing import (
    Any,
    AsyncIterator,
    Dict,
    Iterator,
    List,
    Optional,
)

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms.base import LLM
from langchain.pydantic_v1 import Field, root_validator
from langchain.schema.output import GenerationChunk
from langchain.utils import get_from_dict_or_env
from project.llm.self_llm import Self_LLM

logger = logging.getLogger(__name__)


class ZhipuAILLM(Self_LLM):
    """ZhipuAI hosted open-source or customized models.

    To use, you should have the ``zhipuai`` Python package installed, and
    the environment variable ``ZHIPUAI_API_KEY`` set to your API key.

    zhipuai_api_key is a required parameter, which you can get from
    https://open.bigmodel.cn/usercenter/apikeys

    Example:
        .. code-block:: python

            from project.llm.zhipuai_llm import ZhipuAILLM
            zhipuai_model = ZhipuAILLM(model="chatglm_std", temperature=temperature)
    """

    model_kwargs: Dict[str, Any] = Field(default_factory=dict)

    client: Any

    model: str = "chatglm_std"
    """Model name: chatglm_pro, chatglm_std or chatglm_lite."""

    zhipuai_api_key: Optional[str] = None

    incremental: Optional[bool] = True
    """Whether to return incremental results."""

    streaming: Optional[bool] = False
    """Whether to stream the results or not."""
    # streaming is the inverse of incremental

    request_timeout: Optional[int] = 60
    """Request timeout for chat HTTP requests."""

    top_p: Optional[float] = 0.8
    temperature: Optional[float] = 0.95
    request_id: Optional[float] = None

    @root_validator()
    def validate_environment(cls, values: Dict) -> Dict:
        values["zhipuai_api_key"] = get_from_dict_or_env(
            values,
            "zhipuai_api_key",
            "ZHIPUAI_API_KEY",
        )
        try:
            from zhipuai import ZhipuAI

            values["client"] = ZhipuAI(api_key=values["zhipuai_api_key"])
        except ImportError:
            raise ValueError(
                "zhipuai package not found, please install it with "
                "`pip install zhipuai`"
            )
        return values

    @property
    def _identifying_params(self) -> Dict[str, Any]:
        return {
            **{"model": self.model},
            **super()._identifying_params,
        }

    @property
    def _llm_type(self) -> str:
        """Return type of llm."""
        return "zhipuai"

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling the ZhipuAI API."""
        normal_params = {
            "streaming": self.streaming,
            "top_p": self.top_p,
            "temperature": self.temperature,
            "request_id": self.request_id,
        }
        return {**normal_params, **self.model_kwargs}

    def _convert_prompt_msg_params(
        self,
        prompt: str,
        **kwargs: Any,
    ) -> dict:
        return {
            **{"prompt": prompt, "model": self.model},
            **self._default_params,
            **kwargs,
        }

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Call out to a ZhipuAI model endpoint for each generation with a prompt.

        Args:
            prompt: The prompt to pass into the model.
        Returns:
            The string generated by the model.

        Example:
            .. code-block:: python

                response = zhipuai_model("Tell me a joke.")
        """
        if self.streaming:
            completion = ""
            for chunk in self._stream(prompt, stop, run_manager, **kwargs):
                completion += chunk.text
            return completion
        params = self._convert_prompt_msg_params(prompt, **kwargs)
        logger.debug("params: %s", params)

        # Rebuild the payload in chat-completions form; note that stream,
        # top_p, temperature and request_id are hard-coded here rather than
        # taken from the computed params.
        params = {
            "messages": [{"role": "user", "content": params["prompt"]}],
            "model": self.model,
            "stream": False,
            "top_p": 0.8,
            "temperature": 0.01,
            "request_id": None,
        }
        response_payload = self.client.chat.completions.create(**params)
        logger.debug("response_payload: %s", response_payload)

        return response_payload.choices[0].message.content

    async def _acall(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        if self.streaming:
            completion = ""
            async for chunk in self._astream(prompt, stop, run_manager, **kwargs):
                completion += chunk.text
            return completion

        params = self._convert_prompt_msg_params(prompt, **kwargs)

        # NOTE: unlike _call above, the async and streaming paths still use
        # invoke-style client methods that predate the chat.completions API.
        response = await self.client.async_invoke(**params)

        return response

    def _stream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[GenerationChunk]:
        params = self._convert_prompt_msg_params(prompt, **kwargs)

        for res in self.client.invoke(**params):
            if res:
                chunk = GenerationChunk(text=res)
                yield chunk
                if run_manager:
                    run_manager.on_llm_new_token(chunk.text)

    async def _astream(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[GenerationChunk]:
        params = self._convert_prompt_msg_params(prompt, **kwargs)

        async for res in await self.client.ado(**params):
            if res:
                chunk = GenerationChunk(text=res["data"]["choices"]["content"])
                yield chunk
                if run_manager:
                    await run_manager.on_llm_new_token(chunk.text)
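
For reference, a standalone usage sketch matching how app.py constructs this class after the commit; the API key value is a placeholder:

    from project.llm.zhipuai_llm import ZhipuAILLM

    # Same constructor call as app.py; the key can also come from the
    # ZHIPUAI_API_KEY environment variable via the root validator.
    llm = ZhipuAILLM(model="glm-3-turbo", temperature=0.1,
                     zhipuai_api_key="your-zhipuai-key")
    print(llm("Introduce Zhipu AI in one sentence."))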
requirements.txt
CHANGED
@@ -4,4 +4,5 @@ openai
 tiktoken
 gradio
 bs4
-uuid
+uuid
+zhipuai