Update chatllm.py

fix bug: openai param

chatllm.py CHANGED (+9 -12)
@@ -2,6 +2,7 @@ import os
 from typing import Dict, List, Optional, Tuple, Union
 
 import torch
+import openai
 import requests
 from langchain.llms.base import LLM
 from langchain.llms.utils import enforce_stop_tokens

@@ -87,18 +88,14 @@ class ChatLLM(LLM):
         messages.append({"role": "user", "content": prompt})
 
         # Configure the OpenAI model parameters
-        payload = {
-            ...
-            "frequency_penalty":0
-        }
-
-        response = requests.post(OPENAI_URL, headers=headers, json=payload)
+        response = openai.Completion.create(
+            model = 'gpt-3.5-turbo',
+            messages = messages,
+            temperature = self.temperature,
+            top_p = self.top_p,
+            presence_penalty = 0,
+            frequency_penalty = 0
+        )
         result = response.choices[0].text
 
         # Append the current AI reply to the history
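A note on the change as committed: in the pre-1.0 openai Python SDK, openai.Completion.create targets the legacy completions endpoint and does not accept a messages parameter; chat models such as gpt-3.5-turbo go through openai.ChatCompletion.create, and the reply is read from choices[0].message.content rather than choices[0].text. Below is a minimal corrected sketch, assuming the pre-1.0 SDK, an OPENAI_API_KEY environment variable, and illustrative defaults for temperature and top_p (the real code reads these from the ChatLLM instance):

    import os
    import openai

    # Assumption: pre-1.0 SDK (openai<1.0) with the API key taken from the
    # environment rather than hard-coded.
    openai.api_key = os.environ["OPENAI_API_KEY"]

    def chat(messages, temperature=0.7, top_p=1.0):
        # gpt-3.5-turbo is a chat model, so it must be called through
        # ChatCompletion, which accepts a messages list; Completion.create
        # has no such parameter and rejects chat models.
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=messages,
            temperature=temperature,
            top_p=top_p,
            presence_penalty=0,
            frequency_penalty=0,
        )
        # Chat responses expose the reply as choices[0].message.content;
        # choices[0].text exists only on legacy completion responses.
        return response.choices[0].message.content

    result = chat([{"role": "user", "content": "Hello"}])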