stupidHIGH committed on
Commit ·
7c66e4f
1
Parent(s): f38e4e6
'update'
Browse files- app.py +95 -45
- chat_completion.py +62 -0
- openai_api_key +2 -0
- requirements.txt +2 -1
- start_server.sh +3 -0
app.py
CHANGED
|
@@ -1,57 +1,107 @@
|
|
| 1 |
-
import gradio as gr
|
| 2 |
-
import os, openai
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
conversation = []
|
| 6 |
-
|
| 7 |
-
class ChatGPT:
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
def get_response(self, user_input):
|
| 19 |
-
openai.api_key = self.api_key
|
| 20 |
-
conversation.append({"role": "user", "content": user_input})
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
response = openai.ChatCompletion.create(
|
| 24 |
-
model=self.model,
|
| 25 |
-
messages = self.messages
|
| 26 |
-
|
| 27 |
-
)
|
| 28 |
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
print("AI回答內容:")
|
| 32 |
-
print(response['choices'][0]['message']['content'].strip())
|
| 33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 34 |
|
| 35 |
-
|
| 36 |
-
|
| 37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
|
| 39 |
-
|
|
|
|
|
|
|
| 40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
|
| 42 |
-
def
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
|
| 47 |
-
|
|
|
|
| 48 |
|
| 49 |
-
|
|
|
|
|
|
|
| 50 |
|
| 51 |
-
demo = gr.Interface(
|
| 52 |
-
fn=greet,
|
| 53 |
-
inputs=["text", "text"],
|
| 54 |
-
outputs=["text"],
|
| 55 |
-
)
|
| 56 |
|
| 57 |
-
demo.launch()
|
|
|
|
| 1 |
+
# import gradio as gr
|
| 2 |
+
# import os, openai
|
| 3 |
+
#
|
| 4 |
+
#
|
| 5 |
+
# conversation = []
|
| 6 |
+
#
|
| 7 |
+
# class ChatGPT:
|
| 8 |
+
#
|
| 9 |
+
#
|
| 10 |
+
# def __init__(self):
|
| 11 |
+
# self.api_key = ""
|
| 12 |
+
# self.messages = conversation
|
| 13 |
+
# self.model = os.getenv("OPENAI_MODEL", default = "gpt-3.5-turbo")
|
| 14 |
+
#
|
| 15 |
+
# def save_api_key(self, user_input0):
|
| 16 |
+
# self.api_key = user_input0
|
| 17 |
+
#
|
| 18 |
+
# def get_response(self, user_input):
|
| 19 |
+
# openai.api_key = self.api_key
|
| 20 |
+
# conversation.append({"role": "user", "content": user_input})
|
| 21 |
+
#
|
| 22 |
+
#
|
| 23 |
+
# response = openai.ChatCompletion.create(
|
| 24 |
+
# model=self.model,
|
| 25 |
+
# messages = self.messages
|
| 26 |
+
#
|
| 27 |
+
# )
|
| 28 |
+
#
|
| 29 |
+
# conversation.append({"role": "assistant", "content": response['choices'][0]['message']['content']})
|
| 30 |
+
#
|
| 31 |
+
# print("AI回答內容:")
|
| 32 |
+
# print(response['choices'][0]['message']['content'].strip())
|
| 33 |
+
#
|
| 34 |
+
#
|
| 35 |
+
#
|
| 36 |
+
# return response['choices'][0]['message']['content'].strip()
|
| 37 |
+
#
|
| 38 |
+
#
|
| 39 |
+
# chatgpt = ChatGPT()
|
| 40 |
+
#
|
| 41 |
+
#
|
| 42 |
+
# def greet(prompt, api_key):
|
| 43 |
+
# chatgpt.save_api_key(api_key)
|
| 44 |
+
#
|
| 45 |
+
# reply_text = chatgpt.get_response(prompt)
|
| 46 |
+
#
|
| 47 |
+
# greeting = f"{reply_text}"
|
| 48 |
+
#
|
| 49 |
+
# return greeting
|
| 50 |
+
#
|
| 51 |
+
# demo = gr.Interface(
|
| 52 |
+
# fn=greet,
|
| 53 |
+
# inputs=["text", "text"],
|
| 54 |
+
# outputs=["text"],
|
| 55 |
+
# )
|
| 56 |
+
#
|
| 57 |
+
# demo.launch()
|
| 58 |
+
|
| 59 |
+
import argparse

import gradio as gr
from loguru import logger

from chat_completion import ChatCompletion

# ---------------------------------------------------------------------------
# Command-line configuration for the gradio chat front-end.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--api_key_path', type=str, default='./openai_api_key')
parser.add_argument('--log_path', type=str, default='./log.txt')
parser.add_argument('--share', action='store_true', default=False)
parser.add_argument('--welcome', type=str, default='Say something to ChatGPT here ...')
parser.add_argument('--title', type=str, default='ChatGPT')
parser.add_argument('--setting', type=str, default=None)
args = parser.parse_args()

# One bot instance shared by every callback; its message history is global state.
bot = ChatCompletion(api_key_path=args.api_key_path)
logger.add(args.log_path)

with gr.Blocks(title=args.title) as demo:
    chatbot = gr.Chatbot(show_label=False)
    msg = gr.TextArea(show_label=False, placeholder=args.welcome)
    send_btn = gr.Button('Send')
    retry_btn = gr.Button('Retry')
    reset_btn = gr.Button('Reset')

    def send(user_message, history, *, is_retry=False):
        """Forward *user_message* to the bot; return (cleared textbox, new history).

        BUG FIX: the original dispatched on the literal string 'retry', so a user
        who actually typed "retry" triggered bot.retry() instead of sending their
        message. The keyword-only *is_retry* flag makes the intent explicit and
        is backward-compatible with the gradio .click() wiring below.
        """
        if not user_message and not is_retry:
            # Nothing to send; keep the history untouched.
            return '', history

        logger.info(f'[MSG] {user_message}')
        response = bot.retry() if is_retry else bot(user_message, setting=args.setting)
        logger.info(f'[ANS] {response}')
        return '', history + [[user_message, response]]

    def reset():
        """Clear the bot's conversation state and blank the UI widgets."""
        bot.reset()
        logger.info('[RESET]')
        return None, [[None, None]]

    def retry(history):
        """Re-ask the bot with the existing history (Retry button)."""
        return send('retry', history, is_retry=True)

    send_btn.click(send, inputs=[msg, chatbot], outputs=[msg, chatbot], show_progress=True)
    reset_btn.click(reset, inputs=None, outputs=[msg, chatbot])
    retry_btn.click(retry, inputs=chatbot, outputs=[msg, chatbot])

demo.launch(share=args.share)
|
chat_completion.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import linecache
|
| 2 |
+
import re
|
| 3 |
+
from typing import Dict, List, Optional
|
| 4 |
+
|
| 5 |
+
import openai
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ChatCompletion:
    """Stateful wrapper around the OpenAI chat-completion endpoint.

    Accumulates system and user messages and replays the whole list on every
    request so the model sees conversation context.
    """

    def __init__(self, model: str = 'gpt-3.5-turbo',
                 api_key: Optional[str] = None, api_key_path: str = './openai_api_key'):
        """Configure the API key, either directly or from line 2 of *api_key_path*.

        Raises:
            EnvironmentError: if no key is given and line 2 of the file is empty.
        """
        if api_key is None:
            # Line 1 of the key file is a human-readable comment; the key is on line 2.
            # (The original also assigned openai.api_key = None here — dropped as a no-op.)
            api_key = linecache.getline(api_key_path, 2).strip('\n')
            if not api_key:
                raise EnvironmentError(
                    f'No OpenAI API key found on line 2 of {api_key_path}')
        openai.api_key = api_key

        self.model = model
        # "system" role prompts (persona / setting), oldest first.
        self.system_messages = []
        # "user" role prompts, oldest first.
        self.user_messages = []

    def chat(self, msg: str, setting: Optional[str] = None, model: Optional[str] = None) -> str:
        """Record *msg* (and optional *setting* system prompt) and return the reply."""
        # Crude context cap: drop the whole history once it exceeds 2048 characters.
        if self._context_length() > 2048:
            self.reset()
        if setting is not None and setting not in self.system_messages:
            self.system_messages.append(setting)
        # Skip exact consecutive duplicates (e.g. a double-clicked Send button).
        if not self.user_messages or msg != self.user_messages[-1]:
            self.user_messages.append(msg)

        return self._run(model)

    def retry(self, model: Optional[str] = None) -> str:
        """Re-send the current history without adding a new message."""
        return self._run(model)

    def reset(self):
        """Forget all accumulated system and user messages."""
        self.system_messages.clear()
        self.user_messages.clear()

    def _make_message(self) -> List[Dict]:
        """Build the API payload: all system messages first, then user messages."""
        sys_messages = [{'role': 'system', 'content': msg} for msg in self.system_messages]
        user_messages = [{'role': 'user', 'content': msg} for msg in self.user_messages]
        return sys_messages + user_messages

    def _context_length(self) -> int:
        """Total character count of the stored history (rough proxy for tokens)."""
        return len(''.join(self.system_messages)) + len(''.join(self.user_messages))

    def _run(self, model: Optional[str] = None) -> str:
        """Call the API once; on failure return the error text instead of raising."""
        if model is None:
            model = self.model
        try:
            response = openai.ChatCompletion.create(model=model, messages=self._make_message())
            ans = response['choices'][0]['message']['content']
            # Strip the leading blank lines the API frequently emits.
            ans = re.sub(r'^\n+', '', ans)
        except openai.error.OpenAIError as e:
            # Surface API errors to the UI as text rather than crashing.
            # BUG FIX: the original assigned the exception object itself,
            # violating the declared -> str contract.
            ans = str(e)
        except Exception as e:
            # BUG FIX: the original only print()ed here, leaving `ans` unbound
            # and raising UnboundLocalError at the return below.
            ans = str(e)
        return ans

    def __call__(self, msg: str, setting: Optional[str] = None, model: Optional[str] = None) -> str:
        """Alias for chat() so the instance is directly callable."""
        return self.chat(msg, setting, model)
|
openai_api_key
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Replace the following string with your OpenAI API key
|
| 2 |
+
sk-REPLACE_WITH_YOUR_OWN_OPENAI_API_KEY
|
requirements.txt
CHANGED
|
@@ -1,2 +1,3 @@
|
|
| 1 |
gradio
|
| 2 |
-
|
|
|
|
|
|
| 1 |
gradio
|
| 2 |
+
loguru
|
| 3 |
+
openai
|
start_server.sh
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash

# Launch the chat web UI.
# NOTE(review): this commit's visible entry point is app.py, not server.py —
# confirm that server.py exists or update this to `python app.py`.
python server.py
|