Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -7,20 +7,7 @@ import random
|
|
| 7 |
openai.api_key = os.environ.get('SessionToken')
|
| 8 |
logger.info(f"session_token_: {openai.api_key}")
|
| 9 |
|
| 10 |
-
def
|
| 11 |
-
api = None
|
| 12 |
-
try:
|
| 13 |
-
api = openai
|
| 14 |
-
# api.refresh_auth()
|
| 15 |
-
except Exception as e:
|
| 16 |
-
print(f'get_api_error:', e)
|
| 17 |
-
api = None
|
| 18 |
-
return api
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
def get_response_from_chatgpt(api, text):
|
| 22 |
-
if api is None:
|
| 23 |
-
return "Openai said: I'm too tired. Let me lie down for a few days. If you like, you can visit my home."
|
| 24 |
try:
|
| 25 |
response = openai.Completion.create(engine='text-davinci-003', prompt=text, max_tokens=100)
|
| 26 |
response_str = response["choices"][0]["text"].replace("\n", "")
|
|
@@ -246,21 +233,19 @@ for space_id in space_ids.keys():
|
|
| 246 |
except Exception as e:
|
| 247 |
logger.info(f"load_fail__{space_id}_{e}")
|
| 248 |
|
| 249 |
-
def chat(
|
| 250 |
out_chat = []
|
| 251 |
if chat_history != '':
|
| 252 |
out_chat = json.loads(chat_history)
|
| 253 |
logger.info(f"out_chat_: {len(out_chat)} / {chat_radio}")
|
| 254 |
if chat_radio == "Talk to chatGPT":
|
| 255 |
-
response = get_response_from_chatgpt(
|
| 256 |
-
# response = get_response_from_microsoft(input0)
|
| 257 |
-
# response = get_response_from_skywork(input0)
|
| 258 |
out_chat.append((input0, response))
|
| 259 |
chat_history = json.dumps(out_chat)
|
| 260 |
-
return
|
| 261 |
else:
|
| 262 |
prompt_en = getTextTrans(input0, source='zh', target='en') + f',{random.randint(0,sys.maxsize)}'
|
| 263 |
-
return
|
| 264 |
|
| 265 |
with gr.Blocks(title='Talk to chatGPT') as demo:
|
| 266 |
gr.HTML("<p>You can duplicating this space and use your own session token: <a style='display:inline-block' href='https://huggingface.co/spaces/yizhangliu/chatGPT?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")
|
|
@@ -293,10 +278,10 @@ with gr.Blocks(title='Talk to chatGPT') as demo:
|
|
| 293 |
rounded=(True, True, True, True),
|
| 294 |
width=100
|
| 295 |
)
|
| 296 |
-
|
| 297 |
submit_btn.click(fn=chat,
|
| 298 |
-
inputs=[
|
| 299 |
-
outputs=[
|
| 300 |
)
|
| 301 |
with gr.Row(elem_id='tab_img', visible=False).style(height=5):
|
| 302 |
tab_img = gr.TabbedInterface(tab_actions, tab_titles)
|
|
|
|
| 7 |
openai.api_key = os.environ.get('SessionToken')
|
| 8 |
logger.info(f"session_token_: {openai.api_key}")
|
| 9 |
|
| 10 |
+
def get_response_from_chatgpt(text):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
try:
|
| 12 |
response = openai.Completion.create(engine='text-davinci-003', prompt=text, max_tokens=100)
|
| 13 |
response_str = response["choices"][0]["text"].replace("\n", "")
|
|
|
|
| 233 |
except Exception as e:
|
| 234 |
logger.info(f"load_fail__{space_id}_{e}")
|
| 235 |
|
| 236 |
+
def chat(input0, input1, chat_radio, chat_history):
    """Handle one chat turn from the Gradio UI.

    Routes the user message either to chatGPT (text reply) or, for the
    image tab, to a translated-and-salted English prompt.

    Args:
        input0: The user's message (possibly Chinese).
        input1: Current prompt textbox value, passed through unchanged
            on the chatGPT branch.
        chat_radio: Mode selector; "Talk to chatGPT" picks the text branch.
        chat_history: JSON-encoded list of (question, answer) pairs, or ''.

    Returns:
        A 3-tuple (out_chat, prompt, chat_history) feeding the chatbot
        widget, the prompt textbox, and the serialized history state.
    """
    # Deserialize prior turns; an empty string means a fresh conversation.
    out_chat = [] if chat_history == '' else json.loads(chat_history)
    logger.info(f"out_chat_: {len(out_chat)} / {chat_radio}")

    if chat_radio == "Talk to chatGPT":
        reply = get_response_from_chatgpt(input0)
        out_chat.append((input0, reply))
        # Re-serialize so the Gradio state component stays a plain string.
        return out_chat, input1, json.dumps(out_chat)

    # Image branch: translate zh -> en and append a random salt so repeated
    # submissions of the same text still produce a distinct prompt.
    prompt_en = getTextTrans(input0, source='zh', target='en') + f',{random.randint(0,sys.maxsize)}'
    return out_chat, prompt_en, chat_history
|
| 249 |
|
| 250 |
with gr.Blocks(title='Talk to chatGPT') as demo:
|
| 251 |
gr.HTML("<p>You can duplicating this space and use your own session token: <a style='display:inline-block' href='https://huggingface.co/spaces/yizhangliu/chatGPT?duplicate=true'><img src='https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14' alt='Duplicate Space'></a></p>")
|
|
|
|
| 278 |
rounded=(True, True, True, True),
|
| 279 |
width=100
|
| 280 |
)
|
| 281 |
+
|
| 282 |
submit_btn.click(fn=chat,
|
| 283 |
+
inputs=[prompt_input0, prompt_input1, chat_radio, chat_history],
|
| 284 |
+
outputs=[chatbot, prompt_input1, chat_history],
|
| 285 |
)
|
| 286 |
with gr.Row(elem_id='tab_img', visible=False).style(height=5):
|
| 287 |
tab_img = gr.TabbedInterface(tab_actions, tab_titles)
|