Commit d378f2a (parent: d5b5de9): removed system from gpt
app.py
CHANGED
@@ -20,20 +20,18 @@ def update_api_key(new_key):
     os.environ['OPENAI_API_TOKEN'] = new_key
     openai.api_key = os.environ['OPENAI_API_TOKEN']
 
-def chat(system_prompt, user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
+def chat(user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
     ''' Normal call of OpenAI API '''
     response = openai.ChatCompletion.create(
         temperature = temperature,
         model=model,
         messages=[
-            {"role": "system", "content": system_prompt},
             {"role": "user", "content": user_prompt}
         ])
 
     res = response['choices'][0]['message']['content']
 
     if verbose:
-        print('System prompt:', system_prompt)
         print('User prompt:', user_prompt)
         print('GPT response:', res)
 
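For readability, here is the chat() helper as it stands after this hunk. This is a reconstruction from the diff context only: indentation is approximated, and the trailing return is an assumption (it falls outside the hunk, but gpt_respond assigns the function's result later in the file).

def chat(user_prompt, model = 'gpt-3.5-turbo', temperature = 0, verbose = False):
    ''' Normal call of OpenAI API '''
    # Only a user turn is sent now that the system message has been removed.
    response = openai.ChatCompletion.create(
        temperature = temperature,
        model=model,
        messages=[
            {"role": "user", "content": user_prompt}
        ])

    res = response['choices'][0]['message']['content']

    if verbose:
        print('User prompt:', user_prompt)
        print('GPT response:', res)

    return res  # assumed: the return statement is not shown in this hunk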
@@ -52,10 +50,9 @@ def gpt_respond(have_key, tab_name, message, chat_history, max_convo_length = 10
         return "", chat_history
 
     formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
-    print('Prompt + Context:')
+    print('GPT ling ents Prompt + Context:')
     print(formatted_prompt)
-    bot_message = chat(
-                    user_prompt = formatted_prompt)
+    bot_message = chat(user_prompt = f'''Generate the output only for the assistant. Output any <{tab_name}> in the following sentence one per line: "{formatted_prompt}"''')
     chat_history.insert(0, (message, bot_message))
     return "", chat_history
 
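As a concrete illustration of the new call in gpt_respond, the request now carries a single user message that embeds both the tab name and the formatted conversation context. The values below are hypothetical; format_chat_prompt is defined elsewhere in app.py.

# Hypothetical example values, for illustration only.
tab_name = 'noun'
formatted_prompt = 'User: The quick brown fox jumps over the lazy dog.\nAssistant:'
bot_message = chat(user_prompt = f'''Generate the output only for the assistant. Output any <{tab_name}> in the following sentence one per line: "{formatted_prompt}"''')
# Sent as messages=[{"role": "user", "content": "Generate the output only for the assistant. ..."}]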
@@ -117,10 +114,9 @@ def gpt_strategies_respond(have_key, strategy, task_name, task_ling_ent, message
     formatted_system_prompt = f'''"{demon_chunk}". Using the POS tag structure above, POS tag the following sentence: "{message}"'''
 
     formatted_prompt = format_chat_prompt(message, chat_history, max_convo_length)
-    print('Prompt + Context:')
+    print('GPT coreNLP Prompt + Context:')
     print(formatted_prompt)
-    bot_message = chat(
-                    user_prompt = formatted_prompt)
+    bot_message = chat(user_prompt = formatted_system_prompt)
     chat_history.insert(0, (message, bot_message))
     return "", chat_history
 
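Note the asymmetry this hunk leaves behind: formatted_prompt is still built and printed, but only formatted_system_prompt is sent, and it now travels as the user turn. A minimal sketch with hypothetical values:

# Hypothetical values, for illustration only; formatted_prompt is printed but no longer sent.
demon_chunk = 'DT NN VBD'
message = 'The dog barked.'
formatted_system_prompt = f'''"{demon_chunk}". Using the POS tag structure above, POS tag the following sentence: "{message}"'''
bot_message = chat(user_prompt = formatted_system_prompt)
# i.e. messages=[{"role": "user", "content": formatted_system_prompt}]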
@@ -251,12 +247,12 @@ def interface():
                             outputs=[ling_ents_prompt, gpt_ling_ents_chatbot])
 
         # Event Handler for LLaMA Chatbot
-        ling_ents_btn.click(llama_respond, inputs=[linguistic_entities, ling_ents_prompt, llama_ling_ents_chatbot],
-                            outputs=[linguistic_entities, ling_ents_prompt, llama_ling_ents_chatbot])
+        # ling_ents_btn.click(llama_respond, inputs=[linguistic_entities, ling_ents_prompt, llama_ling_ents_chatbot],
+        # outputs=[linguistic_entities, ling_ents_prompt, llama_ling_ents_chatbot])
 
         # Event Handler for Vicuna Chatbot
-        ling_ents_btn.click(vicuna_respond, inputs=[linguistic_entities, ling_ents_prompt, vicuna_ling_ents_chatbot],
-                            outputs=[linguistic_entities, ling_ents_prompt, vicuna_ling_ents_chatbot])
+        # ling_ents_btn.click(vicuna_respond, inputs=[linguistic_entities, ling_ents_prompt, vicuna_ling_ents_chatbot],
+        # outputs=[linguistic_entities, ling_ents_prompt, vicuna_ling_ents_chatbot])
 
     with gr.Tab("CoreNLP"):
         with gr.Row():
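With the LLaMA and Vicuna bindings commented out, only the GPT handler stays wired on this tab; its outputs continuation is the first context line of the hunk. The wiring follows Gradio's standard Blocks event pattern. The sketch below is a guess at the full call: only the outputs line is visible in the diff, and the inputs list is inferred from gpt_respond's signature.

# Sketch only: the component names in inputs are inferred, not shown in this diff.
ling_ents_btn.click(gpt_respond,
                    inputs=[have_key, linguistic_entities, ling_ents_prompt, gpt_ling_ents_chatbot],
                    outputs=[ling_ents_prompt, gpt_ling_ents_chatbot])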
@@ -349,8 +345,8 @@ def interface():
         # outputs=[task, task_prompt, llama_S1_chatbot])
         # task_btn.click(llama_strategies_respond, inputs=[strategy2, task, task_linguistic_entities, task_prompt, llama_S2_chatbot],
         # outputs=[task, task_prompt, llama_S2_chatbot])
-        task_btn.click(llama_strategies_respond, inputs=[strategy3, task, task_linguistic_entities, task_prompt, llama_S3_chatbot],
-                       outputs=[task, task_prompt, llama_S3_chatbot])
+        # task_btn.click(llama_strategies_respond, inputs=[strategy3, task, task_linguistic_entities, task_prompt, llama_S3_chatbot],
+        # outputs=[task, task_prompt, llama_S3_chatbot])
 
         # vicuna_strategies_respond(strategy, task_name, task_ling_ent, message, chat_history):
         # Event Handlers for Vicuna Chatbot POS/Chunk
@@ -358,8 +354,8 @@ def interface():
         # outputs=[task, task_prompt, vicuna_S1_chatbot])
         # task_btn.click(vicuna_strategies_respond, inputs=[strategy2, task, task_linguistic_entities, task_prompt, vicuna_S2_chatbot],
         # outputs=[task, task_prompt, vicuna_S2_chatbot])
-        task_btn.click(vicuna_strategies_respond, inputs=[strategy3, task, task_linguistic_entities, task_prompt, vicuna_S3_chatbot],
-                       outputs=[task, task_prompt, vicuna_S3_chatbot])
+        # task_btn.click(vicuna_strategies_respond, inputs=[strategy3, task, task_linguistic_entities, task_prompt, vicuna_S3_chatbot],
+        # outputs=[task, task_prompt, vicuna_S3_chatbot])
 
 with gr.Blocks(theme=gr.themes.Soft()) as demo:
     gr.Markdown("""
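The last two hunks mirror the earlier one: the live strategy-3 bindings for the LLaMA and Vicuna chatbots are commented out as well, so as far as this diff shows, only the GPT paths remain active. Net effect of the commit: the chat() helper no longer sends a system role anywhere it is called. For reference only (not part of this commit), the equivalent request under the openai>=1.0 SDK would look roughly like this:

# Sketch assuming openai>=1.0; app.py itself uses the legacy openai.ChatCompletion interface.
from openai import OpenAI

client = OpenAI(api_key=os.environ['OPENAI_API_TOKEN'])
response = client.chat.completions.create(
    model='gpt-3.5-turbo',
    temperature=0,
    messages=[{"role": "user", "content": user_prompt}],
)
res = response.choices[0].message.content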