Update app.py
app.py CHANGED
@@ -67,7 +67,8 @@ def submit_message(prompt, prompt_template, good_foods, bad_foods, temperature,
     # completion2 = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + food_priming_prompt + history[-1:] + [classification_msg], temperature=temperature, max_tokens=max_tokens)
     # print(completion2)
     messages2 = system_prompt + food_priming_prompt + answer + [classification_msg]
-    print('Messages
+    print('Messages')
+    print(messages2)
     completion2 = client.chat.completions.create(
         model=LLM_MODEL,
         messages=messages2,
@@ -87,7 +88,7 @@ def submit_message(prompt, prompt_template, good_foods, bad_foods, temperature,
     })

     total_tokens_used_msg = f"Total tokens used: {state['total_tokens']}"
-    print(history)
+    # print(history)
     chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]

     return '', chat_messages, total_tokens_used_msg, state, table
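For context, the call in the first hunk follows the OpenAI Python v1 client interface (client.chat.completions.create) rather than the legacy module-level openai.ChatCompletion.create that remains commented out. A minimal sketch of that pattern, assuming an OPENAI_API_KEY in the environment and using placeholder values in place of the app's LLM_MODEL, messages2, temperature, and max_tokens:

    from openai import OpenAI

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # messages2 in the app is a list of role/content dicts; this stand-in only shows the shape.
    messages2 = [
        {"role": "system", "content": "You are a food-advice assistant."},
        {"role": "user", "content": "Classify the foods mentioned so far."},
    ]

    completion2 = client.chat.completions.create(
        model="gpt-3.5-turbo",  # the app passes LLM_MODEL here
        messages=messages2,
        temperature=0.7,        # app-supplied values in practice
        max_tokens=256,
    )
    print(completion2.choices[0].message.content)  # assistant reply text
    print(completion2.usage.total_tokens)          # usage of the kind tallied in state['total_tokens']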
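The unchanged chat_messages line in the second hunk pairs the flat, role-alternating history into (user, assistant) tuples, the two-column shape a chat display such as a Gradio Chatbot accepts. A small illustration with hypothetical history entries mirroring the dicts the app appears to store:

    # Hypothetical history, in the role/content dict form used by the app.
    history = [
        {"role": "user", "content": "Is spinach a good food for me?"},
        {"role": "assistant", "content": "Yes, spinach is on your good-foods list."},
        {"role": "user", "content": "What about bacon?"},
        {"role": "assistant", "content": "Bacon is on your bad-foods list."},
    ]

    chat_messages = [(history[i]['content'], history[i+1]['content'])
                     for i in range(0, len(history)-1, 2)]
    # [('Is spinach a good food for me?', 'Yes, spinach is on your good-foods list.'),
    #  ('What about bacon?', 'Bacon is on your bad-foods list.')]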