Update app.py
Browse files
app.py
CHANGED
|
@@ -59,14 +59,14 @@ def submit_message(prompt, prompt_template, good_foods, bad_foods, temperature,
|
|
| 59 |
stream=False)
|
| 60 |
|
| 61 |
history.append(prompt_msg)
|
| 62 |
-
answer = completion.choices[0].message.content
|
| 63 |
-
history.append(
|
| 64 |
|
| 65 |
state['total_tokens'] += completion['usage']['total_tokens']
|
| 66 |
|
| 67 |
# completion2 = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + food_priming_prompt + history[-1:] + [classification_msg], temperature=temperature, max_tokens=max_tokens)
|
| 68 |
# print(completion2)
|
| 69 |
-
messages2 = system_prompt + food_priming_prompt +
|
| 70 |
completion2 = client.chat.completions.create(
|
| 71 |
model=LLM_MODEL,
|
| 72 |
messages=messages2,
|
|
|
|
| 59 |
stream=False)
|
| 60 |
|
| 61 |
history.append(prompt_msg)
|
| 62 |
+
answer = {'role': 'assistant', 'content': completion.choices[0].message.content }
|
| 63 |
+
history.append(answer)
|
| 64 |
|
| 65 |
state['total_tokens'] += completion.usage.total_tokens
|
| 66 |
|
| 67 |
# completion2 = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + food_priming_prompt + history[-1:] + [classification_msg], temperature=temperature, max_tokens=max_tokens)
|
| 68 |
# print(completion2)
|
| 69 |
+
messages2 = system_prompt + food_priming_prompt + [answer] + [classification_msg]
|
| 70 |
completion2 = client.chat.completions.create(
|
| 71 |
model=LLM_MODEL,
|
| 72 |
messages=messages2,
|