venthan committed on
Commit
5b2530d
·
verified ·
1 Parent(s): 950b488

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -67,6 +67,7 @@ def submit_message(prompt, prompt_template, good_foods, bad_foods, temperature,
67
  # completion2 = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + food_priming_prompt + history[-1:] + [classification_msg], temperature=temperature, max_tokens=max_tokens)
68
  # print(completion2)
69
  messages2 = system_prompt + food_priming_prompt + answer + [classification_msg]
 
70
  completion2 = client.chat.completions.create(
71
  model=LLM_MODEL,
72
  messages=messages2,
@@ -87,7 +88,7 @@ def submit_message(prompt, prompt_template, good_foods, bad_foods, temperature,
87
 
88
  total_tokens_used_msg = f"Total tokens used: {state['total_tokens']}"
89
  print(history)
90
- chat_messages = [(history[i].content, history[i+1].content) for i in range(0, len(history)-1, 2)]
91
 
92
  return '', chat_messages, total_tokens_used_msg, state, table
93
 
 
67
  # completion2 = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=system_prompt + food_priming_prompt + history[-1:] + [classification_msg], temperature=temperature, max_tokens=max_tokens)
68
  # print(completion2)
69
  messages2 = system_prompt + food_priming_prompt + answer + [classification_msg]
70
+ print('Messages %s',messages2)
71
  completion2 = client.chat.completions.create(
72
  model=LLM_MODEL,
73
  messages=messages2,
 
88
 
89
  total_tokens_used_msg = f"Total tokens used: {state['total_tokens']}"
90
  print(history)
91
+ chat_messages = [(history[i]['content'], history[i+1]['content']) for i in range(0, len(history)-1, 2)]
92
 
93
  return '', chat_messages, total_tokens_used_msg, state, table
94