kunal-sinha-coding committed on
Commit
3bd9d74
·
1 Parent(s): b99b768

Set max token limit to avoid errors

Browse files
Files changed (1) hide show
  1. app.py +2 -0
app.py CHANGED
@@ -13,6 +13,7 @@ openai.api_key = os.getenv("OPENAI_API_KEY")
13
  CHAT_ENDPOINT="https://api.openai.com/v1/chat/completions"
14
  CHAT_MODEL = "gpt-3.5-turbo"
15
  CHAT_AUTH = {"Authorization": "Bearer " + openai.api_key}
 
16
  gpt_history = []
17
 
18
  class ChatRoles():
@@ -24,6 +25,7 @@ def get_assistant_response():
24
  params = {
25
  "model": CHAT_MODEL,
26
  "messages": gpt_history,
 
27
  }
28
  response = requests.post(url=CHAT_ENDPOINT, json=params, headers=CHAT_AUTH)
29
  print(literal_eval(response.content.decode("utf-8")))
 
13
  CHAT_ENDPOINT="https://api.openai.com/v1/chat/completions"
14
  CHAT_MODEL = "gpt-3.5-turbo"
15
  CHAT_AUTH = {"Authorization": "Bearer " + openai.api_key}
16
+ MAX_TOKENS = 4096
17
  gpt_history = []
18
 
19
  class ChatRoles():
 
25
  params = {
26
  "model": CHAT_MODEL,
27
  "messages": gpt_history,
28
+ "max_tokens": MAX_TOKENS
29
  }
30
  response = requests.post(url=CHAT_ENDPOINT, json=params, headers=CHAT_AUTH)
31
  print(literal_eval(response.content.decode("utf-8")))