Afeezee committed on
Commit
45e8e5a
·
verified ·
1 Parent(s): f0ed964

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -28,7 +28,7 @@ def start_trivia_game():
28
  model="llama-3.1-70b-versatile",
29
  messages=conversation_history,
30
  temperature=1,
31
- max_tokens=128000,
32
  top_p=1,
33
  stream=True,
34
  stop=None,
@@ -49,7 +49,7 @@ def continue_trivia_game(user_response):
49
  conversation_history.append({"role": "user", "content": user_response})
50
 
51
  # Token limit management
52
- max_tokens = 128000 # Maximum token limit for the LLM (example value)
53
  current_tokens = count_tokens(conversation_history)
54
 
55
  while current_tokens > max_tokens:
@@ -65,7 +65,7 @@ def continue_trivia_game(user_response):
65
  model="llama-3.1-70b-versatile",
66
  messages=conversation_history,
67
  temperature=1,
68
- max_tokens=128000,
69
  top_p=1,
70
  stream=True,
71
  stop=None,
 
28
  model="llama-3.1-70b-versatile",
29
  messages=conversation_history,
30
  temperature=1,
31
+ max_tokens=8000,
32
  top_p=1,
33
  stream=True,
34
  stop=None,
 
49
  conversation_history.append({"role": "user", "content": user_response})
50
 
51
  # Token limit management
52
+ max_tokens = 8000 # Maximum token limit for the LLM (example value)
53
  current_tokens = count_tokens(conversation_history)
54
 
55
  while current_tokens > max_tokens:
 
65
  model="llama-3.1-70b-versatile",
66
  messages=conversation_history,
67
  temperature=1,
68
+ max_tokens=8000,
69
  top_p=1,
70
  stream=True,
71
  stop=None,