Spaces: Build error
Commit: Update app.py
Browse files
app.py — CHANGED
@@ -28,7 +28,7 @@ def start_trivia_game():
 28         model="llama-3.1-70b-versatile",
 29         messages=conversation_history,
 30         temperature=1,
 31 -       max_tokens=
 32         top_p=1,
 33         stream=True,
 34         stop=None,
@@ -49,7 +49,7 @@ def continue_trivia_game(user_response):
 49     conversation_history.append({"role": "user", "content": user_response})
 50
 51     # Token limit management
 52 -   max_tokens =
 53     current_tokens = count_tokens(conversation_history)
 54
 55     while current_tokens > max_tokens:
@@ -65,7 +65,7 @@ def continue_trivia_game(user_response):
 65         model="llama-3.1-70b-versatile",
 66         messages=conversation_history,
 67         temperature=1,
 68 -       max_tokens=
 69         top_p=1,
 70         stream=True,
 71         stop=None,
 28         model="llama-3.1-70b-versatile",
 29         messages=conversation_history,
 30         temperature=1,
 31 +       max_tokens=8000,
 32         top_p=1,
 33         stream=True,
 34         stop=None,
 49     conversation_history.append({"role": "user", "content": user_response})
 50
 51     # Token limit management
 52 +   max_tokens = 8000  # Maximum token limit for the LLM (example value)
 53     current_tokens = count_tokens(conversation_history)
 54
 55     while current_tokens > max_tokens:
 65         model="llama-3.1-70b-versatile",
 66         messages=conversation_history,
 67         temperature=1,
 68 +       max_tokens=8000,
 69         top_p=1,
 70         stream=True,
 71         stop=None,