Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
|
@@ -11,58 +11,80 @@ conversation_history = [
|
|
| 11 |
{"role": "system", "content": "You are an assistant in a trivia game focused on Nigerian music."}
|
| 12 |
]
|
| 13 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
# Function to get the initial LLM output and start the conversation
|
| 15 |
def start_trivia_game():
|
| 16 |
# Initial message to start the game
|
| 17 |
-
initial_message = "Start a Trivia on
|
| 18 |
-
|
| 19 |
# Add the initial message to the conversation history
|
| 20 |
conversation_history.append({"role": "user", "content": initial_message})
|
| 21 |
-
|
| 22 |
# Get completion from the LLM for the initial question
|
| 23 |
completion = client.chat.completions.create(
|
| 24 |
model="llama-3.1-70b-versatile",
|
| 25 |
messages=conversation_history,
|
| 26 |
temperature=1,
|
| 27 |
-
max_tokens=
|
| 28 |
top_p=1,
|
| 29 |
stream=True,
|
| 30 |
stop=None,
|
| 31 |
)
|
| 32 |
-
|
| 33 |
llm_output = ""
|
| 34 |
for chunk in completion:
|
| 35 |
llm_output += chunk.choices[0].delta.content or ""
|
| 36 |
-
|
| 37 |
# Add the assistant's response to the conversation history
|
| 38 |
conversation_history.append({"role": "assistant", "content": llm_output})
|
| 39 |
-
|
| 40 |
return llm_output
|
| 41 |
|
| 42 |
# Function to handle user response and continue the conversation
|
| 43 |
def continue_trivia_game(user_response):
|
| 44 |
# Add user's response to the conversation history
|
| 45 |
conversation_history.append({"role": "user", "content": user_response})
|
| 46 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 47 |
# Get completion from the LLM for the user's response
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
|
| 67 |
# Start the game and get the initial LLM output
|
| 68 |
initial_output = start_trivia_game()
|
|
@@ -70,22 +92,21 @@ initial_output = start_trivia_game()
|
|
| 70 |
# Using gr.Blocks to create the interface
|
| 71 |
with gr.Blocks() as demo:
|
| 72 |
# Title and Description
|
| 73 |
-
gr.Markdown("# TriviaVilla\n A simple trivia game on Nigerian Music using LLama 3.1 LLM.")
|
| 74 |
-
|
| 75 |
# LLM Output Textbox
|
| 76 |
llm_output = gr.Textbox(label="LLM Output", placeholder="The output from the LLM will appear here", lines=10, value=initial_output)
|
| 77 |
-
|
| 78 |
# User Response Textbox
|
| 79 |
user_response = gr.Textbox(label="Your Response", placeholder="Type your response here", lines=3)
|
| 80 |
-
|
| 81 |
# Button to submit the user's response and update the LLM output
|
| 82 |
submit_button = gr.Button("Submit")
|
| 83 |
-
|
| 84 |
# Function to update the LLM output upon submission
|
| 85 |
def update_llm_output(user_input):
|
| 86 |
-
# Update the LLM output and return it
|
| 87 |
return continue_trivia_game(user_input)
|
| 88 |
-
|
| 89 |
# Define interactions
|
| 90 |
submit_button.click(fn=update_llm_output, inputs=user_response, outputs=llm_output)
|
| 91 |
|
|
|
|
| 11 |
{"role": "system", "content": "You are an assistant in a trivia game focused on Nigerian music."}
|
| 12 |
]
|
| 13 |
|
| 14 |
# Rough token estimate: counts whitespace-separated words across every
# message's content. This is only an approximation of real LLM tokens,
# but it is cheap and good enough for budget trimming.
def count_tokens(messages):
    total = 0
    for message in messages:
        total += len(message["content"].split())
    return total
| 17 |
+
|
| 18 |
# Function to get the initial LLM output and start the conversation
def start_trivia_game():
    """Kick off the trivia game and return the LLM's opening question.

    Appends the opening user prompt and the assistant's streamed reply to the
    module-level ``conversation_history``. Returns the assistant text, or a
    human-readable error message if the API call fails.
    """
    # Initial message to start the game (defines the game rules for the model).
    initial_message = "Start a Trivia on Nigerian music. Make 'None of the Above' an option available so that if the answer to the question is not in the options, users can choose 'None of the above'. Assess the answer and provide the percentage score. The highest percentage is 100%."

    # Add the initial message to the conversation history
    conversation_history.append({"role": "user", "content": initial_message})

    try:
        # Get completion from the LLM for the initial question (streamed).
        completion = client.chat.completions.create(
            model="llama-3.1-70b-versatile",
            messages=conversation_history,
            temperature=1,
            max_tokens=2048,
            top_p=1,
            stream=True,
            stop=None,
        )

        # Accumulate the streamed chunks; delta.content can be None on some chunks.
        llm_output = ""
        for chunk in completion:
            llm_output += chunk.choices[0].delta.content or ""

        # Remember the assistant's reply so follow-up turns keep full context.
        conversation_history.append({"role": "assistant", "content": llm_output})

        return llm_output
    except Exception as e:
        # This function runs at import time; without a try/except any transient
        # API failure (e.g. rate limiting) crashes the whole app at startup.
        # Surface the error in the UI instead, matching continue_trivia_game.
        if "rate_limit_exceeded" in str(e):
            return "You've reached the maximum number of requests. Please wait a few minutes before trying again."
        return f"An error occurred. Try again in 10 minutes: {str(e)}"
| 45 |
|
| 46 |
# Function to handle user response and continue the conversation
def continue_trivia_game(user_response):
    """Process one player turn: record it, trim history to the token budget,
    and return the LLM's streamed reply (or a human-readable error message)."""
    # Add user's response to the conversation history
    conversation_history.append({"role": "user", "content": user_response})

    # Token limit management: drop the oldest exchanges until the
    # (word-count-approximated) history fits the budget.
    max_tokens = 2048  # Maximum token limit for the LLM (example value)
    current_tokens = count_tokens(conversation_history)

    while current_tokens > max_tokens:
        if len(conversation_history) > 3:
            # Index 0 is the system prompt: remove the oldest user message
            # and the assistant reply that followed it.
            conversation_history.pop(1)
            conversation_history.pop(1)
        elif len(conversation_history) > 2:
            # Only one removable message left; popping two here would discard
            # the user turn we just appended.
            conversation_history.pop(1)
        else:
            # Nothing left to trim (system prompt + latest user turn only);
            # break so an over-budget-but-untrimmable history cannot spin
            # this loop forever.
            break
        current_tokens = count_tokens(conversation_history)

    # Get completion from the LLM for the user's response
    try:
        completion = client.chat.completions.create(
            model="llama-3.1-70b-versatile",
            messages=conversation_history,
            temperature=1,
            max_tokens=2048,
            top_p=1,
            stream=True,
            stop=None,
        )

        # Accumulate the streamed chunks; delta.content can be None on some chunks.
        llm_output = ""
        for chunk in completion:
            llm_output += chunk.choices[0].delta.content or ""

        # Add the assistant's response to the conversation history
        conversation_history.append({"role": "assistant", "content": llm_output})

        return llm_output
    except Exception as e:
        # Check for specific rate limit error
        if "rate_limit_exceeded" in str(e):
            return "You've reached the maximum number of requests. Please wait a few minutes before trying again."
        else:
            return f"An error occurred Try again in 10 minutes: {str(e)}"
| 88 |
|
| 89 |
# Start the game and get the initial LLM output.
# NOTE: this runs once at module import; the result seeds the Gradio
# output textbox defined below.
initial_output = start_trivia_game()
|
|
|
|
| 92 |
# Build the Gradio UI with gr.Blocks.
with gr.Blocks() as demo:
    # Page title and short description.
    gr.Markdown("# TriviaVilla\n A simple trivia game on Nigerian Music Industry using LLama 3.1 LLM.")

    # Shows the model's latest question/feedback; pre-filled with the
    # opening question produced at import time.
    llm_output = gr.Textbox(
        label="LLM Output",
        placeholder="The output from the LLM will appear here",
        lines=10,
        value=initial_output,
    )

    # Where the player types an answer.
    user_response = gr.Textbox(
        label="Your Response",
        placeholder="Type your response here",
        lines=3,
    )

    # Submits the answer and refreshes the model output.
    submit_button = gr.Button("Submit")

    # Thin wrapper so the click handler carries a descriptive name.
    def update_llm_output(user_input):
        return continue_trivia_game(user_input)

    # Wire the button: player's text in, model's reply out.
    submit_button.click(fn=update_llm_output, inputs=user_response, outputs=llm_output)