Afeezee committed on
Commit
88ebbe2
·
verified ·
1 Parent(s): e04ec51

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +112 -0
app.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import gradio as gr
from groq import Groq

# SECURITY FIX: the Groq API key was previously hard-coded in this file,
# which leaks the secret into source control. Read it from the environment
# instead (the old key is exposed in commit history and must be revoked).
client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# Shared conversation history for the whole session. The system prompt
# frames the LLM as a fact-checker that quizzes the user and awards points.
conversation_history = [
    {"role": "system", "content": "You are an experienced fact-checker and facts curator with over 30 years of experience in gathering and verifying facts. You will present a fact/claim and ask the user to verify if it is true or false. The fact/claim must be concise and you will award points for each correct answer."}
]
11
+
12
# Cheap token estimate used for history trimming (no tokenizer available).
def count_tokens(messages):
    """Approximate the token count of *messages* by summing whitespace-split word counts."""
    total = 0
    for message in messages:
        total += len(message["content"].split())
    return total
15
+
16
# Kick off the session: seed the history with the opening instruction,
# stream the model's first question, and record it for later turns.
def start_trivia_game():
    """Send the opening prompt to the LLM and return its first streamed reply."""
    opening_message = "You will present a fact/claim and ask me to verify if it is true or false. The fact/claim must be concise. On inputing stop, show the total score and percentage score concisely"
    conversation_history.append({"role": "user", "content": opening_message})

    # Request a streamed completion so chunks arrive incrementally.
    stream = client.chat.completions.create(
        model="llama-3.1-70b-versatile",
        messages=conversation_history,
        temperature=0.5,
        max_tokens=2048,
        top_p=1,
        stream=True,
        stop=None,
    )

    # Join all streamed delta chunks into the full reply text.
    reply = "".join(chunk.choices[0].delta.content or "" for chunk in stream)

    # Persist the assistant turn so subsequent exchanges keep full context.
    conversation_history.append({"role": "assistant", "content": reply})
    return reply
43
+
44
# Handle one user turn: record it, trim history to the token budget, then
# stream the LLM's next question/verdict back to the caller.
def continue_trivia_game(user_response):
    """Append *user_response* to the conversation, query the LLM, and return its reply.

    On API failure returns a user-facing error string instead of raising.
    """
    conversation_history.append({"role": "user", "content": user_response})

    # Trim the oldest user/assistant pair while over budget. Index 0 is the
    # system prompt and is always preserved.
    max_tokens = 1024  # approximate context budget (word-count heuristic)
    current_tokens = count_tokens(conversation_history)
    while current_tokens > max_tokens:
        # BUG FIX: the original loop checked `len(...) > 2`, which (a) spun
        # forever when nothing more could be removed (token count never
        # decreased) and (b) at length 3 the double pop deleted the user
        # message just appended above. Require a full removable pair
        # (system + pair + latest user => at least 4 entries), else stop.
        if len(conversation_history) > 3:
            conversation_history.pop(1)  # oldest non-system message
            conversation_history.pop(1)  # its paired assistant reply
            current_tokens = count_tokens(conversation_history)
        else:
            break

    try:
        completion = client.chat.completions.create(
            model="llama-3.1-70b-versatile",
            messages=conversation_history,
            temperature=0.5,
            max_tokens=2048,
            top_p=1,
            stream=True,
            stop=None,
        )

        # Accumulate the streamed chunks into the full reply text.
        llm_output = ""
        for chunk in completion:
            llm_output += chunk.choices[0].delta.content or ""

        # Keep the assistant turn so the next exchange has full context.
        conversation_history.append({"role": "assistant", "content": llm_output})
        return llm_output
    except Exception as e:
        # Give the rate-limit case a friendlier message than the raw error.
        if "rate_limit_exceeded" in str(e):
            return "You've reached the maximum number of requests. Please wait a few minutes before trying again."
        else:
            # FIX: added the missing sentence break in the user-facing message.
            return f"An error occurred. Try again in 10 minutes: {str(e)}"
86
+
87
# Run the opening exchange now so the first question is ready when the UI loads.
initial_output = start_trivia_game()

# Assemble the Gradio interface.
with gr.Blocks() as demo:
    # Title and game instructions.
    gr.Markdown("# Knowledge Quest\nA trivia game to test your knowledge by verifying if the given claims are true or false. Points are awarded for each correct answer. Type 'Stop' to end the game and see your score.")

    # Read-only area showing the model's questions and feedback.
    llm_output = gr.Textbox(
        label="LLM Output",
        placeholder="The output from the LLM will appear here",
        lines=10,
        value=initial_output,
    )

    # The player's true/false answer (or 'Stop' to end the game).
    user_response = gr.Textbox(
        label="Your Response",
        placeholder="Type your response here",
        lines=3,
    )

    submit_button = gr.Button("Submit")

    # Thin forwarding wrapper: sends the player's answer to the game loop.
    def update_llm_output(answer):
        return continue_trivia_game(answer)

    # Clicking Submit replaces the output box with the model's next turn.
    submit_button.click(fn=update_llm_output, inputs=user_response, outputs=llm_output)

# Launch the Gradio app.
demo.launch()