pratham0011 commited on
Commit
8e91f6d
·
verified ·
1 Parent(s): 12d0a26

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +248 -248
app.py CHANGED
@@ -1,248 +1,248 @@
1
- import gradio as gr
2
- from agents.tutor_agent import TutorAgent
3
- import time
4
- import logging
5
- import traceback
6
- import os
7
-
8
- # Configure logging
9
- logging.basicConfig(
10
- level=logging.INFO,
11
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
12
- handlers=[
13
- logging.FileHandler("tutoring_bot.log"),
14
- logging.StreamHandler()
15
- ]
16
- )
17
-
18
- class TutoringBotApp:
19
- """Main application class for the Multi-Agent Tutoring Bot."""
20
-
21
- def __init__(self):
22
- self.tutor_agent = TutorAgent()
23
- self.conversation_history = []
24
-
25
- def chat_response(self, message, history):
26
- """Handle chat responses with conversation history."""
27
- if not message.strip():
28
- return history, ""
29
-
30
- # Process the query
31
- try:
32
- logging.info(f"Processing query: {message}")
33
- response = self.tutor_agent.process_query(message)
34
- logging.info(f"Query processed successfully. Response: {response[:100]}...")
35
-
36
- # Ensure response is a string
37
- if response is None:
38
- response = "I apologize, but I couldn't generate a response. Please try again."
39
- logging.warning("Response was None, using default message")
40
-
41
- # Add to history using the new messages format
42
- new_history = list(history) # Create a copy to avoid modifying the original
43
- new_history.append({"role": "user", "content": message})
44
- new_history.append({"role": "assistant", "content": response})
45
-
46
- # Log the history for debugging
47
- logging.info(f"Updated history length: {len(new_history)}")
48
- if len(new_history) > 0:
49
- logging.info(f"Last history item: {new_history[-1]}")
50
-
51
- # Store in conversation history
52
- self.conversation_history.append({
53
- "user": message,
54
- "bot": response,
55
- "timestamp": time.time()
56
- })
57
-
58
- return new_history, ""
59
-
60
- except Exception as e:
61
- error_details = traceback.format_exc()
62
- logging.error(f"Error processing query: {str(e)}")
63
- logging.error(f"Traceback: {error_details}")
64
-
65
- error_response = f"I apologize, but I encountered an error. Please make sure Ollama is running and try again. Error: {str(e)}"
66
-
67
- new_history = list(history) # Create a copy to avoid modifying the original
68
- new_history.append({"role": "user", "content": message})
69
- new_history.append({"role": "assistant", "content": error_response})
70
-
71
- return new_history, ""
72
-
73
- def show_capabilities(self):
74
- """Display bot capabilities."""
75
- return self.tutor_agent.get_capabilities()
76
-
77
- def clear_conversation(self):
78
- """Clear the conversation history."""
79
- self.conversation_history = []
80
- return []
81
-
82
- def create_interface(self):
83
- """Create and configure the Gradio interface."""
84
- with gr.Blocks(
85
- title="Multi-Agent Tutoring Bot",
86
- theme=gr.themes.Soft(),
87
- css="""
88
- .main-header {
89
- text-align: center;
90
- color: #2E8B57;
91
- margin-bottom: 20px;
92
- }
93
- .info-box {
94
- background-color: #f0f8ff;
95
- padding: 15px;
96
- border-radius: 10px;
97
- border: 1px solid #add8e6;
98
- margin: 10px 0;
99
- }
100
- """
101
- ) as demo:
102
-
103
- gr.Markdown(
104
- """
105
- # 🎓 Multi-Agent Tutoring Bot
106
- ### Powered by LangChain, Ollama, and Gradio
107
-
108
- Get help with **Mathematics** and **Physics** from specialized AI agents!
109
- """,
110
- elem_classes=["main-header"]
111
- )
112
-
113
- with gr.Row():
114
- with gr.Column(scale=2):
115
- chatbot = gr.Chatbot(
116
- height=500,
117
- show_label=False,
118
- avatar_images=[
119
- "https://cdn-icons-png.flaticon.com/512/3135/3135810.png", # Student icon
120
- "https://cdn-icons-png.flaticon.com/512/4712/4712027.png" # Robot icon
121
- ],
122
- type="messages",
123
- render_markdown=True
124
- )
125
-
126
- msg = gr.Textbox(
127
- placeholder="Ask me about mathematics or physics...",
128
- label="Your Question",
129
- lines=2
130
- )
131
-
132
- with gr.Row():
133
- submit_btn = gr.Button("Send", variant="primary")
134
- clear_btn = gr.Button("Clear Chat", variant="secondary")
135
-
136
- with gr.Column(scale=1):
137
- gr.Markdown(
138
- """
139
- ### 📚 Quick Examples
140
- **Mathematics:**
141
- - "Solve the equation 2x + 5 = 11"
142
- - "What is the derivative of x²?"
143
- - "Calculate 15 × 23"
144
-
145
- **Physics:**
146
- - "What is Newton's second law?"
147
- - "Explain kinetic energy"
148
- - "What is the speed of light?"
149
-
150
- ### ⚙️ System Info
151
- - **Model:** Qwen3 0.6b via Ollama
152
- - **Framework:** LangChain
153
- - **Agents:** Math & Physics specialists
154
- """,
155
- elem_classes=["info-box"]
156
- )
157
-
158
- capabilities_btn = gr.Button("Show Full Capabilities")
159
- capabilities_output = gr.Markdown(visible=False)
160
-
161
- # Event handlers
162
- def submit_message(message, history):
163
- logging.info(f"Submit message called with message: '{message}'")
164
- logging.info(f"Current history length: {len(history) if history else 0}")
165
-
166
- # Ensure message is not empty
167
- if not message or not message.strip():
168
- logging.warning("Empty message submitted, ignoring")
169
- return history, ""
170
-
171
- # Process the message and get updated history
172
- updated_history, _ = self.chat_response(message, history)
173
- logging.info(f"Updated history returned with length: {len(updated_history)}")
174
-
175
- return updated_history, ""
176
-
177
- def clear_chat():
178
- return self.clear_conversation()
179
-
180
- def toggle_capabilities():
181
- capabilities_text = self.show_capabilities()
182
- return gr.Markdown(capabilities_text, visible=True)
183
-
184
- # Wire up the events
185
- msg.submit(
186
- submit_message,
187
- inputs=[msg, chatbot],
188
- outputs=[chatbot, msg]
189
- )
190
-
191
- submit_btn.click(
192
- submit_message,
193
- inputs=[msg, chatbot],
194
- outputs=[chatbot, msg]
195
- )
196
-
197
- clear_btn.click(
198
- clear_chat,
199
- outputs=[chatbot]
200
- )
201
-
202
- capabilities_btn.click(
203
- toggle_capabilities,
204
- outputs=[capabilities_output]
205
- )
206
-
207
- # Welcome message - updated for new format
208
- demo.load(
209
- lambda: [{"role": "assistant", "content": "Hello! I'm your AI tutoring assistant. I can help you with mathematics and physics questions. What would you like to learn about today?"}],
210
- outputs=[chatbot]
211
- )
212
-
213
- return demo
214
-
215
- def main():
216
- """Main function to run the application."""
217
- print("Starting Multi-Agent Tutoring Bot...")
218
- print("Make sure Ollama is running with qwen3:0.6b model")
219
-
220
- logging.info("Initializing Tutoring Bot application")
221
-
222
- try:
223
- app = TutoringBotApp()
224
- demo = app.create_interface()
225
-
226
- logging.info("Application initialized successfully")
227
-
228
- # Launch the app
229
- server_name = os.environ.get("SERVER_NAME", "127.0.0.1")
230
- server_port = int(os.environ.get("SERVER_PORT", "7860"))
231
- logging.info(f"Launching web interface on http://{server_name}:{server_port}")
232
- demo.launch(
233
- server_name=server_name,
234
- server_port=server_port,
235
- share=False,
236
- show_error=True,
237
- quiet=False,
238
- debug=True
239
- )
240
- except Exception as e:
241
- error_details = traceback.format_exc()
242
- logging.error(f"Failed to start application: {str(e)}")
243
- logging.error(f"Traceback: {error_details}")
244
- print(f"Error starting application: {str(e)}")
245
- print("Check tutoring_bot.log for details")
246
-
247
- if __name__ == "__main__":
248
- main()
 
1
+ import gradio as gr
2
+ from agents.tutor_agent import TutorAgent
3
+ import time
4
+ import logging
5
+ import traceback
6
+ import os
7
+
8
+ # Configure logging
9
+ logging.basicConfig(
10
+ level=logging.INFO,
11
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
12
+ handlers=[
13
+ logging.FileHandler("tutoring_bot.log"),
14
+ logging.StreamHandler()
15
+ ]
16
+ )
17
+
18
class TutoringBotApp:
    """Main application class for the Multi-Agent Tutoring Bot.

    Owns the TutorAgent that answers questions, keeps a local record of the
    conversation, and builds the Gradio Blocks interface. History passed to
    and from Gradio uses the messages format: a list of
    {"role": ..., "content": ...} dicts.
    """

    def __init__(self):
        # Delegate all question answering to the tutor agent.
        self.tutor_agent = TutorAgent()
        # App-side transcript of (user, bot, timestamp) entries; independent
        # of the Gradio chatbot history.
        self.conversation_history = []

    def chat_response(self, message, history):
        """Handle chat responses with conversation history.

        Args:
            message: The raw user input string.
            history: Current chat history in Gradio messages format.

        Returns:
            A (new_history, "") tuple — the empty string clears the textbox.
            On failure, an apology message is appended instead of raising.
        """
        # Ignore whitespace-only input; return history unchanged.
        if not message.strip():
            return history, ""

        # Process the query
        try:
            logging.info(f"Processing query: {message}")
            response = self.tutor_agent.process_query(message)
            logging.info(f"Query processed successfully. Response: {response[:100]}...")

            # Ensure response is a string
            if response is None:
                response = "I apologize, but I couldn't generate a response. Please try again."
                logging.warning("Response was None, using default message")

            # Add to history using the new messages format
            new_history = list(history)  # Create a copy to avoid modifying the original
            new_history.append({"role": "user", "content": message})
            new_history.append({"role": "assistant", "content": response})

            # Log the history for debugging
            logging.info(f"Updated history length: {len(new_history)}")
            if len(new_history) > 0:
                logging.info(f"Last history item: {new_history[-1]}")

            # Store in conversation history
            self.conversation_history.append({
                "user": message,
                "bot": response,
                "timestamp": time.time()
            })

            return new_history, ""

        except Exception as e:
            # Catch-all boundary: log the full traceback and surface a
            # user-facing error message instead of crashing the UI callback.
            error_details = traceback.format_exc()
            logging.error(f"Error processing query: {str(e)}")
            logging.error(f"Traceback: {error_details}")

            error_response = f"I apologize, but I encountered an error. Please make sure Ollama is running and try again. Error: {str(e)}"

            new_history = list(history)  # Create a copy to avoid modifying the original
            new_history.append({"role": "user", "content": message})
            new_history.append({"role": "assistant", "content": error_response})

            return new_history, ""

    def show_capabilities(self):
        """Display bot capabilities (delegates to the tutor agent)."""
        return self.tutor_agent.get_capabilities()

    def clear_conversation(self):
        """Clear the conversation history.

        Returns an empty list so it can be wired directly as a Gradio
        callback output for the chatbot component. NOTE(review): this also
        removes the welcome message shown on load — confirm that is intended.
        """
        self.conversation_history = []
        return []

    def create_interface(self):
        """Create and configure the Gradio interface.

        Returns:
            The constructed gr.Blocks demo, ready for .launch().
        """
        with gr.Blocks(
            title="Multi-Agent Tutoring Bot",
            theme=gr.themes.Soft(),
            css="""
            .main-header {
                text-align: center;
                color: #2E8B57;
                margin-bottom: 20px;
            }
            .info-box {
                background-color: #f0f8ff;
                padding: 15px;
                border-radius: 10px;
                border: 1px solid #add8e6;
                margin: 10px 0;
            }
            """
        ) as demo:

            gr.Markdown(
                """
                # 🎓 Multi-Agent Tutoring Bot
                ### Powered by LangChain, Ollama, and Gradio

                Get help with **Mathematics** and **Physics** from specialized AI agents!
                """,
                elem_classes=["main-header"]
            )

            with gr.Row():
                # Left column: the chat area.
                with gr.Column(scale=2):
                    chatbot = gr.Chatbot(
                        height=500,
                        show_label=False,
                        avatar_images=[
                            "https://cdn-icons-png.flaticon.com/512/3135/3135810.png",  # Student icon
                            "https://cdn-icons-png.flaticon.com/512/4712/4712027.png"   # Robot icon
                        ],
                        type="messages",  # history is a list of role/content dicts
                        render_markdown=True
                    )

                    msg = gr.Textbox(
                        placeholder="Ask me about mathematics or physics...",
                        label="Your Question",
                        lines=2
                    )

                    with gr.Row():
                        submit_btn = gr.Button("Send", variant="primary")
                        clear_btn = gr.Button("Clear Chat", variant="secondary")

                # Right column: static help text and capabilities toggle.
                with gr.Column(scale=1):
                    gr.Markdown(
                        """
                        ### 📚 Quick Examples
                        **Mathematics:**
                        - "Solve the equation 2x + 5 = 11"
                        - "What is the derivative of x²?"
                        - "Calculate 15 × 23"

                        **Physics:**
                        - "What is Newton's second law?"
                        - "Explain kinetic energy"
                        - "What is the speed of light?"

                        ### ⚙️ System Info
                        - **Model:** Qwen3 0.6b via Ollama
                        - **Framework:** LangChain
                        - **Agents:** Math & Physics specialists
                        """,
                        elem_classes=["info-box"]
                    )

                    capabilities_btn = gr.Button("Show Full Capabilities")
                    # Hidden until the button is clicked.
                    capabilities_output = gr.Markdown(visible=False)

            # Event handlers
            def submit_message(message, history):
                # Thin wrapper around chat_response with extra logging and
                # an empty-input guard.
                logging.info(f"Submit message called with message: '{message}'")
                logging.info(f"Current history length: {len(history) if history else 0}")

                # Ensure message is not empty
                if not message or not message.strip():
                    logging.warning("Empty message submitted, ignoring")
                    return history, ""

                # Process the message and get updated history
                updated_history, _ = self.chat_response(message, history)
                logging.info(f"Updated history returned with length: {len(updated_history)}")

                return updated_history, ""

            def clear_chat():
                return self.clear_conversation()

            def toggle_capabilities():
                # Reveal the capabilities panel with fresh text.
                capabilities_text = self.show_capabilities()
                return gr.Markdown(capabilities_text, visible=True)

            # Wire up the events
            msg.submit(
                submit_message,
                inputs=[msg, chatbot],
                outputs=[chatbot, msg]
            )

            submit_btn.click(
                submit_message,
                inputs=[msg, chatbot],
                outputs=[chatbot, msg]
            )

            clear_btn.click(
                clear_chat,
                outputs=[chatbot]
            )

            capabilities_btn.click(
                toggle_capabilities,
                outputs=[capabilities_output]
            )

            # Welcome message - updated for new format
            demo.load(
                lambda: [{"role": "assistant", "content": "Hello! I'm your AI tutoring assistant. I can help you with mathematics and physics questions. What would you like to learn about today?"}],
                outputs=[chatbot]
            )

        return demo
214
+
215
def main():
    """Initialize the tutoring bot and launch the Gradio web interface.

    The bind address and port are read from the SERVER_NAME / SERVER_PORT
    environment variables, defaulting to 0.0.0.0:7860 so the app is
    reachable from outside a container (the values this deployment
    previously hard-coded). Startup failures are logged to
    tutoring_bot.log and printed rather than re-raised.
    """
    print("Starting Multi-Agent Tutoring Bot...")
    print("Make sure Ollama is running with qwen3:0.6b model")

    logging.info("Initializing Tutoring Bot application")

    try:
        app = TutoringBotApp()
        demo = app.create_interface()

        logging.info("Application initialized successfully")

        # Launch the app. Pass the env-derived values to launch() so the
        # logged URL always matches the actual bind address (previously the
        # call hard-coded "0.0.0.0"/7860 and ignored these variables, and a
        # missing comma after server_port made the file a SyntaxError).
        server_name = os.environ.get("SERVER_NAME", "0.0.0.0")
        server_port = int(os.environ.get("SERVER_PORT", "7860"))
        logging.info(f"Launching web interface on http://{server_name}:{server_port}")
        demo.launch(
            server_name=server_name,
            server_port=server_port,
            share=False,
            show_error=True,
            quiet=False,
            debug=True,
        )
    except Exception as e:
        # Boundary handler: record full details, keep console output short.
        error_details = traceback.format_exc()
        logging.error(f"Failed to start application: {str(e)}")
        logging.error(f"Traceback: {error_details}")
        print(f"Error starting application: {str(e)}")
        print("Check tutoring_bot.log for details")

if __name__ == "__main__":
    main()