Files changed (1) hide show
  1. app.py +107 -250
app.py CHANGED
@@ -1,320 +1,177 @@
1
  import gradio as gr
2
  import os
3
  import requests
4
- import json
5
- from typing import List, Tuple
6
  import time
7
 
8
# Load GROQ API key from environment (set it in Hugging Face secrets)
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions"

# ✅ UPDATED: Correct GROQ model names (as of Dec 2024)
# Maps UI display name (dropdown choice) -> GROQ API model id.
# NOTE(review): Groq has since retired the "*-preview" Llama 3.2 ids —
# confirm against the current model list before deploying.
MODELS = {
    "Llama 3.2 (3B) - Fast": "llama-3.2-3b-preview",
    "Llama 3.2 (1B) - Light": "llama-3.2-1b-preview",
    "Llama 3.2 (90B Text) - Powerful": "llama-3.2-90b-text-preview",
    "Llama 3.2 (11B Text) - Balanced": "llama-3.2-11b-text-preview",
    "Mixtral (8x7B)": "mixtral-8x7b-32768",
    "Gemma 2 (9B)": "gemma2-9b-it"
}

# 🎯 Customize this system prompt based on your bot's role
# Sent as the first "system" message on every request (see query_groq_api).
SYSTEM_PROMPT = """You are CodeMentor, a friendly and knowledgeable programming tutor.
Your role is to help users learn programming concepts, debug code, and understand different programming languages.

Key personality traits:
1. Patient and encouraging - never make users feel bad for not knowing something
2. Explain concepts clearly with simple analogies first
3. Provide practical code examples
4. When debugging, guide users to discover the solution rather than just giving the answer
5. Adapt explanations to the user's skill level
6. Include best practices and common pitfalls
7. Be enthusiastic about programming!

Always format code examples with proper syntax highlighting using markdown code blocks.
If a user asks about something non-programming related, gently steer the conversation back to programming topics."""
37
-
38
def query_groq_api(message: str, chat_history: List[Tuple[str, str]], model: str, temperature: float, max_tokens: int) -> str:
    """Send one chat-completion request to the GROQ API.

    Args:
        message: Newest user message (appended last in the payload).
        chat_history: Prior (user, assistant) exchanges, oldest first.
        model: UI display name; mapped through MODELS to the API model id.
        temperature: Sampling temperature forwarded to the API.
        max_tokens: Response length cap forwarded to the API.

    Returns:
        The assistant's reply text, or a human-readable error string —
        callers render the return value directly, so this never raises.
    """
    if not GROQ_API_KEY:
        return "⚠️ API Key not configured. Please set GROQ_API_KEY in environment variables."

    headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json"
    }

    # Build messages list: system prompt first, then prior turns, then
    # the new user message.
    messages = [{"role": "system", "content": SYSTEM_PROMPT}]
    for user_msg, bot_msg in chat_history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})

    # Map the display name to the real model id; fall back to the fast default.
    actual_model = MODELS.get(model, "llama-3.2-3b-preview")

    payload = {
        "model": actual_model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "top_p": 0.9,
        "stream": False
    }

    try:
        response = requests.post(GROQ_API_URL, headers=headers, json=payload, timeout=30)

        if response.status_code == 200:
            # FIX: a 200 with an unexpected/invalid body used to fall through
            # to the broad Exception handler with a vague message; report it
            # explicitly instead.
            try:
                data = response.json()
                return data["choices"][0]["message"]["content"]
            except (ValueError, KeyError, IndexError):
                return f"❌ Error: unexpected API response: {response.text[:200]}"
        elif response.status_code == 404:
            # Specific error for model not found
            return f"❌ Error: Model '{actual_model}' not found. Available models are: {', '.join(MODELS.values())}"
        else:
            return f"❌ Error {response.status_code}: {response.text}"

    except requests.exceptions.Timeout:
        # FIX: message previously began with a stray space and lacked the
        # status icon used by every sibling error string.
        return "⏱️ Request timeout. Please try again."
    except requests.exceptions.RequestException as e:
        return f"🚫 Connection error: {str(e)}"
    except Exception as e:
        return f"⚠️ Unexpected error: {str(e)}"
91
 
92
def respond(message: str, chat_history: List[Tuple[str, str]], model: str, temperature: float, max_tokens: int):
    """Gradio generator callback: show a typing placeholder, then the reply.

    Yields (textbox_value, chat_history) pairs; the textbox value is always
    "" so the input box is cleared on every update.

    BUG FIX: this function is a generator (it uses `yield`), so the old
    `return "", chat_history` statements silently discarded their values —
    an empty submission produced no UI update at all, and the final reply
    was only rendered because the history list happens to be mutated in
    place. Yield consistently instead.
    """
    if not message.strip():
        # Blank/whitespace-only input: emit an unchanged history once.
        yield "", chat_history
        return

    # Show typing indicator immediately.
    chat_history.append((message, "🤔 Thinking..."))
    yield "", chat_history

    # Query the API without the placeholder entry.
    bot_reply = query_groq_api(message, chat_history[:-1], model, temperature, max_tokens)

    # Replace typing indicator with actual response.
    chat_history[-1] = (message, bot_reply)
    yield "", chat_history
109
-
110
def clear_chat():
    """Reset both the visible chat window and the stored history state."""
    empty_window = []
    empty_state = []
    return empty_window, empty_state
113
-
114
def update_example_questions(programming_language: str):
    """Swap the quick-question dropdown choices for the selected language.

    Languages without a dedicated list (e.g. "C++") fall back to the
    "General" set.
    """
    python_questions = [
        "Explain list comprehensions with examples",
        "How do decorators work in Python?",
        "What's the difference between 'is' and '=='?",
        "Show me how to handle exceptions properly"
    ]
    javascript_questions = [
        "Explain promises and async/await",
        "What is the event loop?",
        "How does 'this' keyword work?",
        "Explain closure with an example"
    ]
    java_questions = [
        "Explain polymorphism with examples",
        "Difference between abstract class and interface",
        "How does garbage collection work?",
        "What are Java Streams?"
    ]
    general_questions = [
        "What's the difference between SQL and NoSQL?",
        "Explain REST API principles",
        "What are design patterns?",
        "How does Git branching work?"
    ]

    questions_by_language = {
        "Python": python_questions,
        "JavaScript": javascript_questions,
        "Java": java_questions,
        "General": general_questions
    }

    selected = questions_by_language.get(programming_language, general_questions)
    return gr.update(choices=selected)
145
-
146
# Create Gradio interface
with gr.Blocks(theme=gr.themes.Soft(), title="CodeMentor - Programming Tutor") as demo:

    # Store chat history in state.
    # NOTE: the respond() wiring below outputs only to `chatbot`; chat_state
    # stays in sync because respond() mutates the list object held here.
    chat_state = gr.State([])

    gr.Markdown("""
# 👨‍💻 CodeMentor - Your Personal Programming Tutor

Hi! I'm CodeMentor, your friendly AI programming assistant. I can help you with:
- Learning programming concepts
- Debugging code
- Understanding different languages
- Best practices and design patterns

Select your preferences below and start asking questions!

⚠️ **Note**: Using GROQ API with free tier (limited requests per minute)
""")

    with gr.Row():
        # Left column: model/tuning settings and quick actions.
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Settings")

            # Model selection dropdown (display names; mapped to API ids in MODELS)
            model_dropdown = gr.Dropdown(
                choices=list(MODELS.keys()),
                value="Llama 3.2 (3B) - Fast",
                label="Select AI Model",
                info="✅ Updated with correct GROQ model names"
            )

            # Programming language selection.
            # NOTE(review): "C++" has no entry in update_example_questions'
            # table, so it falls back to the "General" example set.
            language_dropdown = gr.Dropdown(
                choices=["Python", "JavaScript", "Java", "C++", "General"],
                value="Python",
                label="Programming Language Focus",
                info="Get language-specific examples"
            )

            # Temperature slider
            temperature_slider = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                label="Creativity (Temperature)",
                info="Lower = more focused, Higher = more creative"
            )

            # Response length slider
            max_tokens_slider = gr.Slider(
                minimum=100,
                maximum=2000,
                value=500,
                step=100,
                label="Response Length (Tokens)",
                info="Maximum length of responses"
            )

            # Example questions dropdown (initial choices are the Python set)
            gr.Markdown("### 💡 Example Questions")
            example_dropdown = gr.Dropdown(
                choices=[
                    "Explain list comprehensions with examples",
                    "How do decorators work in Python?",
                    "What's the difference between 'is' and '=='?",
                    "Show me how to handle exceptions properly"
                ],
                label="Quick Questions",
                info="Select a question to ask",
                allow_custom_value=True
            )

            # Quick action buttons
            gr.Markdown("### ⚡ Quick Actions")
            with gr.Row():
                clear_btn = gr.Button("🧹 Clear Chat", variant="secondary", size="sm")
                reset_btn = gr.Button("🔄 Reset Settings", variant="secondary", size="sm")

        # Right column: the chat itself.
        with gr.Column(scale=2):
            # Chat interface
            chatbot = gr.Chatbot(
                value=[],
                label="CodeMentor",
                height=500,
                bubble_full_width=False
            )

            # Message input
            msg = gr.Textbox(
                placeholder="Type your programming question here... (Press Enter to send)",
                label="Your Question",
                lines=2
            )

            with gr.Row():
                send_btn = gr.Button("🚀 Send", variant="primary")
                # NOTE(review): stop_btn has no .click() handler wired below —
                # it currently does nothing.
                stop_btn = gr.Button("⏹️ Stop", variant="stop")

    # Update example questions when language changes
    language_dropdown.change(
        fn=update_example_questions,
        inputs=language_dropdown,
        outputs=example_dropdown
    )

    # Handle example question selection: copy the chosen text into the textbox.
    example_dropdown.change(
        fn=lambda x: x,
        inputs=[example_dropdown],
        outputs=msg
    )

    # Handle message submission (Enter key in the textbox)
    msg.submit(
        fn=respond,
        inputs=[msg, chat_state, model_dropdown, temperature_slider, max_tokens_slider],
        outputs=[msg, chatbot]
    )

    # Same handler for the explicit Send button.
    send_btn.click(
        fn=respond,
        inputs=[msg, chat_state, model_dropdown, temperature_slider, max_tokens_slider],
        outputs=[msg, chatbot]
    )

    # Handle clear button
    clear_btn.click(
        fn=clear_chat,
        inputs=None,
        outputs=[chatbot, chat_state]
    )

    # Handle reset button: restore every setting to its initial value.
    # Order must match the `outputs` list of reset_btn.click below.
    def reset_settings():
        return [
            "Llama 3.2 (3B) - Fast",  # model_dropdown
            "Python",  # language_dropdown
            0.7,  # temperature_slider
            500,  # max_tokens_slider
            "Explain list comprehensions with examples"  # example_dropdown
        ]

    reset_btn.click(
        fn=reset_settings,
        inputs=None,
        outputs=[model_dropdown, language_dropdown, temperature_slider, max_tokens_slider, example_dropdown]
    )

    # Footer with troubleshooting info
    gr.Markdown("""
---
### ℹ️ About & Troubleshooting

**Powered by**: GROQ API
**Current Models Available**:
- `llama-3.2-3b-preview` (Fast, 3B parameters)
- `llama-3.2-1b-preview` (Lightweight, 1B)
- `llama-3.2-90b-text-preview` (Most powerful, 90B)
- `llama-3.2-11b-text-preview` (Balanced, 11B)
- `mixtral-8x7b-32768` (Mixture of experts)
- `gemma2-9b-it` (Google's model)

**If you see "model not found" error**:
1. Check GROQ Console for available models
2. Ensure your API key has access to the selected model
3. Try a different model from the dropdown

**Note**: Free tier has rate limits. If requests fail, wait 1 minute and try again.
""")

if __name__ == "__main__":
    # 0.0.0.0:7860 is the standard bind for Hugging Face Spaces containers.
    demo.launch(debug=False, server_name="0.0.0.0", server_port=7860)
 
1
  import gradio as gr
2
  import os
3
  import requests
 
 
4
  import time
5
 
6
# Configuration
# API key is read once at import time from the environment
# (Hugging Face Spaces → Settings → Secrets).
GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
# NOTE(review): Groq has retired the "*-preview" Llama 3.2 model ids —
# verify against the current model list (e.g. "llama-3.1-8b-instant").
MODEL = "llama-3.2-3b-preview" # Fastest free model
9
 
10
def ask_groq(message, history):
    """Send the conversation to the GROQ chat-completions API.

    Args:
        message: The new user message.
        history: List of (user, assistant) tuples; only the last two
            exchanges are forwarded to keep requests small and fast.

    Returns:
        The assistant's reply text on success, otherwise a human-readable
        error string — this function never raises; the UI renders whatever
        it returns.
    """
    if not GROQ_API_KEY:
        return " **ERROR**: Please add GROQ_API_KEY in Hugging Face Secrets (Settings → Secrets)"

    # Prepare request
    headers = {
        "Authorization": f"Bearer {GROQ_API_KEY}",
        "Content-Type": "application/json"
    }

    # Build conversation: system prompt first, then recent history.
    messages = [
        {"role": "system", "content": "You are a helpful programming tutor. Keep answers concise (2-3 sentences)."}
    ]

    # Add conversation history (last 2 exchanges max for speed)
    for human, assistant in history[-2:]:
        messages.append({"role": "user", "content": human})
        messages.append({"role": "assistant", "content": assistant})

    # Add current message
    messages.append({"role": "user", "content": message})

    data = {
        "model": MODEL,
        "messages": messages,
        "temperature": 0.7,
        "max_tokens": 200,  # Short for faster responses
        "top_p": 0.9
    }

    try:
        response = requests.post(
            "https://api.groq.com/openai/v1/chat/completions",
            headers=headers,
            json=data,
            timeout=20  # 20 second timeout
        )

        if response.status_code == 200:
            # FIX: a 200 with an unexpected/invalid body previously fell
            # through to the catch-all Exception handler and surfaced as a
            # vague generic error; diagnose it explicitly instead.
            try:
                return response.json()["choices"][0]["message"]["content"]
            except (ValueError, KeyError, IndexError):
                return f"❌ **API ERROR 200**: unexpected response body: {response.text[:100]}..."
        elif response.status_code == 429:
            return "⏰ **RATE LIMITED**: Free tier allows ~1 request/minute. Wait 60 seconds."
        elif response.status_code == 404:
            return f"❌ **MODEL ERROR**: '{MODEL}' not found. Try 'mixtral-8x7b-32768' instead."
        else:
            return f"❌ **API ERROR {response.status_code}**: {response.text[:100]}..."

    except requests.exceptions.Timeout:
        return "⏱️ **TIMEOUT**: GROQ is busy. Try shorter questions."
    except requests.exceptions.ConnectionError:
        return "🌐 **CONNECTION ERROR**: Check your internet or API key."
    except Exception as e:
        return f"⚠️ **ERROR**: {str(e)[:80]}"
66
 
67
# Create the Gradio interface
with gr.Blocks(theme=gr.themes.Soft(), title=" Programming Tutor - GROQ") as demo:

    gr.Markdown("""
# 👨‍💻 Programming Tutor Chatbot
**Powered by GROQ API (Free Tier)**

⚠️ **NOTE**: Free tier has rate limits. If stuck, wait 60 seconds between requests.
""")

    # Create chatbot interface
    chatbot = gr.Chatbot(
        label="Chat History",
        height=400
    )

    # Create message input
    msg = gr.Textbox(
        placeholder="Ask a programming question... (Keep it short for faster responses)",
        label="Your Question",
        lines=2
    )

    # Create state for chat history.
    # NOTE: the event handlers below only output to `chatbot`; chat_state
    # stays in sync because respond() mutates the same list object held here.
    chat_state = gr.State([])

    # Function to handle message submission.
    # Returns ("", history): first value clears the textbox, second
    # refreshes the chat display.
    def respond(message, history):
        # Ignore blank/whitespace-only submissions; leave history unchanged.
        if not message.strip():
            return "", history

        # Get response from GROQ
        bot_reply = ask_groq(message, history)

        # Add to history
        history.append((message, bot_reply))

        return "", history

    # Clear function: empties both the chat display and the stored state.
    def clear_chat():
        return [], []

    # Create buttons in a row
    with gr.Row():
        send_btn = gr.Button("🚀 Send Message", variant="primary")
        clear_btn = gr.Button("🗑️ Clear Chat", variant="secondary")

    # Create example questions in a separate row
    gr.Markdown("### 💡 Quick Example Questions:")

    with gr.Row():
        examples = [
            "Hello! Are you working?",
            "Print 'Hello World' in Python",
            "What is a function?",
            "How to fix syntax errors?"
        ]

        for example in examples:
            btn = gr.Button(example, size="sm")
            # When clicked, set the msg textbox to this example.
            # `x=example` binds the current loop value as a default argument,
            # avoiding the classic late-binding-closure bug in loops.
            btn.click(
                fn=lambda x=example: x,
                inputs=[],
                outputs=msg
            )

    # Set up event handlers (Enter key and Send button share one callback)
    msg.submit(
        fn=respond,
        inputs=[msg, chat_state],
        outputs=[msg, chatbot]
    )

    send_btn.click(
        fn=respond,
        inputs=[msg, chat_state],
        outputs=[msg, chatbot]
    )

    clear_btn.click(
        fn=clear_chat,
        inputs=[],
        outputs=[chatbot, chat_state]
    )

    # Footer with instructions
    gr.Markdown("""
---
### 🔧 **Troubleshooting Guide**

**If you see errors or long delays:**
1. **Wait 60 seconds** between requests (free tier limit)
2. **Keep questions short** (under 15 words)
3. **Test with "Hello"** first
4. **Check API key** in Hugging Face Secrets

**Working questions to test:**
- "Say hello"
- "What is Python?"
- "How to declare a variable?"

**Model**: `llama-3.2-3b-preview` (Fastest free model)
**Timeout**: 20 seconds
""")
173
 
174
# Launch the app
if __name__ == "__main__":
    # Bind to 0.0.0.0:7860 — the standard host/port for Hugging Face Spaces.
    demo.launch(debug=False, server_name="0.0.0.0", server_port=7860)