IW2025 committed on
Commit
6f935de
Β·
verified Β·
1 Parent(s): 97458eb

Upload llm_app.py

Browse files
Files changed (1) hide show
  1. llm_app.py +282 -106
llm_app.py CHANGED
@@ -6,17 +6,157 @@ from langchain_community.embeddings import HuggingFaceEmbeddings
6
  from langchain_community.vectorstores import Chroma
7
  from langchain.prompts import PromptTemplate
8
  from langchain.chains import LLMChain
9
- import requests
10
- import json
11
  import base64
12
  from PIL import Image
13
  import io
14
  import re
 
15
  from dotenv import load_dotenv
16
 
17
  # Load environment variables from .env file
18
  load_dotenv()
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  # --- LLM-Powered Curriculum Assistant ---
21
 
22
  class LLMCurriculumAssistant:
@@ -77,35 +217,23 @@ class LLMCurriculumAssistant:
77
  print("βœ… Vector database built successfully")
78
 
79
  def _setup_llm(self):
80
- """Setup DeepSeek LLM"""
81
  try:
82
- # Initialize DeepSeek client
83
- self.deepseek_api_key = os.environ.get("DEEPSEEK_API_KEY")
84
- self.deepseek_base_url = "https://api.deepseek.com/v1/chat/completions"
85
-
86
- # Check if API key is available
87
- if not self.deepseek_api_key:
88
- print("❌ DEEPSEEK_API_KEY not found in environment variables")
89
- print("Please set your DeepSeek API key as a HuggingFace secret named 'DEEPSEEK_API_KEY'")
90
- raise Exception("DeepSeek API key not found")
91
-
92
- print(f"βœ… DeepSeek API key found: {self.deepseek_api_key[:20]}...")
93
 
94
  # Create content selection prompt
95
  content_selection_template = """Hi! I'm helping a student find the best curriculum slide for their question.
96
-
97
  The student asked: "{question}"
98
-
99
  Here are some slides that might be relevant:
100
  {slide_contents}
101
-
102
  Could you help me pick the slide that best answers their specific question? Look for:
103
  - Slides that specifically mention what they're asking about
104
  - Slides with clear explanations and examples
105
  - Slides that match the exact terms they used (like "for loops" vs just "loops")
106
-
107
  Just respond with the slide number (1, 2, 3, etc.) that you think is most helpful. If none really fit, say "0".
108
-
109
  Thanks! Slide number:"""
110
 
111
  self.content_selection_prompt = PromptTemplate(
@@ -115,12 +243,9 @@ Thanks! Slide number:"""
115
 
116
  # Create answer generation prompt
117
  answer_template = """Hey there! I'm helping a student understand a programming concept. They asked:
118
-
119
  "{question}"
120
-
121
  Here's what the curriculum slide says about it:
122
  {slide_content}
123
-
124
  Could you help me explain this to them in a friendly, educational way? I'd like you to:
125
  - Break it down in simple terms
126
  - Use examples if the slide has them
@@ -128,7 +253,6 @@ Could you help me explain this to them in a friendly, educational way? I'd like
128
  - Add some helpful context if the slide is brief
129
  - Use bullet points or lists to make it clear
130
  - Make sure your answer directly addresses what they asked
131
-
132
  Thanks for your help! Here's what I'd tell the student:"""
133
 
134
  self.answer_prompt = PromptTemplate(
@@ -140,7 +264,7 @@ Thanks for your help! Here's what I'd tell the student:"""
140
 
141
  except Exception as e:
142
  print(f"❌ Error setting up LLM: {e}")
143
- self.deepseek_api_key = None
144
  self.content_selection_prompt = None
145
  self.answer_prompt = None
146
 
@@ -180,7 +304,7 @@ Thanks for your help! Here's what I'd tell the student:"""
180
  selected_content = None
181
  selected_result = None
182
 
183
- if self.deepseek_api_key and self.content_selection_prompt:
184
  try:
185
  # Prepare slide contents for LLM analysis
186
  slide_contents = []
@@ -192,7 +316,7 @@ Thanks for your help! Here's what I'd tell the student:"""
192
 
193
  slide_contents_text = "\n\n".join(slide_contents)
194
 
195
- print("πŸ€– Using DeepSeek to select most relevant content...")
196
 
197
  # Format the prompt
198
  prompt = self.content_selection_prompt.format(
@@ -200,32 +324,16 @@ Thanks for your help! Here's what I'd tell the student:"""
200
  slide_contents=slide_contents_text
201
  )
202
 
203
- # Get DeepSeek's selection
204
- headers = {
205
- "Authorization": f"Bearer {self.deepseek_api_key}",
206
- "Content-Type": "application/json"
207
- }
208
-
209
- data = {
210
- "model": "deepseek-chat",
211
- "messages": [{"role": "user", "content": prompt}],
212
- "max_tokens": 1500,
213
- "temperature": 0.7
214
- }
215
-
216
- response = requests.post(self.deepseek_base_url, headers=headers, json=data)
217
-
218
- if response.status_code == 401:
219
- print("❌ DeepSeek API key is invalid or expired")
220
- print("Please check your DeepSeek API key in HuggingFace secrets")
221
- raise Exception("Invalid DeepSeek API key")
222
- elif response.status_code != 200:
223
- print(f"❌ DeepSeek API error: {response.status_code} - {response.text}")
224
- raise Exception(f"DeepSeek API error: {response.status_code}")
225
 
226
- response.raise_for_status()
227
- selection_response = response.json()["choices"][0]["message"]["content"]
228
- print(f"DeepSeek Selection Response: {selection_response}")
229
 
230
  # Parse the selection
231
  try:
@@ -261,9 +369,9 @@ Thanks for your help! Here's what I'd tell the student:"""
261
 
262
  # Step 3: LLM answer generation
263
  answer = ""
264
- if self.deepseek_api_key and self.answer_prompt and selected_content:
265
  try:
266
- print("πŸ€– Generating DeepSeek answer...")
267
 
268
  # Format the prompt
269
  prompt = self.answer_prompt.format(
@@ -271,35 +379,19 @@ Thanks for your help! Here's what I'd tell the student:"""
271
  slide_content=selected_content
272
  )
273
 
274
- # Get DeepSeek's answer
275
- headers = {
276
- "Authorization": f"Bearer {self.deepseek_api_key}",
277
- "Content-Type": "application/json"
278
- }
279
-
280
- data = {
281
- "model": "deepseek-chat",
282
- "messages": [{"role": "user", "content": prompt}],
283
- "max_tokens": 1500,
284
- "temperature": 0.7
285
- }
286
-
287
- response = requests.post(self.deepseek_base_url, headers=headers, json=data)
288
-
289
- if response.status_code == 401:
290
- print("❌ DeepSeek API key is invalid or expired")
291
- print("Please check your DeepSeek API key in HuggingFace secrets")
292
- raise Exception("Invalid DeepSeek API key")
293
- elif response.status_code != 200:
294
- print(f"❌ DeepSeek API error: {response.status_code} - {response.text}")
295
- raise Exception(f"DeepSeek API error: {response.status_code}")
296
 
297
- response.raise_for_status()
298
- answer = response.json()["choices"][0]["message"]["content"].strip()
299
- print(f"βœ… DeepSeek answer generated: {answer[:100]}...")
300
 
301
  except Exception as e:
302
- print(f"Error generating DeepSeek answer: {e}")
303
  answer = f"Based on the curriculum slide:\n\n{selected_content}\n\nThis slide contains relevant information about your question."
304
  else:
305
  answer = f"Based on the curriculum slide:\n\n{selected_content}\n\nThis slide contains relevant information about your question."
@@ -342,44 +434,128 @@ Thanks for your help! Here's what I'd tell the student:"""
342
 
343
  # --- Gradio UI ---
344
  assistant = LLMCurriculumAssistant()
 
345
 
346
  def gradio_chat(query):
347
  """Gradio chat interface"""
348
  answer, relevant_slides, recommended_slide, recommended_label = assistant.chat(query)
349
  return answer, relevant_slides
350
 
 
 
 
 
 
 
 
 
 
 
351
  with gr.Blocks(title="LLM Curriculum Assistant", theme=gr.themes.Soft()) as demo:
352
- gr.Markdown("# πŸ€– LLM Curriculum Assistant\nYour AI programming tutor with LLM-powered content selection and answers!")
353
 
354
- with gr.Row():
355
- # Left Column - Chatbot Interface
356
- with gr.Column(scale=1):
357
- gr.Markdown("### πŸ’¬ Chatbot")
358
- gr.Markdown("**Ask questions about programming concepts:**")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
359
 
360
- question = gr.Textbox(
361
- label="Question Input",
362
- placeholder="e.g., What are for loops? How do variables work? Explain functions...",
363
- lines=3
364
- )
365
- submit = gr.Button("πŸ€– Ask AI", variant="primary", size="lg")
366
- answer = gr.Markdown(label="LLM Generated Answer")
367
 
368
- # Right Column - Slides Display
369
- with gr.Column(scale=1):
370
- gr.Markdown("### πŸ“„ Most Relevant Slides")
371
- gallery = gr.Gallery(
372
- label="Curriculum Slides",
373
- columns=1,
374
- rows=3,
375
- height="600px",
376
- object_fit="contain",
377
- show_label=False
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
378
  )
379
-
380
- # Event handlers
381
- submit.click(fn=gradio_chat, inputs=[question], outputs=[answer, gallery])
382
- question.submit(fn=gradio_chat, inputs=[question], outputs=[answer, gallery])
383
 
384
  if __name__ == "__main__":
385
  demo.launch()
 
6
  from langchain_community.vectorstores import Chroma
7
  from langchain.prompts import PromptTemplate
8
  from langchain.chains import LLMChain
9
+ import anthropic
 
10
  import base64
11
  from PIL import Image
12
  import io
13
  import re
14
+ import random
15
  from dotenv import load_dotenv
16
 
17
  # Load environment variables from .env file
18
  load_dotenv()
19
 
20
+ # --- Code Practice Assistant ---
21
+
22
+ class CodePracticeAssistant:
23
+ def __init__(self):
24
+ self.anthropic_client = None
25
+ self._setup_llm()
26
+
27
+ def _setup_llm(self):
28
+ """Setup Claude LLM for code practice"""
29
+ try:
30
+ self.anthropic_client = anthropic.Anthropic(
31
+ api_key=os.environ.get("ANTHROPIC_KEY")
32
+ )
33
+ print("βœ… Code Practice LLM setup successful!")
34
+ except Exception as e:
35
+ print(f"❌ Error setting up Code Practice LLM: {e}")
36
+ self.anthropic_client = None
37
+
38
+ def generate_practice_problem(self, topic, problem_type):
39
+ """Generate a practice problem based on topic and type"""
40
+ if not self.anthropic_client:
41
+ return "LLM not available. Please check your API key.", ""
42
+
43
+ # Map dropdown choices to internal problem types
44
+ problem_type_mapping = {
45
+ "Create Practice Problems": "create",
46
+ "Debug - Identify Error Type": "debug_error_type",
47
+ "Debug - Explain Error Reason": "debug_error_reason",
48
+ "Debug - Fix the Error": "debug_fix",
49
+ "Optimize Code Performance": "optimize"
50
+ }
51
+
52
+ internal_type = problem_type_mapping.get(problem_type, "create")
53
+
54
+ problem_types = {
55
+ "create": "Create a coding problem where students need to write code from scratch",
56
+ "debug_error_type": "Create a coding problem with a bug where students need to identify what type of error it is",
57
+ "debug_error_reason": "Create a coding problem with a bug where students need to explain why the error occurs",
58
+ "debug_fix": "Create a coding problem with a bug where students need to fix the code",
59
+ "optimize": "Create a coding problem where students need to optimize/improve the code performance"
60
+ }
61
+
62
+ prompt = f"""Create a programming practice problem for a student learning {topic}.
63
+
64
+ Problem Type: {problem_types.get(internal_type, internal_type)}
65
+
66
+ Requirements:
67
+ - Make it appropriate for beginners to intermediate level
68
+ - Include clear instructions
69
+ - Provide a specific, focused problem
70
+ - If it's a debug problem, include the buggy code
71
+ - If it's an optimization problem, provide the original code
72
+ - Make it engaging and educational
73
+
74
+ Format your response as:
75
+ PROBLEM: [The problem description and requirements]
76
+ CODE: [Any starter code if applicable, or "Write your code here:"]
77
+
78
+ Keep it concise but clear."""
79
+
80
+ try:
81
+ response = self.anthropic_client.messages.create(
82
+ model="claude-3-5-haiku-20241022",
83
+ max_tokens=1000,
84
+ temperature=0.7,
85
+ messages=[{"role": "user", "content": prompt}]
86
+ )
87
+
88
+ result = response.content[0].text.strip()
89
+
90
+ # Parse the response to separate problem and code
91
+ if "PROBLEM:" in result and "CODE:" in result:
92
+ parts = result.split("CODE:")
93
+ problem = parts[0].replace("PROBLEM:", "").strip()
94
+ code = parts[1].strip() if len(parts) > 1 else ""
95
+ else:
96
+ problem = result
97
+ code = ""
98
+
99
+ return problem, code
100
+
101
+ except Exception as e:
102
+ return f"Error generating problem: {str(e)}", ""
103
+
104
+ def analyze_student_code(self, topic, problem_type, problem_description, student_code):
105
+ """Analyze student's code and provide feedback"""
106
+ if not self.anthropic_client:
107
+ return "LLM not available. Please check your API key."
108
+
109
+ # Map dropdown choices to internal problem types
110
+ problem_type_mapping = {
111
+ "Create Practice Problems": "create",
112
+ "Debug - Identify Error Type": "debug_error_type",
113
+ "Debug - Explain Error Reason": "debug_error_reason",
114
+ "Debug - Fix the Error": "debug_fix",
115
+ "Optimize Code Performance": "optimize"
116
+ }
117
+
118
+ internal_type = problem_type_mapping.get(problem_type, "create")
119
+
120
+ analysis_types = {
121
+ "create": "Evaluate the code for correctness, completeness, and best practices",
122
+ "debug_error_type": "Identify what type of error the code has and explain it",
123
+ "debug_error_reason": "Explain why the error occurs in the code",
124
+ "debug_fix": "Provide the corrected code and explain the fixes",
125
+ "optimize": "Suggest optimizations and explain how they improve performance"
126
+ }
127
+
128
+ prompt = f"""Analyze this student's code for a {topic} practice problem.
129
+
130
+ Problem Type: {problem_type}
131
+ Problem Description: {problem_description}
132
+
133
+ Student's Code:
134
+ {student_code}
135
+
136
+ Analysis Type: {analysis_types.get(internal_type, "General analysis")}
137
+
138
+ Please provide:
139
+ 1. A detailed analysis of their code
140
+ 2. What they did well
141
+ 3. Areas for improvement
142
+ 4. If applicable, the correct solution or fixes
143
+ 5. Helpful tips and explanations
144
+
145
+ Be encouraging but honest. Focus on learning and improvement."""
146
+
147
+ try:
148
+ response = self.anthropic_client.messages.create(
149
+ model="claude-3-5-haiku-20241022",
150
+ max_tokens=1500,
151
+ temperature=0.7,
152
+ messages=[{"role": "user", "content": prompt}]
153
+ )
154
+
155
+ return response.content[0].text.strip()
156
+
157
+ except Exception as e:
158
+ return f"Error analyzing code: {str(e)}"
159
+
160
  # --- LLM-Powered Curriculum Assistant ---
161
 
162
  class LLMCurriculumAssistant:
 
217
  print("βœ… Vector database built successfully")
218
 
219
  def _setup_llm(self):
220
+ """Setup Claude LLM"""
221
  try:
222
+ # Initialize Claude client
223
+ self.anthropic_client = anthropic.Anthropic(
224
+ api_key=os.environ.get("ANTHROPIC_KEY")
225
+ )
 
 
 
 
 
 
 
226
 
227
  # Create content selection prompt
228
  content_selection_template = """Hi! I'm helping a student find the best curriculum slide for their question.
 
229
  The student asked: "{question}"
 
230
  Here are some slides that might be relevant:
231
  {slide_contents}
 
232
  Could you help me pick the slide that best answers their specific question? Look for:
233
  - Slides that specifically mention what they're asking about
234
  - Slides with clear explanations and examples
235
  - Slides that match the exact terms they used (like "for loops" vs just "loops")
 
236
  Just respond with the slide number (1, 2, 3, etc.) that you think is most helpful. If none really fit, say "0".
 
237
  Thanks! Slide number:"""
238
 
239
  self.content_selection_prompt = PromptTemplate(
 
243
 
244
  # Create answer generation prompt
245
  answer_template = """Hey there! I'm helping a student understand a programming concept. They asked:
 
246
  "{question}"
 
247
  Here's what the curriculum slide says about it:
248
  {slide_content}
 
249
  Could you help me explain this to them in a friendly, educational way? I'd like you to:
250
  - Break it down in simple terms
251
  - Use examples if the slide has them
 
253
  - Add some helpful context if the slide is brief
254
  - Use bullet points or lists to make it clear
255
  - Make sure your answer directly addresses what they asked
 
256
  Thanks for your help! Here's what I'd tell the student:"""
257
 
258
  self.answer_prompt = PromptTemplate(
 
264
 
265
  except Exception as e:
266
  print(f"❌ Error setting up LLM: {e}")
267
+ self.anthropic_client = None
268
  self.content_selection_prompt = None
269
  self.answer_prompt = None
270
 
 
304
  selected_content = None
305
  selected_result = None
306
 
307
+ if self.anthropic_client and self.content_selection_prompt:
308
  try:
309
  # Prepare slide contents for LLM analysis
310
  slide_contents = []
 
316
 
317
  slide_contents_text = "\n\n".join(slide_contents)
318
 
319
+ print("πŸ€– Using LLM to select most relevant content...")
320
 
321
  # Format the prompt
322
  prompt = self.content_selection_prompt.format(
 
324
  slide_contents=slide_contents_text
325
  )
326
 
327
+ # Get LLM's selection
328
+ response = self.anthropic_client.messages.create(
329
+ model="claude-3-5-haiku-20241022",
330
+ max_tokens=1500,
331
+ temperature=0.7,
332
+ messages=[{"role": "user", "content": prompt}]
333
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
334
 
335
+ selection_response = response.content[0].text
336
+ print(f"LLM Selection Response: {selection_response}")
 
337
 
338
  # Parse the selection
339
  try:
 
369
 
370
  # Step 3: LLM answer generation
371
  answer = ""
372
+ if self.anthropic_client and self.answer_prompt and selected_content:
373
  try:
374
+ print("πŸ€– Generating LLM answer...")
375
 
376
  # Format the prompt
377
  prompt = self.answer_prompt.format(
 
379
  slide_content=selected_content
380
  )
381
 
382
+ # Get LLM's answer
383
+ response = self.anthropic_client.messages.create(
384
+ model="claude-3-5-haiku-20241022",
385
+ max_tokens=1500,
386
+ temperature=0.7,
387
+ messages=[{"role": "user", "content": prompt}]
388
+ )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
389
 
390
+ answer = response.content[0].text.strip()
391
+ print(f"βœ… LLM answer generated: {answer[:100]}...")
 
392
 
393
  except Exception as e:
394
+ print(f"Error generating LLM answer: {e}")
395
  answer = f"Based on the curriculum slide:\n\n{selected_content}\n\nThis slide contains relevant information about your question."
396
  else:
397
  answer = f"Based on the curriculum slide:\n\n{selected_content}\n\nThis slide contains relevant information about your question."
 
434
 
435
  # --- Gradio UI ---
436
  assistant = LLMCurriculumAssistant()
437
+ practice_assistant = CodePracticeAssistant()
438
 
439
  def gradio_chat(query):
440
  """Gradio chat interface"""
441
  answer, relevant_slides, recommended_slide, recommended_label = assistant.chat(query)
442
  return answer, relevant_slides
443
 
444
+ def generate_problem(topic, problem_type):
445
+ """Generate a practice problem"""
446
+ problem, code = practice_assistant.generate_practice_problem(topic, problem_type)
447
+ return problem, code
448
+
449
+ def analyze_code(topic, problem_type, problem_description, student_code):
450
+ """Analyze student's code"""
451
+ analysis = practice_assistant.analyze_student_code(topic, problem_type, problem_description, student_code)
452
+ return analysis
453
+
454
  with gr.Blocks(title="LLM Curriculum Assistant", theme=gr.themes.Soft()) as demo:
455
+ gr.Markdown("# πŸ€– LLM Curriculum Assistant\nYour AI programming tutor with LLM-powered content selection and code practice!")
456
 
457
+ with gr.Tabs():
458
+ # Tab 1: Chat Assistant
459
+ with gr.Tab("πŸ’¬ Chat Assistant"):
460
+ with gr.Row():
461
+ # Left Column - Chatbot Interface
462
+ with gr.Column(scale=1):
463
+ gr.Markdown("### πŸ’¬ Chatbot")
464
+ gr.Markdown("**Ask questions about programming concepts:**")
465
+
466
+ question = gr.Textbox(
467
+ label="Question Input",
468
+ placeholder="e.g., What are for loops? How do variables work? Explain functions...",
469
+ lines=3
470
+ )
471
+ submit = gr.Button("πŸ€– Ask AI", variant="primary", size="lg")
472
+ answer = gr.Markdown(label="LLM Generated Answer")
473
+
474
+ # Right Column - Slides Display
475
+ with gr.Column(scale=1):
476
+ gr.Markdown("### πŸ“„ Most Relevant Slides")
477
+ gallery = gr.Gallery(
478
+ label="Curriculum Slides",
479
+ columns=1,
480
+ rows=3,
481
+ height="600px",
482
+ object_fit="contain",
483
+ show_label=False
484
+ )
485
 
486
+ # Event handlers for chat
487
+ submit.click(fn=gradio_chat, inputs=[question], outputs=[answer, gallery])
488
+ question.submit(fn=gradio_chat, inputs=[question], outputs=[answer, gallery])
 
 
 
 
489
 
490
+ # Tab 2: Code Practice
491
+ with gr.Tab("πŸ’» Code Practice"):
492
+ gr.Markdown("### 🎯 Practice Programming Skills")
493
+ gr.Markdown("Choose a topic and problem type to get started!")
494
+
495
+ with gr.Row():
496
+ # Left Column - Problem Setup
497
+ with gr.Column(scale=1):
498
+ gr.Markdown("#### πŸ“ Problem Setup")
499
+
500
+ topic_input = gr.Textbox(
501
+ label="Topic to Practice",
502
+ placeholder="e.g., for loops, functions, variables, arrays, recursion...",
503
+ lines=2
504
+ )
505
+
506
+ problem_type = gr.Dropdown(
507
+ label="Problem Type",
508
+ choices=[
509
+ "Create Practice Problems",
510
+ "Debug - Identify Error Type",
511
+ "Debug - Explain Error Reason",
512
+ "Debug - Fix the Error",
513
+ "Optimize Code Performance"
514
+ ],
515
+ value="Create Practice Problems"
516
+ )
517
+
518
+ generate_btn = gr.Button("🎲 Generate Problem", variant="primary", size="lg")
519
+
520
+ gr.Markdown("#### πŸ“‹ Problem Description")
521
+ problem_description = gr.Markdown(label="Problem will appear here...")
522
+
523
+ gr.Markdown("#### πŸ’» Starter Code (if applicable)")
524
+ starter_code = gr.Code(
525
+ label="Code Editor",
526
+ language="python",
527
+ lines=10,
528
+ placeholder="# Write your code here..."
529
+ )
530
+
531
+ # Right Column - Student Work & Analysis
532
+ with gr.Column(scale=1):
533
+ gr.Markdown("#### ✍️ Your Solution")
534
+
535
+ student_code = gr.Code(
536
+ label="Your Code",
537
+ language="python",
538
+ lines=15,
539
+ placeholder="# Write your solution here..."
540
+ )
541
+
542
+ analyze_btn = gr.Button("πŸ” Analyze My Code", variant="secondary", size="lg")
543
+
544
+ gr.Markdown("#### πŸ“Š AI Analysis")
545
+ analysis_output = gr.Markdown(label="Analysis will appear here...")
546
+
547
+ # Event handlers for practice
548
+ generate_btn.click(
549
+ fn=generate_problem,
550
+ inputs=[topic_input, problem_type],
551
+ outputs=[problem_description, starter_code]
552
+ )
553
+
554
+ analyze_btn.click(
555
+ fn=analyze_code,
556
+ inputs=[topic_input, problem_type, problem_description, student_code],
557
+ outputs=[analysis_output]
558
  )
 
 
 
 
559
 
560
  if __name__ == "__main__":
561
  demo.launch()