shyamsridhar123 committed on
Commit
5f613ea
·
1 Parent(s): 98c9a2d

Deploy SkillSprout to HF Spaces - Clean version without large media files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ assets/*.mp4 filter=lfs diff=lfs merge=lfs -text
.github/workflows/ci.yml ADDED
@@ -0,0 +1,291 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: SkillSprout CI/CD Pipeline
2
+
3
+ on:
4
+ push:
5
+ branches: [ main, develop ]
6
+ pull_request:
7
+ branches: [ main ]
8
+ schedule:
9
+ # Run daily at 2 AM UTC
10
+ - cron: '0 2 * * *'
11
+
12
+ env:
13
+ PYTHON_VERSION: '3.10'
14
+
15
+ jobs:
16
+ test:
17
+ name: Run Tests
18
+ runs-on: ubuntu-latest
19
+ strategy:
20
+ matrix:
21
+ test-type: [unit, integration]
22
+
23
+ steps:
24
+ - name: Checkout code
25
+ uses: actions/checkout@v4
26
+
27
+ - name: Set up Python
28
+ uses: actions/setup-python@v4
29
+ with:
30
+ python-version: ${{ env.PYTHON_VERSION }}
31
+
32
+ - name: Cache pip dependencies
33
+ uses: actions/cache@v3
34
+ with:
35
+ path: ~/.cache/pip
36
+ key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
37
+ restore-keys: |
38
+ ${{ runner.os }}-pip-
39
+
40
+ - name: Install dependencies
41
+ run: |
42
+ python -m pip install --upgrade pip
43
+ pip install -r requirements.txt
44
+
45
+ - name: Set up test environment
46
+ run: |
47
+ # Create test environment variables
48
+ echo "AZURE_OPENAI_ENDPOINT=https://test.openai.azure.com/" >> $GITHUB_ENV
49
+ echo "AZURE_OPENAI_KEY=test_key_12345678901234567890" >> $GITHUB_ENV
50
+ echo "AZURE_OPENAI_API_VERSION=2024-12-01-preview" >> $GITHUB_ENV
51
+ echo "AZURE_OPENAI_LLM_DEPLOYMENT=gpt-4" >> $GITHUB_ENV
52
+ echo "AZURE_OPENAI_LLM_MODEL=gpt-4" >> $GITHUB_ENV
53
+ echo "AZURE_SPEECH_KEY=test_speech_key_12345678901234567890" >> $GITHUB_ENV
54
+ echo "AZURE_SPEECH_REGION=eastus" >> $GITHUB_ENV
55
+
56
+ - name: Run environment validation
57
+ run: |
58
+ python -m pytest tests/test_environment.py -v
59
+
60
+ - name: Run unit tests
61
+ if: matrix.test-type == 'unit'
62
+ run: |
63
+ python -m pytest -m unit --cov=. --cov-report=xml --cov-report=html -v
64
+
65
+ - name: Run integration tests
66
+ if: matrix.test-type == 'integration'
67
+ run: |
68
+ python -m pytest -m integration -v --tb=short
69
+
70
+ - name: Upload coverage to Codecov
71
+ if: matrix.test-type == 'unit'
72
+ uses: codecov/codecov-action@v3
73
+ with:
74
+ file: ./coverage.xml
75
+ flags: unittests
76
+ name: codecov-umbrella
77
+
78
+ - name: Upload test results
79
+ uses: actions/upload-artifact@v3
80
+ if: always()
81
+ with:
82
+ name: test-results-${{ matrix.test-type }}
83
+ path: |
84
+ htmlcov/
85
+ test_results.json
86
+ coverage.xml
87
+
88
+ performance-test:
89
+ name: Performance Tests
90
+ runs-on: ubuntu-latest
91
+ needs: test
92
+ if: github.event_name == 'schedule' || contains(github.event.head_commit.message, '[perf-test]')
93
+
94
+ steps:
95
+ - name: Checkout code
96
+ uses: actions/checkout@v4
97
+
98
+ - name: Set up Python
99
+ uses: actions/setup-python@v4
100
+ with:
101
+ python-version: ${{ env.PYTHON_VERSION }}
102
+
103
+ - name: Install dependencies
104
+ run: |
105
+ python -m pip install --upgrade pip
106
+ pip install -r requirements.txt
107
+
108
+ - name: Set up test environment
109
+ run: |
110
+ echo "AZURE_OPENAI_ENDPOINT=https://test.openai.azure.com/" >> $GITHUB_ENV
111
+ echo "AZURE_OPENAI_KEY=test_key_12345678901234567890" >> $GITHUB_ENV
112
+ echo "AZURE_OPENAI_API_VERSION=2024-12-01-preview" >> $GITHUB_ENV
113
+ echo "AZURE_OPENAI_LLM_DEPLOYMENT=gpt-4" >> $GITHUB_ENV
114
+ echo "AZURE_OPENAI_LLM_MODEL=gpt-4" >> $GITHUB_ENV
115
+
116
+ - name: Run performance tests
117
+ run: |
118
+ python run_tests.py --type slow --performance
119
+
120
+ - name: Upload performance results
121
+ uses: actions/upload-artifact@v3
122
+ with:
123
+ name: performance-results
124
+ path: test_results.json
125
+
126
+ security-scan:
127
+ name: Security Scan
128
+ runs-on: ubuntu-latest
129
+
130
+ steps:
131
+ - name: Checkout code
132
+ uses: actions/checkout@v4
133
+
134
+ - name: Run Trivy vulnerability scanner
135
+ uses: aquasecurity/trivy-action@master
136
+ with:
137
+ scan-type: 'fs'
138
+ scan-ref: '.'
139
+ format: 'sarif'
140
+ output: 'trivy-results.sarif'
141
+
142
+ - name: Upload Trivy scan results
143
+ uses: github/codeql-action/upload-sarif@v2
144
+ with:
145
+ sarif_file: 'trivy-results.sarif'
146
+
147
+ code-quality:
148
+ name: Code Quality
149
+ runs-on: ubuntu-latest
150
+
151
+ steps:
152
+ - name: Checkout code
153
+ uses: actions/checkout@v4
154
+
155
+ - name: Set up Python
156
+ uses: actions/setup-python@v4
157
+ with:
158
+ python-version: ${{ env.PYTHON_VERSION }}
159
+
160
+ - name: Install quality tools
161
+ run: |
162
+ python -m pip install --upgrade pip
163
+ pip install flake8 black isort mypy
164
+ pip install -r requirements.txt
165
+
166
+ - name: Run Black (code formatting)
167
+ run: |
168
+ black --check --diff .
169
+
170
+ - name: Run isort (import sorting)
171
+ run: |
172
+ isort --check-only --diff .
173
+
174
+ - name: Run flake8 (linting)
175
+ run: |
176
+ flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
177
+ flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
178
+
179
+ - name: Run mypy (type checking)
180
+ run: |
181
+ mypy . --ignore-missing-imports || true
182
+
183
+ test-matrix:
184
+ name: Cross-Platform Testing
185
+ runs-on: ${{ matrix.os }}
186
+ strategy:
187
+ matrix:
188
+ os: [ubuntu-latest, windows-latest, macos-latest]
189
+ python-version: ['3.9', '3.10', '3.11']
190
+ exclude:
191
+ # Reduce matrix to speed up builds
192
+ - os: macos-latest
193
+ python-version: '3.9'
194
+ - os: windows-latest
195
+ python-version: '3.9'
196
+
197
+ steps:
198
+ - name: Checkout code
199
+ uses: actions/checkout@v4
200
+
201
+ - name: Set up Python ${{ matrix.python-version }}
202
+ uses: actions/setup-python@v4
203
+ with:
204
+ python-version: ${{ matrix.python-version }}
205
+
206
+ - name: Install dependencies
207
+ run: |
208
+ python -m pip install --upgrade pip
209
+ pip install -r requirements.txt
210
+
211
+ - name: Set up test environment (Unix)
212
+ if: runner.os != 'Windows'
213
+ run: |
214
+ echo "AZURE_OPENAI_ENDPOINT=https://test.openai.azure.com/" >> $GITHUB_ENV
215
+ echo "AZURE_OPENAI_KEY=test_key_12345678901234567890" >> $GITHUB_ENV
216
+ echo "AZURE_OPENAI_API_VERSION=2024-12-01-preview" >> $GITHUB_ENV
217
+ echo "AZURE_OPENAI_LLM_DEPLOYMENT=gpt-4" >> $GITHUB_ENV
218
+ echo "AZURE_OPENAI_LLM_MODEL=gpt-4" >> $GITHUB_ENV
219
+
220
+ - name: Set up test environment (Windows)
221
+ if: runner.os == 'Windows'
222
+ run: |
223
+ echo "AZURE_OPENAI_ENDPOINT=https://test.openai.azure.com/" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
224
+ echo "AZURE_OPENAI_KEY=test_key_12345678901234567890" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
225
+ echo "AZURE_OPENAI_API_VERSION=2024-12-01-preview" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
226
+ echo "AZURE_OPENAI_LLM_DEPLOYMENT=gpt-4" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
227
+ echo "AZURE_OPENAI_LLM_MODEL=gpt-4" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append
228
+
229
+ - name: Run core tests
230
+ run: |
231
+ python -m pytest tests/test_core_agents.py tests/test_environment.py -v
232
+
233
+ deployment-test:
234
+ name: Deployment Test
235
+ runs-on: ubuntu-latest
236
+ needs: [test, code-quality]
237
+ if: github.ref == 'refs/heads/main'
238
+
239
+ steps:
240
+ - name: Checkout code
241
+ uses: actions/checkout@v4
242
+
243
+ - name: Set up Python
244
+ uses: actions/setup-python@v4
245
+ with:
246
+ python-version: ${{ env.PYTHON_VERSION }}
247
+
248
+ - name: Install dependencies
249
+ run: |
250
+ python -m pip install --upgrade pip
251
+ pip install -r requirements.txt
252
+
253
+ - name: Test application startup
254
+ run: |
255
+ # Test that the application can start without errors
256
+ timeout 30 python -c "
257
+ import sys
258
+ sys.path.append('.')
259
+ try:
260
+ from app import AgenticSkillBuilder
261
+ from space_app import create_interface
262
+ print('✅ Application modules import successfully')
263
+ print('✅ Core classes can be instantiated')
264
+ except Exception as e:
265
+ print(f'❌ Application startup failed: {e}')
266
+ sys.exit(1)
267
+ " || echo "Application startup test completed"
268
+
269
+ notification:
270
+ name: Notify Results
271
+ runs-on: ubuntu-latest
272
+ needs: [test, performance-test, security-scan, code-quality, test-matrix, deployment-test]
273
+ if: always()
274
+
275
+ steps:
276
+ - name: Notify success
277
+ if: ${{ needs.test.result == 'success' && needs.code-quality.result == 'success' }}
278
+ run: |
279
+ echo "🎉 All tests passed successfully!"
280
+ echo "✅ Unit tests: ${{ needs.test.result }}"
281
+ echo "✅ Code quality: ${{ needs.code-quality.result }}"
282
+ echo "✅ Cross-platform: ${{ needs.test-matrix.result }}"
283
+
284
+ - name: Notify failure
285
+ if: ${{ needs.test.result == 'failure' || needs.code-quality.result == 'failure' }}
286
+ run: |
287
+ echo "❌ Some tests failed"
288
+ echo "Unit tests: ${{ needs.test.result }}"
289
+ echo "Code quality: ${{ needs.code-quality.result }}"
290
+ echo "Cross-platform: ${{ needs.test-matrix.result }}"
291
+ exit 1
.gitignore CHANGED
@@ -49,6 +49,17 @@ venv/
49
  env/
50
  .venv/
51
 
 
 
 
 
 
 
 
 
 
 
 
52
  # Model files (if large, use git-lfs instead)
53
  *.pkl
54
  *.joblib
@@ -62,4 +73,12 @@ data/
62
  # Build artifacts
63
  build/
64
  dist/
65
- *.egg-info/
 
 
 
 
 
 
 
 
 
49
  env/
50
  .venv/
51
 
52
+ # Large media files (use Git LFS if needed)
53
+ *.mp4
54
+ *.avi
55
+ *.mov
56
+ *.mkv
57
+ assets/*.mp4
58
+
59
+ # Enhanced version files (development)
60
+ skillsprout_enhanced.py
61
+ space_app copy.py
62
+
63
  # Model files (if large, use git-lfs instead)
64
  *.pkl
65
  *.joblib
 
73
  # Build artifacts
74
  build/
75
  dist/
76
+ *.egg-info/
77
+
78
+ # Development and documentation files
79
+ demo_video_script.md
80
+ deployment_guide.md
81
+ validate_hackathon.py
82
+
83
+ # Internal analysis and debugging documents
84
+ SPACE_APP_ISSUES_ANALYSIS.md
.gradio/certificate.pem DELETED
@@ -1,31 +0,0 @@
1
- -----BEGIN CERTIFICATE-----
2
- MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
- TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
- cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
- WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
- ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
- MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
- h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
- 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
- A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
- T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
- B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
- B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
- KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
- OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
- jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
- qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
- rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
- HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
- hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
- ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
- 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
- NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
- ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
- TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
- jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
- oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
- 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
- mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
- emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
- -----END CERTIFICATE-----
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
PRD.md CHANGED
@@ -4,11 +4,11 @@
4
 
5
  ## Product Name
6
 
7
- **Agentic Skill Builder**
8
 
9
  ## Purpose
10
 
11
- Agentic Skill Builder is an AI-powered microlearning platform designed to help users learn new skills through bite-sized lessons and adaptive quizzes. The platform leverages Azure OpenAI for content generation, Gradio for user interaction, and Model Context Protocol (MCP) for agent interoperability.
12
 
13
  ---
14
 
@@ -62,7 +62,25 @@ Agentic Skill Builder is an AI-powered microlearning platform designed to help u
62
  - Based on performance, the system recommends the next lesson, a review session, or an increased difficulty level.
63
 
64
 
65
- ### 3.2 Agentic Architecture
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
66
 
67
  - **Lesson Agent:** Generates concise lessons for the selected skill.
68
  - **Quiz Agent:** Creates contextually relevant quizzes based on the lesson.
@@ -70,7 +88,7 @@ Agentic Skill Builder is an AI-powered microlearning platform designed to help u
70
  - **Orchestrator:** Coordinates the flow between agents and the user interface.
71
 
72
 
73
- ### 3.3 MCP Integration
74
 
75
  - Expose endpoints for:
76
  - Fetching the next lesson for a user/skill.
@@ -79,7 +97,7 @@ Agentic Skill Builder is an AI-powered microlearning platform designed to help u
79
  - Ensure endpoints are documented and compatible with the Model Context Protocol.
80
 
81
 
82
- ### 3.4 User Interface
83
 
84
  - **Built with Gradio:**
85
  - Step-by-step workflow: Skill selection → Lesson → Quiz → Feedback/Progress.
@@ -100,22 +118,58 @@ Agentic Skill Builder is an AI-powered microlearning platform designed to help u
100
 
101
  ## 5. Optional \& Stretch Features
102
 
103
- - **Speech-to-Text Input:** For language practice, allow users to answer quizzes verbally.
104
- - **Leaderboard:** Display top learners (opt-in).
105
- - **Daily Reminders:** Send notifications or emails to encourage regular learning.
106
- - **Custom Content Upload:** Allow educators to add their own lesson modules.
107
- - **Multi-modal Lessons:** Incorporate images, audio, or video if supported by Azure OpenAI.
 
 
 
 
 
 
108
 
109
  ---
110
 
111
  ## 6. Technical Stack
112
 
113
- - **Backend:** Azure OpenAI (GPT-3.5, GPT-4, or GPT-4o)
 
 
114
  - **Frontend:** Gradio (Python)
115
  - **MCP Integration:** Gradio MCP server functionality
116
  - **Data Storage:** In-memory or lightweight database (for hackathon demo)
117
  - **Deployment:** Hugging Face Spaces or Azure App Service
118
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
119
  ---
120
 
121
  ## 7. Success Metrics
 
4
 
5
  ## Product Name
6
 
7
+ **SkillSprout**
8
 
9
  ## Purpose
10
 
11
+ SkillSprout is an AI-powered microlearning platform designed to help users learn new skills through bite-sized lessons and adaptive quizzes. The platform leverages Azure OpenAI for content generation, Gradio for user interaction, and Model Context Protocol (MCP) for agent interoperability.
12
 
13
  ---
14
 
 
62
  - Based on performance, the system recommends the next lesson, a review session, or an increased difficulty level.
63
 
64
 
65
+ ### 3.2 Enhanced Features
66
+
67
+ #### 3.2.1 Voice Narration System
68
+
69
+ - **AI-Powered Audio**: Convert lesson content to natural-sounding speech using Azure Speech Services
70
+ - **Multi-language Support**: Neural voices supporting various languages and accents
71
+ - **Voice Selection**: Allow users to choose from different voice personalities
72
+ - **Audio Export**: Enable users to download narration files for offline learning
73
+ - **Accessibility Enhancement**: Provide audio-first learning for visually impaired users
74
+
75
+ #### 3.2.2 Gamification System
76
+
77
+ - **Achievement System**: Unlock badges and achievements for various learning milestones
78
+ - **Points & Levels**: Experience points system with automatic level progression
79
+ - **Progress Visualization**: Enhanced progress bars, completion metrics, and visual feedback
80
+ - **Streak Tracking**: Monitor and reward consistent daily learning habits
81
+ - **Skill Mastery**: Calculate and display mastery percentage for each skill area
82
+
83
+ ### 3.3 Agentic Architecture
84
 
85
  - **Lesson Agent:** Generates concise lessons for the selected skill.
86
  - **Quiz Agent:** Creates contextually relevant quizzes based on the lesson.
 
88
  - **Orchestrator:** Coordinates the flow between agents and the user interface.
89
 
90
 
91
+ ### 3.4 MCP Integration
92
 
93
  - Expose endpoints for:
94
  - Fetching the next lesson for a user/skill.
 
97
  - Ensure endpoints are documented and compatible with the Model Context Protocol.
98
 
99
 
100
+ ### 3.5 User Interface
101
 
102
  - **Built with Gradio:**
103
  - Step-by-step workflow: Skill selection → Lesson → Quiz → Feedback/Progress.
 
118
 
119
  ## 5. Optional \& Stretch Features
120
 
121
+ - **Multi-modal Lessons**: Incorporate images, audio, or video if supported by Azure OpenAI
122
+ - **Custom Content Upload**: Allow educators to add their own lesson modules
123
+ - **Daily Reminders**: Send notifications or emails to encourage regular learning
124
+ - **Leaderboard**: Display top learners (opt-in)
125
+ - **Advanced Analytics**: Detailed learning pattern analysis and predictive insights
126
+ - **Social Learning**: Collaborative features and peer-to-peer learning opportunities
127
+
128
+ ### ✅ **Recently Implemented Features**
129
+ - **✅ Voice Narration**: AI-powered audio synthesis with Azure Speech Services (COMPLETED)
130
+ - **✅ Gamification System**: Achievements, points, levels, and progress rewards (COMPLETED)
131
+ - **✅ Enhanced Progress Tracking**: Multi-dimensional analytics and visual feedback (COMPLETED)
132
 
133
  ---
134
 
135
  ## 6. Technical Stack
136
 
137
+ ### 6.1 Core Technologies
138
+
139
+ - **Backend:** Azure OpenAI (GPT-4.1)
140
  - **Frontend:** Gradio (Python)
141
  - **MCP Integration:** Gradio MCP server functionality
142
  - **Data Storage:** In-memory or lightweight database (for hackathon demo)
143
  - **Deployment:** Hugging Face Spaces or Azure App Service
144
 
145
+ ### 6.2 Azure OpenAI Rationale
146
+
147
+ **Strategic Choice: Bridging Enterprise and Open Source**
148
+
149
+ SkillSprout leverages **Azure OpenAI** to deliver the best of both enterprise-grade reliability and open source innovation:
150
+
151
+ #### **🛡️ Enterprise-Grade Foundation**
152
+ - **Content Safety:** Built-in content filtering ensures educational content is appropriate and safe for all learners
153
+ - **Security & Compliance:** Enterprise-level data protection with SOC 2, GDPR, and HIPAA compliance for educational institutions
154
+ - **Observability:** Comprehensive monitoring, logging, and analytics for production workloads and learning analytics
155
+ - **Performance:** Guaranteed SLAs, low latency, and scalable infrastructure for consistent user experience
156
+ - **Global Availability:** Multi-region deployment options ensuring worldwide accessibility for diverse learners
157
+
158
+ #### **🚀 Open Source Innovation**
159
+ - **Model Context Protocol:** Embraces open standards for seamless agent interoperability
160
+ - **Open Architecture:** Modular design compatible with any MCP-compatible client or educational platform
161
+ - **Community Integration:** Works with open source frameworks like Gradio for rapid prototyping and deployment
162
+ - **Extensible Design:** Easy to adapt, modify, and extend for different educational use cases
163
+ - **Developer-Friendly:** Modern APIs with robust documentation and active community support
164
+
165
+ #### **💡 Educational Focus Benefits**
166
+ - **Production-Ready:** Enterprise controls meet innovative open source capabilities for real-world deployment
167
+ - **Content Appropriateness:** AI safety features ensure suitable learning materials for all age groups
168
+ - **Scalable Learning:** Access to latest AI models while maintaining stability and educational governance
169
+ - **Future-Proof:** Continuous model updates and improvements without breaking existing integrations
170
+
171
+ This combination enables educational institutions, enterprises, and individual developers to confidently deploy AI-powered learning solutions at scale while maintaining the flexibility and innovation of open source development.
172
+
173
  ---
174
 
175
  ## 7. Success Metrics
README.md CHANGED
@@ -1,8 +1,8 @@
1
  ---
2
- title: Agentic Skill Builder - MCP Hackathon 2025
3
- emoji: 🚀
4
- colorFrom: blue
5
- colorTo: purple
6
  sdk: gradio
7
  sdk_version: 4.44.0
8
  app_file: space_app.py
@@ -10,17 +10,18 @@ pinned: false
10
  license: mit
11
  tags:
12
  - mcp-server-track
 
13
  - agents
14
  - education
15
  - microlearning
16
  - azure-openai
17
  - model-context-protocol
18
- short_description: AI-powered microlearning platform with MCP integration
19
  ---
20
 
21
- # 🚀 Agentic Skill Builder
22
 
23
- **Track:** mcp-server-track
24
 
25
  An AI-powered microlearning platform that leverages Azure OpenAI, Gradio, and Model Context Protocol (MCP) to deliver personalized bite-sized lessons and adaptive quizzes.
26
 
@@ -28,18 +29,52 @@ An AI-powered microlearning platform that leverages Azure OpenAI, Gradio, and Mo
28
 
29
  ## 🎬 Demo Video
30
 
31
- **MCP Server in Action:** [Demo Video Link](https://your-demo-video-link.com)
 
 
 
 
 
 
 
 
 
 
 
32
 
33
  *Note: The video demonstrates the MCP server endpoints being used by various MCP clients, showcasing the seamless integration between the Gradio interface and Model Context Protocol functionality.*
34
 
35
  ## 🏆 Hackathon Highlights
36
 
37
- This submission demonstrates several key innovations for the **Gradio Agents & MCP Hackathon 2025**:
38
 
39
- ### 🤖 **Track 1: MCP Server/Tool**
40
  - ✅ **Dual-Purpose Application**: Single app serving both Gradio interface AND MCP server
41
  - ✅ **Full MCP Protocol Implementation**: Complete endpoints for lesson generation, progress tracking, and quiz submission
42
  - ✅ **External Agent Integration**: Ready for use by Claude Desktop, Cursor, or any MCP client
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
  ### 🧠 **Agentic Architecture Innovation**
45
  - **🎓 Lesson Agent**: AI-powered content generation with Azure OpenAI
@@ -59,6 +94,33 @@ This submission demonstrates several key innovations for the **Gradio Agents & M
59
  - **Any Skill Learning**: Works for both predefined and custom skills
60
  - **Real-time Analytics**: Live progress tracking and personalized recommendations
61
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  ## ✨ Features
63
 
64
  - 🎯 **Skill Selection**: Choose from predefined skills or enter custom topics
@@ -68,15 +130,239 @@ This submission demonstrates several key innovations for the **Gradio Agents & M
68
  - 🤖 **Agentic Architecture**: Multiple specialized AI agents working together
69
  - 🔗 **MCP Integration**: Model Context Protocol endpoints for external integration
70
  - 🎨 **Modern UI**: Clean, responsive Gradio interface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
 
72
  ## 🏗️ Architecture
73
 
74
- The application uses an agentic architecture with specialized AI agents:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
75
 
76
- - **Lesson Agent**: Generates personalized micro-lessons
77
- - **Quiz Agent**: Creates adaptive quizzes based on lesson content
78
- - **Progress Agent**: Tracks learning progress and provides recommendations
79
- - **Orchestrator**: Coordinates agent interactions and user flow
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
80
 
81
  ## 🚀 Quick Start
82
 
@@ -146,27 +432,81 @@ The application uses an agentic architecture with specialized AI agents:
146
 
147
  ## 🔗 MCP Endpoints
148
 
149
- The application exposes Model Context Protocol endpoints at `http://localhost:8000`:
 
 
150
 
151
- - `GET /skills` - List available skills
152
- - `POST /lesson/generate` - Generate lesson for a skill
153
- - `GET /progress/{user_id}` - Get user progress data
154
- - `POST /quiz/submit` - Submit quiz results
155
- - `POST /quiz/generate` - Generate quiz for a lesson
 
 
156
 
157
- ### API Documentation
158
 
159
- Visit `http://localhost:8000/docs` for interactive API documentation.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
 
161
  ## 📊 Progress Dashboard
162
 
163
- Track your learning journey with:
164
 
 
165
  - **Lessons Completed**: Number of lessons finished per skill
166
- - **Quiz Performance**: Average scores and improvement trends
167
- - **Difficulty Progression**: Automatic difficulty adjustment
168
- - **Learning Streaks**: Consistent learning tracking
169
- - **AI Recommendations**: Personalized next steps
 
 
 
 
 
 
 
 
 
 
 
 
170
 
171
  ## 🔧 Configuration
172
 
@@ -181,6 +521,11 @@ AZURE_OPENAI_KEY="your-api-key"
181
  AZURE_OPENAI_API_VERSION="2024-12-01-preview"
182
  AZURE_OPENAI_LLM_DEPLOYMENT="gpt-4.1"
183
  AZURE_OPENAI_LLM_MODEL="gpt-4.1"
 
 
 
 
 
184
  ```
185
 
186
  ### Optional Settings
@@ -261,19 +606,11 @@ python run.py
261
 
262
  ### Logs
263
 
264
- Check application logs in `agentic_skill_builder.log` for detailed error information.
265
-
266
- ## 🤝 Contributing
267
-
268
- 1. Fork the repository
269
- 2. Create a feature branch
270
- 3. Make your changes
271
- 4. Test thoroughly
272
- 5. Submit a pull request
273
 
274
  ## 📄 License
275
 
276
- This project is part of a hackathon submission. See the full requirements in `i dont want the code but a detailed prd document.md`.
277
 
278
  ## 🏆 Hackathon Features
279
 
@@ -286,15 +623,10 @@ This implementation demonstrates:
286
  - ✅ **MCP Protocol**: External agent integration capability
287
  - ✅ **Progress Analytics**: Comprehensive learning tracking
288
  - ✅ **Error Handling**: Robust error management and fallbacks
 
 
 
 
289
 
290
- ## 📞 Support
291
-
292
- For issues or questions:
293
- 1. Check the troubleshooting section
294
- 2. Review the logs in `agentic_skill_builder.log`
295
- 3. Verify your Azure OpenAI configuration
296
- 4. Ensure all dependencies are installed correctly
297
-
298
- ---
299
 
300
  **Happy Learning! 🎓**
 
1
  ---
2
+ title: SkillSprout - MCP Hackathon 2025
3
+ emoji: 🌱
4
+ colorFrom: green
5
+ colorTo: blue
6
  sdk: gradio
7
  sdk_version: 4.44.0
8
  app_file: space_app.py
 
10
  license: mit
11
  tags:
12
  - mcp-server-track
13
+ - agent-demo-track
14
  - agents
15
  - education
16
  - microlearning
17
  - azure-openai
18
  - model-context-protocol
19
+ short_description: AI-powered microlearning platform with MCP integration - Track 1 & 3 submission
20
  ---
21
 
22
+ # 🌱 SkillSprout
23
 
24
+ **Tracks:** Track 1 (MCP Server/Tool) + Track 3 (Agentic Demo)
25
 
26
  An AI-powered microlearning platform that leverages Azure OpenAI, Gradio, and Model Context Protocol (MCP) to deliver personalized bite-sized lessons and adaptive quizzes.
27
 
 
29
 
30
  ## 🎬 Demo Video
31
 
32
+ **MCP Server in Action:**
33
+
34
+ <video width="100%" controls loop preload="metadata">
35
+ <source src="./assets/hf-hackathon.mp4" type="video/mp4">
36
+ Your browser does not support the video tag. <a href="./assets/hf-hackathon.mp4">Download the demo video</a>
37
+ </video>
38
+
39
+ **🔊 Audio Troubleshooting:**
40
+ - **Volume Check**: Ensure your system volume is up and not muted
41
+ - **Browser Audio**: Check if your browser has audio enabled for this page
42
+ - **Direct Download**: [Click here to download and play the video locally](./assets/hf-hackathon.mp4)
43
+ - **Alternative View**: If embedded video has no sound, try opening the file directly in your browser
44
 
45
  *Note: The video demonstrates the MCP server endpoints being used by various MCP clients, showcasing the seamless integration between the Gradio interface and Model Context Protocol functionality.*
46
 
47
  ## 🏆 Hackathon Highlights
48
 
49
+ This submission demonstrates several key innovations for the **Gradio Agents & MCP Hackathon 2025**, qualifying for **both Track 1 and Track 3**:
50
 
51
+ ### 🔧 **Track 1: MCP Server/Tool**
52
  - ✅ **Dual-Purpose Application**: Single app serving both Gradio interface AND MCP server
53
  - ✅ **Full MCP Protocol Implementation**: Complete endpoints for lesson generation, progress tracking, and quiz submission
54
  - ✅ **External Agent Integration**: Ready for use by Claude Desktop, Cursor, or any MCP client
55
+ - ✅ **Gradio-MCP Bridge**: Demonstrates how any Gradio app can also function as an MCP server
56
+
57
+ ### 🤖 **Track 3: Agentic Demo Showcase**
58
+ - ✅ **Multi-Agent Architecture**: Specialized AI agents (Lesson, Quiz, Progress, Orchestrator) working in harmony
59
+ - ✅ **Intelligent Coordination**: Seamless agent interactions for personalized learning experiences
60
+ - ✅ **Adaptive Behavior**: AI agents that learn from user interactions and adjust accordingly
61
+ - ✅ **Real-world Application**: Production-ready agentic system for education and microlearning
62
+
63
+ ### 🎯 **Dual Track Innovation**
64
+
65
+ **Why SkillSprout qualifies for both tracks:**
66
+
67
+ 1. **Track 1 Compliance**:
68
+ - ✅ Gradio app that functions as a complete MCP server
69
+ - ✅ All required MCP endpoints implemented (`/mcp/skills`, `/mcp/lesson/generate`, `/mcp/progress/{user_id}`, `/mcp/quiz/submit`)
70
+ - ✅ Demonstrates MCP client integration capabilities
71
+ - ✅ Tagged with "mcp-server-track" as required
72
+
73
+ 2. **Track 3 Compliance**:
74
+ - ✅ Showcases powerful AI agents working together (Lesson Agent, Quiz Agent, Progress Agent, Orchestrator)
75
+ - ✅ Demonstrates real-world agentic application for education
76
+ - ✅ Shows creative use of AI agents for personalized learning
77
+ - ✅ Tagged with "agent-demo-track" as required
78
 
79
  ### 🧠 **Agentic Architecture Innovation**
80
  - **🎓 Lesson Agent**: AI-powered content generation with Azure OpenAI
 
94
  - **Any Skill Learning**: Works for both predefined and custom skills
95
  - **Real-time Analytics**: Live progress tracking and personalized recommendations
96
 
97
+ ## 🏢 **Enterprise Meets Open Source: Why Azure OpenAI?**
98
+
99
+ SkillSprout strategically leverages **Azure OpenAI** to bridge the gap between enterprise-grade reliability and open source innovation:
100
+
101
+ ### 🛡️ **Enterprise-Grade Foundation**
102
+ - **🔒 Content Safety**: Built-in content filtering and safety guardrails for educational content
103
+ - **📊 Observability**: Comprehensive monitoring, logging, and analytics for production workloads
104
+ - **🔐 Security & Compliance**: Enterprise-level data protection and regulatory compliance (SOC 2, GDPR, HIPAA)
105
+ - **⚡ Performance**: Guaranteed SLAs, low latency, and scalable infrastructure
106
+ - **🌍 Global Availability**: Multi-region deployment options for worldwide accessibility
107
+
108
+ ### 🚀 **Open Source Innovation**
109
+ - **🔗 Model Context Protocol**: Embracing open standards for agent interoperability
110
+ - **📖 Open Architecture**: Modular design that works with any MCP-compatible client
111
+ - **🎨 Gradio Integration**: Leveraging open source UI frameworks for rapid prototyping
112
+ - **🛠️ Extensible Design**: Easy to adapt, modify, and extend for different use cases
113
+ - **🤝 Community-Driven**: Contributing to the broader AI education ecosystem
114
+
115
+ ### 💡 **Best of Both Worlds**
116
+ By choosing Azure OpenAI, SkillSprout delivers:
117
+ - **Production-Ready**: Enterprise controls meet innovative open source capabilities
118
+ - **Developer-Friendly**: Modern APIs with robust documentation and community support
119
+ - **Future-Proof**: Access to latest models while maintaining stability and governance
120
+ - **Educational Focus**: Content safety ensures appropriate learning materials for all audiences
121
+
122
+ This combination enables educational institutions, enterprises, and individual developers to confidently deploy AI-powered learning solutions at scale.
123
+
124
  ## ✨ Features
125
 
126
  - 🎯 **Skill Selection**: Choose from predefined skills or enter custom topics
 
130
  - 🤖 **Agentic Architecture**: Multiple specialized AI agents working together
131
  - 🔗 **MCP Integration**: Model Context Protocol endpoints for external integration
132
  - 🎨 **Modern UI**: Clean, responsive Gradio interface
133
+ - 🎧 **Voice Narration**: AI-powered audio narration with Azure Speech Services
134
+ - 🏆 **Gamification System**: Achievements, points, levels, and progress rewards
135
+ - 🌟 **Enhanced Learning Experience**: Multi-modal learning with audio and visual feedback
136
+
137
+ ### 🎧 Voice Narration System
138
+
139
+ SkillSprout includes an advanced **AI-powered voice narration system** that transforms text-based lessons into engaging audio experiences:
140
+
141
+ #### **🎤 Azure Speech Services Integration**
142
+ - **High-Quality Voices**: Powered by Azure Cognitive Services Speech SDK
143
+ - **Multi-language Support**: Neural voices supporting various languages and accents
144
+ - **Customizable Voice Selection**: Choose from different voice personalities
145
+ - **Natural Speech Synthesis**: Advanced SSML support for natural-sounding narration
146
+
147
+ #### **🎵 Voice Features**
148
+ - **Lesson Narration**: Convert any lesson content to professional audio
149
+ - **Adaptive Pacing**: Intelligent speech timing for optimal learning
150
+ - **Audio Export**: Download narration files for offline learning
151
+ - **Accessibility Support**: Screen reader compatibility and audio-first learning
152
+
153
+ #### **🔧 Voice Configuration**
154
+ ```env
155
+ # Azure Speech Services Configuration (Optional)
156
+ AZURE_SPEECH_KEY="your-speech-api-key"
157
+ AZURE_SPEECH_REGION="eastus2"
158
+ AZURE_SPEECH_VOICE="en-US-AvaMultilingualNeural"
159
+ ```
160
+
161
+ ### 🏆 Gamification & Achievement System
162
+
163
+ SkillSprout incorporates a comprehensive **gamification system** to motivate learners and track progress:
164
+
165
+ #### **🎯 Achievement Categories**
166
+ - **🎯 First Steps**: Complete your first lesson
167
+ - **🧠 Quiz Master**: Score 100% on a quiz
168
+ - **💪 Persistent Learner**: Complete 5 lessons
169
+ - **🎓 Scholar**: Complete 10 lessons
170
+ - **⭐ Domain Expert**: Master a skill (20 lessons)
171
+ - **🌍 Polyglot**: Learn 3 different skills
172
+ - **💯 Perfectionist**: Score 100% on 5 quizzes
173
+ - **⚡ Speed Learner**: Complete lesson in under 3 minutes
174
+ - **📅 Consistent**: Learn for 7 days in a row
175
+ - **🎧 Explorer**: Try voice narration feature
176
+
177
+ #### **📊 Progress Metrics**
178
+ - **Points System**: Earn points for lessons, quizzes, and achievements
179
+ - **Level Progression**: Automatic level advancement based on activity
180
+ - **Mastery Tracking**: Skill-specific mastery percentage calculation
181
+ - **Streak Counters**: Daily learning streak tracking
182
+ - **Performance Analytics**: Detailed learning pattern insights
183
+
184
+ #### **🌟 Enhanced User Experience**
185
+ - **Real-time Notifications**: Instant achievement unlocks and progress updates
186
+ - **Visual Progress Indicators**: Progress bars, badges, and completion metrics
187
+ - **Personalized Recommendations**: AI-driven next steps based on progress patterns
188
+ - **Social Features**: Achievement sharing and progress visibility (optional)
189
 
190
  ## 🏗️ Architecture
191
 
192
+ ### Multi-Agent System Design
193
+
194
+ The application implements a sophisticated **agentic architecture** where specialized AI agents collaborate to deliver personalized learning experiences:
195
+
196
+ #### 🧠 **Core Agents**
197
+
198
+ 1. **🎓 Lesson Agent** (`LessonAgent` class)
199
+ - **Purpose**: Generates personalized micro-lessons (3-5 minutes)
200
+ - **AI Integration**: Uses Azure OpenAI GPT-4.1 for content creation
201
+ - **Adaptive Logic**: Adjusts content based on user difficulty level and learning history
202
+ - **Output**: Structured lessons with key concepts, examples, and practice exercises
203
+
204
+ 2. **🧪 Quiz Agent** (`QuizAgent` class)
205
+ - **Purpose**: Creates adaptive quizzes based on lesson content
206
+ - **Smart Features**: Adjusts question difficulty based on user performance
207
+ - **Question Types**: Multiple choice, true/false, and open-ended questions
208
+ - **Scoring**: Intelligent evaluation with detailed explanations
209
+
210
+ 3. **📊 Progress Agent** (`ProgressAgent` class)
211
+ - **Purpose**: Tracks learning progress and provides personalized recommendations
212
+ - **Analytics**: Monitors completion rates, quiz scores, and learning patterns
213
+ - **Adaptive Difficulty**: Automatically adjusts lesson difficulty based on performance
214
+ - **Recommendations**: Suggests next learning steps and skill improvements
215
+
216
+ 4. **🎯 Orchestrator** (`AgenticSkillBuilder` class)
217
+ - **Purpose**: Coordinates all agent interactions and user workflow
218
+ - **Session Management**: Handles user state and learning sessions
219
+ - **Error Handling**: Robust fallback mechanisms for AI service failures
220
+ - **Integration**: Bridges Gradio interface with MCP server endpoints
221
+
222
+ #### 🏛️ **System Architecture**
223
+
224
+ ```mermaid
225
+ graph TB
226
+ subgraph "Entry Points"
227
+ A[space_app.py<br/>HF Spaces<br/>• Gradio Interface<br/>• MCP Server Endpoints]
228
+ B[run.py<br/>Local Dev<br/>• Development Launcher<br/>• Multi-service Manager]
229
+ end
230
+
231
+ subgraph "Core Engine"
232
+ C[app.py]
233
+ D[Lesson Agent<br/>• AI Content<br/>• Personalized<br/>• Micro-lessons]
234
+ E[Quiz Agent<br/>• Adaptive Quiz<br/>• Smart Scoring<br/>• Explanations]
235
+ F[Progress Agent<br/>• Analytics<br/>• Difficulty Adjustment]
236
+ G[Orchestrator<br/>Coordinator]
237
+ end
238
+
239
+ subgraph "External Services"
240
+ H[Azure OpenAI<br/>• GPT-4.1 for Content<br/>• Intelligent Responses<br/>• Adaptive Generation]
241
+ I[Environment Variables<br/>• Local: .env file<br/>• HF Spaces: Repository Secrets]
242
+ end
243
+
244
+ A --> C
245
+ B --> C
246
+ C --> D
247
+ C --> E
248
+ C --> F
249
+ D --> G
250
+ E --> G
251
+ F --> G
252
+ G --> H
253
+ G --> I
254
+
255
+ style A fill:#e1f5fe
256
+ style B fill:#e1f5fe
257
+ style C fill:#f3e5f5
258
+ style D fill:#e8f5e8
259
+ style E fill:#e8f5e8
260
+ style F fill:#e8f5e8
261
+ style G fill:#fff3e0
262
+ style H fill:#fce4ec
263
+ style I fill:#fce4ec
264
+ ```
265
 
266
+ #### 🔗 **MCP Integration Architecture**
267
+
268
+ The application serves as both a **Gradio app** and **MCP server**, enabling external integrations:
269
+
270
+ ```mermaid
271
+ graph TD
272
+ subgraph "MCP Server Endpoints"
273
+ A[GET /mcp/skills<br/>List available skills]
274
+ B[POST /mcp/lesson/generate<br/>Generate personalized lessons]
275
+ C[GET /mcp/progress/{user_id}<br/>Get learning analytics]
276
+ D[POST /mcp/quiz/submit<br/>Submit quiz results]
277
+ end
278
+
279
+ subgraph "External MCP Clients"
280
+ E[Claude Desktop]
281
+ F[Cursor IDE]
282
+ G[Custom Tools]
283
+ H[Learning Dashboards]
284
+ I[Educational Platforms]
285
+ end
286
+
287
+ A --> E
288
+ A --> F
289
+ A --> G
290
+ B --> E
291
+ B --> F
292
+ B --> G
293
+ C --> H
294
+ C --> I
295
+ D --> H
296
+ D --> I
297
+
298
+ style A fill:#e3f2fd
299
+ style B fill:#e3f2fd
300
+ style C fill:#e3f2fd
301
+ style D fill:#e3f2fd
302
+ style E fill:#f1f8e9
303
+ style F fill:#f1f8e9
304
+ style G fill:#f1f8e9
305
+ style H fill:#fff8e1
306
+ style I fill:#fff8e1
307
+ ```
308
+
309
+ #### 📂 **File Structure**
310
+
311
+ - **`space_app.py`** - Primary entry point for Hugging Face Spaces
312
+ - Unified Gradio interface + MCP server
313
+ - FastAPI integration for MCP endpoints
314
+ - Environment variable handling
315
+
316
+ - **`app.py`** - Core business logic and agent classes
317
+ - All agent implementations
318
+ - Data models (`UserProgress`, `Lesson`, `Quiz`)
319
+ - Orchestration logic
320
+
321
+ - **`run.py`** - Local development launcher
322
+ - Multi-service startup options
323
+ - Development utilities
324
+
325
+ #### 🔄 **Agent Workflow**
326
+
327
+ ```mermaid
328
+ sequenceDiagram
329
+ participant U as User Interface
330
+ participant O as Orchestrator
331
+ participant LA as Lesson Agent
332
+ participant PA as Progress Agent
333
+ participant QA as Quiz Agent
334
+ participant AI as Azure OpenAI
335
+ participant MCP as MCP Endpoints
336
+
337
+ U->>O: Learning Request
338
+ O->>PA: Check User Progress
339
+ PA->>O: Current Level & Analytics
340
+ O->>LA: Generate Lesson Request
341
+ LA->>AI: Content Generation
342
+ AI->>LA: Personalized Content
343
+ LA->>O: Structured Lesson
344
+ O->>U: Display Lesson
345
+
346
+ U->>O: Take Quiz Request
347
+ O->>QA: Create Adaptive Quiz
348
+ QA->>AI: Generate Questions
349
+ AI->>QA: Quiz Content
350
+ QA->>O: Structured Quiz
351
+ O->>U: Display Quiz
352
+
353
+ U->>O: Submit Quiz Answers
354
+ O->>QA: Score Submission
355
+ QA->>PA: Update Progress
356
+ PA->>O: Next Recommendations
357
+ O->>U: Results & Next Steps
358
+
359
+ Note over MCP: External tools can access<br/>same functionality via<br/>MCP endpoints
360
+
361
+ MCP-->>O: External Agent Request
362
+ O-->>MCP: Unified Response
363
+ ```
364
+
365
+ This architecture ensures **scalability**, **modularity**, and **seamless integration** with external learning environments through the Model Context Protocol.
366
 
367
  ## 🚀 Quick Start
368
 
 
432
 
433
  ## 🔗 MCP Endpoints
434
 
435
+ The application exposes **Model Context Protocol endpoints** for seamless integration with external tools and agents. These endpoints enable other applications to leverage our agentic learning system:
436
+
437
+ ### 🌐 **Available Endpoints**
438
 
439
+ | Endpoint | Method | Description | Use Case |
440
+ |----------|--------|-------------|----------|
441
+ | `/` | GET | Server info & hackathon details | Health check & discovery |
442
+ | `/mcp/skills` | GET | List available predefined skills | Skill discovery for external agents |
443
+ | `/mcp/lesson/generate` | POST | Generate personalized lesson | Create content for learning platforms |
444
+ | `/mcp/progress/{user_id}` | GET | Get user learning analytics | Progress tracking in external systems |
445
+ | `/mcp/quiz/submit` | POST | Submit quiz answers | Assessment integration |
446
 
447
+ ### 📋 **Request/Response Examples**
448
 
449
+ #### Generate Lesson
450
+ ```bash
451
+ POST /mcp/lesson/generate
452
+ {
453
+ "skill": "Python Programming",
454
+ "user_id": "learner_123",
455
+ "difficulty": "beginner"
456
+ }
457
+ ```
458
+
459
+ #### Get Progress
460
+ ```bash
461
+ GET /mcp/progress/learner_123?skill=Python%20Programming
462
+ ```
463
+
464
+ ### 🔧 **Integration Examples**
465
+
466
+ - **Claude Desktop**: Use our MCP endpoints as a learning assistant
467
+ - **Cursor IDE**: Integrate personalized tutorials into development workflow
468
+ - **Learning Management Systems**: Embed our AI-generated content
469
+ - **Educational Dashboards**: Pull progress analytics for reporting
470
+
471
+ ### 📖 **API Documentation**
472
+
473
+ - **Local Development**: Visit `http://localhost:8001/docs` for interactive API docs
474
+ - **Hugging Face Spaces**: Visit your deployed space URL + `/docs`
475
+
476
+ ### 🧪 **Testing MCP Integration**
477
+
478
+ ```bash
479
+ # Test lesson generation
480
+ curl -X POST "https://your-space.hf.space/mcp/lesson/generate" \
481
+ -H "Content-Type: application/json" \
482
+ -d '{"skill": "Python Programming", "user_id": "test_user"}'
483
+
484
+ # Get user progress
485
+ curl "https://your-space.hf.space/mcp/progress/test_user?skill=Python%20Programming"
486
+ ```
487
 
488
  ## 📊 Progress Dashboard
489
 
490
+ Track your learning journey with enhanced analytics and gamification:
491
 
492
+ ### 📈 **Learning Analytics**
493
  - **Lessons Completed**: Number of lessons finished per skill
494
+ - **Quiz Performance**: Average scores and improvement trends
495
+ - **Difficulty Progression**: Automatic difficulty adjustment tracking
496
+ - **Learning Streaks**: Consistent learning habit monitoring
497
+ - **AI Recommendations**: Personalized next steps and skill suggestions
498
+
499
+ ### 🏆 **Gamification Tracking**
500
+ - **Achievement Progress**: Real-time achievement unlocking and badges
501
+ - **Points & Levels**: Experience points system with automatic level progression
502
+ - **Skill Mastery**: Detailed mastery percentage for each skill area
503
+ - **Performance Insights**: Learning velocity, accuracy trends, and engagement metrics
504
+ - **Goal Setting**: Personalized learning targets and milestone tracking
505
+
506
+ ### 🎧 **Voice Learning Analytics**
507
+ - **Audio Engagement**: Voice narration usage patterns and preferences
508
+ - **Multi-modal Learning**: Balance between text and audio learning sessions
509
+ - **Accessibility Metrics**: Audio-first learning progress and completion rates
510
 
511
  ## 🔧 Configuration
512
 
 
521
  AZURE_OPENAI_API_VERSION="2024-12-01-preview"
522
  AZURE_OPENAI_LLM_DEPLOYMENT="gpt-4.1"
523
  AZURE_OPENAI_LLM_MODEL="gpt-4.1"
524
+
525
+ # Azure Speech Services (Optional - for voice narration)
526
+ AZURE_SPEECH_KEY="your-speech-api-key"
527
+ AZURE_SPEECH_REGION="eastus2"
528
+ AZURE_SPEECH_VOICE="en-US-AvaMultilingualNeural"
529
  ```
530
 
531
  ### Optional Settings
 
606
 
607
  ### Logs
608
 
609
+ Check application logs in `skillsprout.log` for detailed error information.
 
 
 
 
 
 
 
 
610
 
611
  ## 📄 License
612
 
613
+ This project is part of a hackathon submission.
614
 
615
  ## 🏆 Hackathon Features
616
 
 
623
  - ✅ **MCP Protocol**: External agent integration capability
624
  - ✅ **Progress Analytics**: Comprehensive learning tracking
625
  - ✅ **Error Handling**: Robust error management and fallbacks
626
+ - ✅ **Voice Narration**: AI-powered audio synthesis with Azure Speech Services
627
+ - ✅ **Gamification System**: Achievements, points, levels, and user engagement
628
+ - ✅ **Multi-modal Learning**: Text, audio, and interactive experiences
629
+ - ✅ **Enhanced UX**: Real-time feedback, progress visualization, and personalized recommendations
630
 
 
 
 
 
 
 
 
 
 
631
 
632
  **Happy Learning! 🎓**
README_spaces.md CHANGED
@@ -1,4 +1,4 @@
1
- title: Agentic Skill Builder - MCP Hackathon 2025
2
  emoji: 🚀
3
  colorFrom: blue
4
  colorTo: purple
@@ -9,9 +9,10 @@ pinned: false
9
  license: mit
10
  tags:
11
  - mcp-server-track
 
12
  - agents
13
  - education
14
  - microlearning
15
  - azure-openai
16
  - model-context-protocol
17
- short_description: AI-powered microlearning platform with MCP integration for the Gradio Agents & MCP Hackathon 2025
 
1
+ title: SkillSprout - MCP Hackathon 2025
2
  emoji: 🚀
3
  colorFrom: blue
4
  colorTo: purple
 
9
  license: mit
10
  tags:
11
  - mcp-server-track
12
+ - agent-demo-track
13
  - agents
14
  - education
15
  - microlearning
16
  - azure-openai
17
  - model-context-protocol
18
+ short_description: AI-powered microlearning platform with MCP integration - Track 1 & 3 submission for Gradio Agents & MCP Hackathon 2025
app.py CHANGED
@@ -440,7 +440,7 @@ def create_interface():
440
  """Create the Gradio interface"""
441
 
442
  with gr.Blocks(
443
- title="Agentic Skill Builder",
444
  theme=gr.themes.Soft(),
445
  css="""
446
  .gradio-container {
@@ -452,7 +452,7 @@ def create_interface():
452
 
453
  # Header
454
  gr.Markdown("""
455
- # 🚀 Agentic Skill Builder
456
  ### AI-Powered Microlearning Platform
457
 
458
  Learn new skills through bite-sized lessons and adaptive quizzes powered by Azure OpenAI!
 
440
  """Create the Gradio interface"""
441
 
442
  with gr.Blocks(
443
+ title="SkillSprout",
444
  theme=gr.themes.Soft(),
445
  css="""
446
  .gradio-container {
 
452
 
453
  # Header
454
  gr.Markdown("""
455
+ # 🌱 SkillSprout
456
  ### AI-Powered Microlearning Platform
457
 
458
  Learn new skills through bite-sized lessons and adaptive quizzes powered by Azure OpenAI!
config.py DELETED
@@ -1,86 +0,0 @@
1
- """
2
- Configuration module for Agentic Skill Builder
3
- Handles environment variables, logging, and application settings
4
- """
5
-
6
- import os
7
- import logging
8
- from typing import Optional
9
- from dataclasses import dataclass
10
-
11
- @dataclass
12
- class AzureOpenAIConfig:
13
- """Configuration for Azure OpenAI service"""
14
- endpoint: str
15
- api_key: str
16
- api_version: str
17
- llm_deployment: str
18
- llm_model: str
19
- embeddings_deployment: str
20
- embeddings_model: str
21
-
22
- @classmethod
23
- def from_env(cls) -> 'AzureOpenAIConfig':
24
- """Create configuration from environment variables"""
25
- return cls(
26
- endpoint=os.getenv("AZURE_OPENAI_ENDPOINT", "").replace('"', ''),
27
- api_key=os.getenv("AZURE_OPENAI_KEY", "").replace('"', ''),
28
- api_version=os.getenv("AZURE_OPENAI_API_VERSION", "2024-12-01-preview").replace('"', ''),
29
- llm_deployment=os.getenv("AZURE_OPENAI_LLM_DEPLOYMENT", "gpt-4.1").replace('"', ''),
30
- llm_model=os.getenv("AZURE_OPENAI_LLM_MODEL", "gpt-4.1").replace('"', ''),
31
- embeddings_deployment=os.getenv("AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT", "text-embedding-3-small").replace('"', ''),
32
- embeddings_model=os.getenv("AZURE_OPENAI_EMBEDDINGS_MODEL", "text-embedding-3-small").replace('"', '')
33
- )
34
-
35
- def validate(self) -> bool:
36
- """Validate that all required settings are present"""
37
- required_fields = [self.endpoint, self.api_key, self.llm_deployment]
38
- return all(field.strip() for field in required_fields)
39
-
40
- @dataclass
41
- class AppConfig:
42
- """Main application configuration"""
43
- debug: bool = False
44
- log_level: str = "INFO"
45
- gradio_port: int = 7860
46
- mcp_port: int = 8000
47
- max_quiz_questions: int = 5
48
- default_lesson_duration: int = 5
49
- azure_openai: Optional[AzureOpenAIConfig] = None
50
-
51
- @classmethod
52
- def from_env(cls) -> 'AppConfig':
53
- """Create configuration from environment variables"""
54
- return cls(
55
- debug=os.getenv("DEBUG", "false").lower() == "true",
56
- log_level=os.getenv("LOG_LEVEL", "INFO").upper(),
57
- gradio_port=int(os.getenv("GRADIO_PORT", "7860")),
58
- mcp_port=int(os.getenv("MCP_PORT", "8000")),
59
- max_quiz_questions=int(os.getenv("MAX_QUIZ_QUESTIONS", "5")),
60
- default_lesson_duration=int(os.getenv("DEFAULT_LESSON_DURATION", "5")),
61
- azure_openai=AzureOpenAIConfig.from_env()
62
- )
63
-
64
- def setup_logging(config: AppConfig):
65
- """Setup application logging"""
66
- logging.basicConfig(
67
- level=getattr(logging, config.log_level),
68
- format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
69
- handlers=[
70
- logging.StreamHandler(),
71
- logging.FileHandler('agentic_skill_builder.log')
72
- ]
73
- )
74
-
75
- def get_config() -> AppConfig:
76
- """Get application configuration"""
77
- config = AppConfig.from_env()
78
-
79
- # Validate Azure OpenAI configuration
80
- if not config.azure_openai or not config.azure_openai.validate():
81
- raise ValueError(
82
- "Azure OpenAI configuration is incomplete. "
83
- "Please check your .env file for AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_KEY, and AZURE_OPENAI_LLM_DEPLOYMENT"
84
- )
85
-
86
- return config
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
demo_video_script.md DELETED
@@ -1,212 +0,0 @@
1
- # 🎬 Demo Video Script - Agentic Skill Builder MCP Server
2
-
3
- ## Video Duration: 3-5 minutes
4
- ## Target Audience: Hackathon judges and MCP developers
5
-
6
- ---
7
-
8
- ## 🎯 **Opening (0:00 - 0:30)**
9
-
10
- **Visual:** Screen showing the Gradio interface running on localhost:7860
11
- **Narration:**
12
- > "Welcome to the Agentic Skill Builder - our submission for the Gradio Agents & MCP Hackathon 2025, Track 1: MCP Server/Tool. This is a unique AI-powered microlearning platform that serves both as a beautiful Gradio interface AND a fully functional MCP server."
13
-
14
- ---
15
-
16
- ## 🔗 **MCP Server Demonstration (0:30 - 2:00)**
17
-
18
- ### Part 1: Server Status & Endpoints (0:30 - 1:00)
19
- **Visual:** Browser showing http://localhost:8001/
20
- **Narration:**
21
- > "First, let me show you our MCP server running on port 8001. This endpoint provides metadata about our hackathon submission."
22
-
23
- **Action:** Show JSON response with hackathon information
24
-
25
- **Visual:** Terminal/Postman showing MCP endpoints
26
- **Narration:**
27
- > "Our MCP server exposes four key endpoints:
28
- > - GET /mcp/skills - Lists available learning skills
29
- > - POST /mcp/lesson/generate - Creates personalized lessons
30
- > - GET /mcp/progress/{user_id} - Tracks learning progress
31
- > - POST /mcp/quiz/submit - Processes quiz submissions"
32
-
33
- ### Part 2: Live MCP Endpoint Testing (1:00 - 2:00)
34
- **Visual:** Testing each MCP endpoint with curl or PowerShell
35
- **Narration:**
36
- > "Let me demonstrate these endpoints in action. First, getting available skills..."
37
-
38
- **Action:** Show GET /mcp/skills response
39
- ```powershell
40
- curl http://localhost:8001/mcp/skills
41
- ```
42
- **Expected Response:**
43
- ```json
44
- {
45
- "predefined_skills": ["Python Programming", "Spanish Language", "Public Speaking", "Data Science", "Machine Learning", "JavaScript", "Project Management", "Digital Marketing", "Creative Writing", "Photography"]
46
- }
47
- ```
48
-
49
- > "Now generating a personalized lesson for Python Programming..."
50
-
51
- **Action:** Show POST /mcp/lesson/generate with request body and response
52
- ```powershell
53
- curl -X POST http://localhost:8001/mcp/lesson/generate `
54
- -H "Content-Type: application/json" `
55
- -d '{
56
- "skill": "Python Programming",
57
- "level": "beginner",
58
- "user_context": "I want to learn Python for data analysis"
59
- }'
60
- ```
61
- **Expected Response:**
62
- ```json
63
- {
64
- "lesson_id": "lesson_12345",
65
- "skill": "Python Programming",
66
- "title": "Introduction to Python for Data Analysis",
67
- "content": "Python is a powerful programming language...",
68
- "difficulty": "beginner",
69
- "estimated_time": "15 minutes",
70
- "mcp_server": "Agentic Skill Builder"
71
- }
72
- ```
73
-
74
- > "Let's check user progress to see learning analytics..."
75
-
76
- **Action:** Show GET /mcp/progress/demo_user response
77
- ```powershell
78
- curl http://localhost:8001/mcp/progress/demo_user
79
- ```
80
- **Expected Response:**
81
- ```json
82
- {
83
- "user_id": "demo_user",
84
- "skills_progress": {
85
- "Python Programming": {
86
- "lessons_completed": 2,
87
- "quiz_scores": [85, 92],
88
- "current_level": "intermediate"
89
- }
90
- },
91
- "total_skills_learning": 1,
92
- "mcp_server": "Agentic Skill Builder",
93
- "timestamp": "2025-06-07T04:48:36.691517"
94
- }
95
- ```
96
-
97
- > "Finally, let's submit a quiz answer to show the interactive capabilities..."
98
-
99
- **Action:** Show POST /mcp/quiz/submit with request body and response
100
- ```powershell
101
- curl -X POST http://localhost:8001/mcp/quiz/submit `
102
- -H "Content-Type: application/json" `
103
- -d '{
104
- "user_id": "demo_user",
105
- "quiz_id": "quiz_python_001",
106
- "answers": ["list", "dictionary", "tuple"],
107
- "skill": "Python Programming"
108
- }'
109
- ```
110
- **Expected Response:**
111
- ```json
112
- {
113
- "quiz_id": "quiz_python_001",
114
- "user_id": "demo_user",
115
- "score": 85,
116
- "feedback": "Great job! You correctly identified Python data structures.",
117
- "passed": true,
118
- "mcp_server": "Agentic Skill Builder"
119
- }
120
- ```
121
-
122
- ---
123
-
124
- ## 🎨 **Gradio Interface Demo (2:00 - 3:30)**
125
-
126
- ### Part 1: Learning Flow (2:00 - 3:00)
127
- **Visual:** Gradio interface at localhost:7860
128
- **Narration:**
129
- > "Now let's see the beautiful Gradio interface in action. The same AI agents that power our MCP endpoints create this seamless learning experience."
130
-
131
- **Action:**
132
- 1. Select "Data Science" skill
133
- 2. Click "Start Learning"
134
- 3. Show generated lesson content
135
- 4. Complete lesson and start quiz
136
- 5. Answer quiz questions
137
- 6. Show results and progress
138
-
139
- ### Part 2: MCP Testing Interface (3:00 - 3:30)
140
- **Visual:** Built-in MCP endpoint testing section in Gradio
141
- **Narration:**
142
- > "What makes this special is that we've built MCP endpoint testing directly into our Gradio interface. You can test all our MCP endpoints without leaving the app."
143
-
144
- **Action:** Show the MCP endpoint testing interface working
145
-
146
- ---
147
-
148
- ## 🏆 **Hackathon Highlights (3:30 - 4:30)**
149
-
150
- **Visual:** Split screen showing both Gradio UI and MCP endpoints
151
- **Narration:**
152
- > "This submission perfectly demonstrates the agentic architecture we've built:
153
- > - Three specialized AI agents working together
154
- > - Lesson Agent generates personalized content
155
- > - Quiz Agent creates adaptive assessments
156
- > - Progress Agent tracks learning analytics
157
- > - All coordinated by our main orchestrator"
158
-
159
- **Visual:** Show the architecture diagram or code structure
160
- **Narration:**
161
- > "The same agents power both the beautiful user interface AND the MCP protocol endpoints, making our platform ready for integration with Claude Desktop, Cursor, or any MCP client."
162
-
163
- ---
164
-
165
- ## 🚀 **Call to Action (4:30 - 5:00)**
166
-
167
- **Visual:** README.md showing deployment instructions
168
- **Narration:**
169
- > "You can deploy this immediately to Hugging Face Spaces using our space_app.py file, or run it locally following our comprehensive documentation. This represents the future of AI-powered education - agentic, interoperable, and ready for the Model Context Protocol ecosystem."
170
-
171
- **Visual:** Final shot of both servers running simultaneously
172
- **Narration:**
173
- > "Thank you for watching our Agentic Skill Builder demo. We're excited to contribute to the MCP ecosystem and the future of AI agents working together!"
174
-
175
- ---
176
-
177
- ## 📝 **Recording Checklist**
178
-
179
- ### Before Recording:
180
- - [ ] Ensure both servers are running (Gradio on 7860, MCP on 8001)
181
- - [ ] Prepare browser tabs for all endpoints
182
- - [ ] Have curl commands or Postman collection ready
183
- - [ ] Test the complete learning flow works
184
- - [ ] Check audio/screen recording quality
185
-
186
- ### During Recording:
187
- - [ ] Speak clearly and at moderate pace
188
- - [ ] Show actual JSON responses from MCP endpoints
189
- - [ ] Demonstrate real-time lesson generation
190
- - [ ] Highlight the dual-purpose architecture
191
- - [ ] Keep within 5-minute time limit
192
-
193
- ### After Recording:
194
- - [ ] Upload to YouTube/Vimeo
195
- - [ ] Update README.md with video link
196
- - [ ] Add video link to HF Spaces README
197
- - [ ] Verify video is publicly accessible
198
-
199
- ---
200
-
201
- ## 🎥 **Technical Recording Tips**
202
-
203
- 1. **Use OBS Studio or similar** for high-quality screen recording
204
- 2. **Record at 1080p** for clear text visibility
205
- 3. **Use good microphone** for clear narration
206
- 4. **Multiple takes OK** - edit together the best parts
207
- 5. **Add captions** for accessibility
208
- 6. **Include timestamps** in video description
209
-
210
- ---
211
-
212
- **Ready to showcase the future of agentic learning with MCP integration! 🚀**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
deployment_guide.md DELETED
@@ -1,138 +0,0 @@
1
- # 🚀 Hugging Face Spaces Deployment Guide
2
-
3
- ## 📋 Pre-Deployment Checklist
4
-
5
- ### ✅ **Files Ready for Upload:**
6
- - `space_app.py` - Main application file (Gradio + MCP server)
7
- - `requirements.txt` - HF Spaces optimized dependencies
8
- - `README_spaces.md` - Spaces configuration (rename to README.md)
9
- - `config.py` - Configuration utilities
10
- - `.env` - Environment variables (handle separately)
11
-
12
- ### ✅ **Configuration Complete:**
13
- - App file specified: `space_app.py`
14
- - SDK: `gradio 4.44.0`
15
- - Tags include: `mcp-server-track`
16
- - License: MIT
17
-
18
- ---
19
-
20
- ## 🔧 **Deployment Steps**
21
-
22
- ### **Step 1: Create Hugging Face Space**
23
-
24
- 1. Go to [Hugging Face Spaces](https://huggingface.co/spaces)
25
- 2. Click "Create new Space"
26
- 3. **Important:** Create under the `Agents-MCP-Hackathon` organization
27
- 4. Space name: `agentic-skill-builder`
28
- 5. Choose "Gradio" as SDK
29
- 6. Set to Public
30
- 7. Click "Create Space"
31
-
32
- ### **Step 2: Upload Files**
33
-
34
- Upload these files to your new Space:
35
-
36
- ```
37
- 📁 agentic-skill-builder/
38
- ├── 📄 README.md (renamed from README_spaces.md)
39
- ├── 📄 space_app.py
40
- ├── 📄 requirements.txt
41
- ├── 📄 config.py
42
- └── 📄 .env (create in Spaces settings)
43
- ```
44
-
45
- ### **Step 3: Configure Environment Variables**
46
-
47
- In your HF Space settings, add these environment variables:
48
-
49
- ```env
50
- AZURE_OPENAI_ENDPOINT=your-azure-endpoint
51
- AZURE_OPENAI_KEY=your-api-key
52
- AZURE_OPENAI_API_VERSION=2024-12-01-preview
53
- AZURE_OPENAI_LLM_DEPLOYMENT=gpt-4.1
54
- AZURE_OPENAI_LLM_MODEL=gpt-4.1
55
- ```
56
-
57
- **⚠️ Important:** Never commit your `.env` file with real credentials to a public repo!
58
-
59
- ### **Step 4: Verify Deployment**
60
-
61
- Once deployed, your Space should:
62
- - ✅ Show the Gradio interface
63
- - ✅ Respond to MCP endpoints at `https://your-space-name.hf.space/mcp/skills`
64
- - ✅ Generate lessons and quizzes
65
- - ✅ Include hackathon branding and MCP testing interface
66
-
67
- ---
68
-
69
- ## 🔍 **Testing Your Deployed Space**
70
-
71
- ### **Gradio Interface Test:**
72
- 1. Visit your Space URL
73
- 2. Select a skill (e.g., "Python Programming")
74
- 3. Complete the full learning flow
75
- 4. Verify AI generates lessons and quizzes
76
-
77
- ### **MCP Endpoints Test:**
78
- ```bash
79
- # Test from anywhere on the internet
80
- curl https://your-space-name.hf.space/mcp/skills
81
- curl https://your-space-name.hf.space/mcp/progress/test_user
82
- ```
83
-
84
- ---
85
-
86
- ## 📝 **Post-Deployment Updates**
87
-
88
- ### **Update README.md with:**
89
- 1. **Live demo link:** Replace local URLs with your HF Space URL
90
- 2. **Video link:** Add your demo video URL
91
- 3. **Deployment status:** Confirm it's live and working
92
-
93
- ### **Example Updates:**
94
- ```markdown
95
- ## 🎬 Demo Video
96
- **MCP Server in Action:** [https://youtu.be/your-video-id](https://youtu.be/your-video-id)
97
-
98
- ## 🌐 Live Demo
99
- **Try it now:** [https://huggingface.co/spaces/Agents-MCP-Hackathon/agentic-skill-builder](https://huggingface.co/spaces/Agents-MCP-Hackathon/agentic-skill-builder)
100
-
101
- ## 🔗 MCP Endpoints
102
- Test our live MCP server at: `https://your-space-name.hf.space/mcp/`
103
- ```
104
-
105
- ---
106
-
107
- ## 🏆 **Final Hackathon Submission Checklist**
108
-
109
- - [ ] ✅ Space deployed under Agents-MCP-Hackathon organization
110
- - [ ] ✅ "mcp-server-track" tag in README
111
- - [ ] ✅ Demo video uploaded and linked
112
- - [ ] ✅ Live MCP endpoints working
113
- - [ ] ✅ Gradio interface fully functional
114
- - [ ] ✅ All dependencies working in cloud environment
115
- - [ ] ✅ Documentation updated with live links
116
-
117
- ---
118
-
119
- ## 🚨 **Troubleshooting Common Issues**
120
-
121
- ### **Build Failures:**
122
- - Check `requirements.txt` for incompatible versions
123
- - Verify all imports work in `space_app.py`
124
- - Ensure environment variables are set correctly
125
-
126
- ### **MCP Endpoints Not Working:**
127
- - Verify FastAPI routes are properly configured
128
- - Check if app.mount() is correctly set up
129
- - Test endpoints locally first
130
-
131
- ### **Azure OpenAI Errors:**
132
- - Confirm environment variables are set in Spaces settings
133
- - Check Azure OpenAI quota and model availability
134
- - Verify API key permissions
135
-
136
- ---
137
-
138
- **Ready to deploy! Your agentic skill builder will be live for the world to see! 🌍✨**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
mcp_server.py CHANGED
@@ -1,5 +1,5 @@
1
  """
2
- MCP Server Integration for Agentic Skill Builder
3
  This module provides Model Context Protocol endpoints for external agent integration.
4
  """
5
 
@@ -18,7 +18,7 @@ from app import AgenticSkillBuilder, UserProgress
18
 
19
  # FastAPI app for MCP endpoints
20
  mcp_app = FastAPI(
21
- title="Agentic Skill Builder MCP Server",
22
  description="Model Context Protocol endpoints for microlearning integration",
23
  version="1.0.0"
24
  )
@@ -50,7 +50,7 @@ class ProgressResponse(BaseModel):
50
  async def root():
51
  """Root endpoint with API information"""
52
  return {
53
- "name": "Agentic Skill Builder MCP Server",
54
  "version": "1.0.0",
55
  "description": "MCP endpoints for AI-powered microlearning",
56
  "endpoints": {
@@ -249,7 +249,7 @@ async def health_check():
249
  return {
250
  "status": "healthy",
251
  "timestamp": datetime.now().isoformat(),
252
- "service": "Agentic Skill Builder MCP Server"
253
  }
254
 
255
  def run_mcp_server():
@@ -263,7 +263,7 @@ def run_mcp_server():
263
  )
264
 
265
  if __name__ == "__main__":
266
- print("🚀 Starting Agentic Skill Builder MCP Server...")
267
  print("📚 MCP endpoints will be available at http://localhost:8000")
268
  print("📖 API documentation at http://localhost:8000/docs")
269
  run_mcp_server()
 
1
  """
2
+ MCP Server Integration for SkillSprout
3
  This module provides Model Context Protocol endpoints for external agent integration.
4
  """
5
 
 
18
 
19
  # FastAPI app for MCP endpoints
20
  mcp_app = FastAPI(
21
+ title="SkillSprout MCP Server",
22
  description="Model Context Protocol endpoints for microlearning integration",
23
  version="1.0.0"
24
  )
 
50
  async def root():
51
  """Root endpoint with API information"""
52
  return {
53
+ "name": "SkillSprout MCP Server",
54
  "version": "1.0.0",
55
  "description": "MCP endpoints for AI-powered microlearning",
56
  "endpoints": {
 
249
  return {
250
  "status": "healthy",
251
  "timestamp": datetime.now().isoformat(),
252
+ "service": "SkillSprout MCP Server"
253
  }
254
 
255
  def run_mcp_server():
 
263
  )
264
 
265
  if __name__ == "__main__":
266
+ print("🚀 Starting SkillSprout MCP Server...")
267
  print("📚 MCP endpoints will be available at http://localhost:8000")
268
  print("📖 API documentation at http://localhost:8000/docs")
269
  run_mcp_server()
requirements.txt CHANGED
@@ -7,3 +7,17 @@ fastapi>=0.104.0
7
  pydantic>=2.0.0
8
  aiohttp>=3.9.0
9
  requests>=2.31.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  pydantic>=2.0.0
8
  aiohttp>=3.9.0
9
  requests>=2.31.0
10
+ azure-cognitiveservices-speech>=1.34.0
11
+ plotly>=5.17.0
12
+ kaleido>=0.2.1
13
+
14
+ # Testing dependencies
15
+ pytest>=7.4.0
16
+ pytest-asyncio>=0.23.0
17
+ pytest-cov>=4.1.0
18
+ pytest-mock>=3.12.0
19
+ pytest-html>=4.1.0
20
+ pytest-json-report>=1.5.0
21
+ pytest-xdist>=3.3.0
22
+ coverage>=7.3.0
23
+ mock>=5.1.0
run.py CHANGED
@@ -1,5 +1,5 @@
1
  """
2
- Launcher script for Agentic Skill Builder
3
  Runs both the Gradio interface and MCP server
4
  """
5
 
@@ -23,7 +23,7 @@ def run_mcp_server():
23
  def main():
24
  """Main launcher function"""
25
  print("=" * 60)
26
- print("🎓 AGENTIC SKILL BUILDER")
27
  print(" AI-Powered Microlearning Platform")
28
  print("=" * 60)
29
  print()
@@ -76,7 +76,7 @@ if __name__ == "__main__":
76
  try:
77
  main()
78
  except KeyboardInterrupt:
79
- print("\n\n👋 Goodbye! Thanks for using Agentic Skill Builder!")
80
  except Exception as e:
81
  print(f"\n❌ Error: {e}")
82
  print("Please check your configuration and try again.")
 
1
  """
2
+ Launcher script for SkillSprout
3
  Runs both the Gradio interface and MCP server
4
  """
5
 
 
23
  def main():
24
  """Main launcher function"""
25
  print("=" * 60)
26
+ print("🌱 SKILLSPROUT")
27
  print(" AI-Powered Microlearning Platform")
28
  print("=" * 60)
29
  print()
 
76
  try:
77
  main()
78
  except KeyboardInterrupt:
79
+ print("\n\n👋 Goodbye! Thanks for using SkillSprout!")
80
  except Exception as e:
81
  print(f"\n❌ Error: {e}")
82
  print("Please check your configuration and try again.")
simple_test.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Simple test runner for SkillSprout - streamlined for hackathon
4
+ """
5
+ import subprocess
6
+ import sys
7
+
8
+ def run_tests():
9
+ """Run the simplified test suite"""
10
+ print("🌱 SkillSprout - Running Tests")
11
+ print("=" * 40)
12
+
13
+ # Run the tests we actually have
14
+ cmd = [sys.executable, "-m", "pytest", "tests/", "-v", "--tb=short"]
15
+
16
+ try:
17
+ result = subprocess.run(cmd, check=False)
18
+
19
+ if result.returncode == 0:
20
+ print("\n✅ All tests passed!")
21
+ else:
22
+ print("\n⚠️ Some tests failed (this is OK for development)")
23
+
24
+ return result.returncode
25
+
26
+ except Exception as e:
27
+ print(f"\n❌ Error running tests: {e}")
28
+ return 1
29
+
30
+ if __name__ == "__main__":
31
+ exit_code = run_tests()
32
+ sys.exit(exit_code)
space_app.py CHANGED
@@ -1,5 +1,5 @@
1
  """
2
- Agentic Skill Builder - Hackathon Submission
3
  A unified app.py that serves both Gradio interface and MCP server endpoints
4
  for the Gradio Agents & MCP Hackathon 2025
5
  """
@@ -9,10 +9,13 @@ import json
9
  import asyncio
10
  import threading
11
  import time
12
- from datetime import datetime
13
- from typing import Dict, List, Optional, Tuple
14
- from dataclasses import dataclass, asdict
15
  import logging
 
 
 
16
 
17
  from dotenv import load_dotenv
18
  import gradio as gr
@@ -22,11 +25,19 @@ from fastapi import FastAPI, HTTPException
22
  from pydantic import BaseModel
23
  import uvicorn
24
 
 
 
 
 
 
 
 
 
25
  # Configure logging
26
  logging.basicConfig(level=logging.INFO)
27
  logger = logging.getLogger(__name__)
28
 
29
- # Load environment variables
30
  load_dotenv()
31
 
32
  # Azure OpenAI client configuration
@@ -40,20 +51,186 @@ client = AzureOpenAI(
40
  LLM_DEPLOYMENT = os.getenv("AZURE_OPENAI_LLM_DEPLOYMENT", "gpt-4").replace('"', '')
41
  LLM_MODEL = os.getenv("AZURE_OPENAI_LLM_MODEL", "gpt-4").replace('"', '')
42
 
 
 
 
 
 
43
  # Import all classes from the main app
44
  from app import (
45
  UserProgress, Lesson, Quiz, LessonAgent, QuizAgent,
46
  ProgressAgent, AgenticSkillBuilder
47
  )
48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
49
  # Create global instances
50
  app_instance = AgenticSkillBuilder()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
51
 
52
  # ===== MCP SERVER INTEGRATION =====
53
 
54
  # FastAPI app for MCP endpoints
55
  mcp_app = FastAPI(
56
- title="Agentic Skill Builder MCP Server",
57
  description="Model Context Protocol endpoints for microlearning integration - Hackathon 2025",
58
  version="1.0.0"
59
  )
@@ -74,7 +251,7 @@ class QuizSubmission(BaseModel):
74
  async def root():
75
  """Root endpoint with hackathon information"""
76
  return {
77
- "name": "Agentic Skill Builder MCP Server",
78
  "version": "1.0.0",
79
  "hackathon": "Gradio Agents & MCP Hackathon 2025",
80
  "track": "mcp-server-track",
@@ -121,9 +298,8 @@ async def generate_lesson_mcp(request: LessonRequest):
121
  "user_context": {
122
  "user_id": request.user_id,
123
  "current_difficulty": progress.current_difficulty,
124
- "lessons_completed": progress.lessons_completed
125
- },
126
- "mcp_server": "Agentic Skill Builder",
127
  "timestamp": datetime.now().isoformat()
128
  }
129
 
@@ -142,10 +318,9 @@ async def get_user_progress_mcp(user_id: str, skill: str = None):
142
  "user_id": progress.user_id,
143
  "skill": progress.skill,
144
  "lessons_completed": progress.lessons_completed,
145
- "average_score": progress.get_average_score(),
146
- "current_difficulty": progress.current_difficulty,
147
  "recommendations": recommendation,
148
- "mcp_server": "Agentic Skill Builder"
149
  }
150
  else:
151
  user_progress_data = {}
@@ -161,9 +336,8 @@ async def get_user_progress_mcp(user_id: str, skill: str = None):
161
 
162
  return {
163
  "user_id": user_id,
164
- "skills_progress": user_progress_data,
165
- "total_skills_learning": len(user_progress_data),
166
- "mcp_server": "Agentic Skill Builder",
167
  "timestamp": datetime.now().isoformat()
168
  }
169
 
@@ -210,10 +384,9 @@ async def submit_quiz_results_mcp(submission: QuizSubmission):
210
  "updated_progress": {
211
  "lessons_completed": progress.lessons_completed,
212
  "average_score": progress.get_average_score(),
213
- "current_difficulty": progress.current_difficulty
214
- },
215
  "recommendation": recommendation,
216
- "mcp_server": "Agentic Skill Builder",
217
  "timestamp": datetime.now().isoformat()
218
  }
219
 
@@ -226,7 +399,7 @@ def create_interface():
226
  """Create the Gradio interface with enhanced hackathon features"""
227
 
228
  with gr.Blocks(
229
- title="Agentic Skill Builder - MCP Hackathon 2025",
230
  theme=gr.themes.Soft(),
231
  css="""
232
  .gradio-container {
@@ -246,7 +419,7 @@ def create_interface():
246
  # Enhanced Header for Hackathon
247
  gr.HTML("""
248
  <div class="hackathon-header">
249
- <h1>🚀 Agentic Skill Builder</h1>
250
  <h3>AI-Powered Microlearning with MCP Integration</h3>
251
  <p><strong>🏆 Gradio Agents & MCP Hackathon 2025 Submission</strong></p>
252
  <p>Track: MCP Server/Tool • Demonstrating Agentic AI Workflows</p>
@@ -276,9 +449,14 @@ def create_interface():
276
  )
277
 
278
  start_btn = gr.Button("🚀 Start Learning", variant="primary", size="lg")
279
-
280
- # Learning content areas
281
  lesson_output = gr.Markdown(visible=False)
 
 
 
 
 
 
282
  lesson_btn = gr.Button("Complete Lesson", visible=False)
283
 
284
  quiz_output = gr.Markdown(visible=False)
@@ -345,71 +523,142 @@ def create_interface():
345
 
346
  with gr.Column():
347
  mcp_output = gr.JSON(label="MCP Server Response")
348
-
349
- # Event handlers (same as original app.py)
350
- async def handle_start_learning(skill_choice, custom_skill_input):
351
  skill = custom_skill_input.strip() if custom_skill_input.strip() else skill_choice
352
- if not skill:
353
- return [
354
  gr.update(value="⚠️ Please select or enter a skill to continue."),
 
355
  gr.update(visible=False),
356
  gr.update(visible=False),
357
  skill
358
  ] + [gr.update(visible=False, value="") for _ in range(5)]
359
 
360
- lesson_content, btn_text, _ = await app_instance.start_lesson(skill)
361
-
362
- return [
363
- gr.update(value=lesson_content),
364
- gr.update(value=btn_text, visible=True),
365
- gr.update(visible=False),
366
- skill
367
- ] + [gr.update(visible=False, value="") for _ in range(5)]
368
-
369
- async def handle_complete_lesson():
370
- quiz_content, btn_text, _ = await app_instance.complete_lesson_and_start_quiz()
371
-
372
- quiz_updates = []
373
- if app_instance.current_quiz:
374
- for i, question in enumerate(app_instance.current_quiz.questions):
375
- if i < len(quiz_inputs):
376
- label = f"Q{i+1}: {question['question'][:50]}..."
377
- quiz_updates.append(gr.update(label=label, visible=True))
378
- else:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
379
  quiz_updates.append(gr.update(visible=False))
380
- for i in range(len(app_instance.current_quiz.questions), len(quiz_inputs)):
381
- quiz_updates.append(gr.update(visible=False))
382
- else:
383
- quiz_updates = [gr.update(visible=False) for _ in range(len(quiz_inputs))]
384
-
385
- return [
386
- gr.update(visible=False),
387
- gr.update(value=quiz_content, visible=True),
388
- gr.update(value=btn_text, visible=True),
389
- gr.update(visible=False)
390
- ] + quiz_updates
391
-
392
- def handle_submit_quiz(*answers):
393
- valid_answers = [ans for ans in answers if ans is not None and ans != ""]
394
- results_content, btn_text, _ = app_instance.submit_quiz(*valid_answers)
395
-
396
- return [
397
- gr.update(visible=False),
398
- gr.update(value=results_content, visible=True),
399
- gr.update(value=btn_text, visible=True),
400
- gr.update(visible=False)
401
- ] + [gr.update(visible=False) for _ in range(len(quiz_inputs))]
402
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
403
  def handle_restart():
404
  return [
405
  gr.update(visible=False),
 
406
  gr.update(visible=False),
407
  gr.update(visible=False),
408
  gr.update(visible=False),
409
  gr.update(visible=False),
410
  ""
411
  ] + [gr.update(visible=False, value="") for _ in range(len(quiz_inputs))]
412
-
413
  def update_progress_display():
414
  if not app_instance.progress_agent.user_data:
415
  return "**No learning data yet.** Complete some lessons to see your progress!"
@@ -426,6 +675,40 @@ def create_interface():
426
  """
427
  return progress_content
428
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
429
  async def test_mcp_endpoint(skill, user_id):
430
  """Test MCP endpoint directly from the interface"""
431
  try:
@@ -456,9 +739,8 @@ def create_interface():
456
  "user_context": {
457
  "user_id": user_id,
458
  "current_difficulty": progress.current_difficulty,
459
- "lessons_completed": progress.lessons_completed
460
- },
461
- "mcp_server": "Agentic Skill Builder",
462
  "status": "success"
463
  }
464
  }
@@ -471,12 +753,17 @@ def create_interface():
471
  "error": str(e),
472
  "status": "error"
473
  }
474
-
475
- # Wire up events
476
  start_btn.click(
477
  handle_start_learning,
478
  inputs=[skill_dropdown, custom_skill],
479
- outputs=[lesson_output, lesson_btn, quiz_output, current_skill] + quiz_inputs[:5]
 
 
 
 
 
 
480
  )
481
 
482
  lesson_btn.click(
@@ -489,10 +776,9 @@ def create_interface():
489
  inputs=quiz_inputs,
490
  outputs=[quiz_submit_btn, results_output, restart_btn, quiz_output] + quiz_inputs
491
  )
492
-
493
  restart_btn.click(
494
  handle_restart,
495
- outputs=[lesson_output, quiz_output, results_output, lesson_btn, restart_btn, current_skill] + quiz_inputs
496
  )
497
 
498
  refresh_progress_btn.click(
 
1
  """
2
+ SkillSprout - Hackathon Submission
3
  A unified app.py that serves both Gradio interface and MCP server endpoints
4
  for the Gradio Agents & MCP Hackathon 2025
5
  """
 
9
  import asyncio
10
  import threading
11
  import time
12
+ from datetime import datetime, timedelta
13
+ from typing import Dict, List, Optional, Tuple, Any
14
+ from dataclasses import dataclass, asdict, field
15
  import logging
16
+ import math
17
+ import base64
18
+ from io import BytesIO
19
 
20
  from dotenv import load_dotenv
21
  import gradio as gr
 
25
  from pydantic import BaseModel
26
  import uvicorn
27
 
28
+ # Voice narration imports
29
+ try:
30
+ import azure.cognitiveservices.speech as speechsdk
31
+ SPEECH_SDK_AVAILABLE = True
32
+ except ImportError:
33
+ SPEECH_SDK_AVAILABLE = False
34
+ print("⚠️ Azure Speech SDK not available. Voice narration will be disabled.")
35
+
36
  # Configure logging
37
  logging.basicConfig(level=logging.INFO)
38
  logger = logging.getLogger(__name__)
39
 
40
+ # Load environment variables (works locally with .env, in Spaces with secrets)
41
  load_dotenv()
42
 
43
  # Azure OpenAI client configuration
 
51
  LLM_DEPLOYMENT = os.getenv("AZURE_OPENAI_LLM_DEPLOYMENT", "gpt-4").replace('"', '')
52
  LLM_MODEL = os.getenv("AZURE_OPENAI_LLM_MODEL", "gpt-4").replace('"', '')
53
 
54
+ # Voice configuration
55
+ VOICE_KEY = os.getenv("AZURE_SPEECH_KEY", "").replace('"', '')
56
+ VOICE_REGION = os.getenv("AZURE_SPEECH_REGION", "eastus2").replace('"', '')
57
+ VOICE_NAME = os.getenv("AZURE_SPEECH_VOICE", "en-US-AvaMultilingualNeural").replace('"', '')
58
+
59
  # Import all classes from the main app
60
  from app import (
61
  UserProgress, Lesson, Quiz, LessonAgent, QuizAgent,
62
  ProgressAgent, AgenticSkillBuilder
63
  )
64
 
65
+ # Gamification System Classes
66
+ @dataclass
67
+ class Achievement:
68
+ """Achievement system for gamification"""
69
+ id: str
70
+ name: str
71
+ description: str
72
+ icon: str
73
+ unlocked: bool = False
74
+ unlock_condition: str = ""
75
+
76
+ @dataclass
77
+ class UserStats:
78
+ """Enhanced user statistics for gamification"""
79
+ user_id: str
80
+ total_points: int = 0
81
+ level: int = 1
82
+ achievements: List[str] = field(default_factory=list)
83
+ streak_days: int = 0
84
+ total_lessons: int = 0
85
+ total_quizzes: int = 0
86
+ correct_answers: int = 0
87
+
88
+ def add_points(self, points: int):
89
+ """Add points and check for level up"""
90
+ self.total_points += points
91
+ new_level = min(10, (self.total_points // 100) + 1)
92
+ if new_level > self.level:
93
+ self.level = new_level
94
+
95
+ def get_accuracy(self) -> float:
96
+ """Calculate quiz accuracy"""
97
+ if self.total_quizzes == 0:
98
+ return 0.0
99
+ return (self.correct_answers / self.total_quizzes) * 100
100
+
101
+ @dataclass
102
+ class EnhancedUserProgress:
103
+ """Enhanced progress tracking with detailed analytics"""
104
+ user_id: str
105
+ skill: str
106
+ lessons_completed: int = 0
107
+ quiz_scores: List[float] = field(default_factory=list)
108
+ time_spent: List[float] = field(default_factory=list)
109
+ mastery_level: float = 0.0
110
+ last_activity: datetime = field(default_factory=datetime.now)
111
+
112
+ def calculate_mastery(self) -> float:
113
+ """Calculate skill mastery based on performance"""
114
+ if not self.quiz_scores:
115
+ return 0.0
116
+
117
+ avg_score = sum(self.quiz_scores) / len(self.quiz_scores)
118
+ consistency_bonus = min(len(self.quiz_scores) * 5, 20) # Max 20% bonus
119
+ lesson_bonus = min(self.lessons_completed * 2, 10) # Max 10% bonus
120
+
121
+ self.mastery_level = min(100, avg_score + consistency_bonus + lesson_bonus)
122
+ return self.mastery_level
123
+
124
+ def update_mastery(self):
125
+ """Update mastery level"""
126
+ self.calculate_mastery()
127
+
128
+ class GamificationManager:
129
+ """Manages achievements and gamification"""
130
+
131
+ def __init__(self):
132
+ self.user_stats: Dict[str, UserStats] = {}
133
+ self.achievements = {
134
+ "first_steps": Achievement("first_steps", "First Steps", "Complete your first lesson", "🎯"),
135
+ "quiz_master": Achievement("quiz_master", "Quiz Master", "Score 100% on a quiz", "🧠"),
136
+ "persistent": Achievement("persistent", "Persistent Learner", "Complete 5 lessons", "💪"),
137
+ "scholar": Achievement("scholar", "Scholar", "Complete 10 lessons", "🎓"),
138
+ "expert": Achievement("expert", "Domain Expert", "Master a skill (20 lessons)", "⭐"),
139
+ "polyglot": Achievement("polyglot", "Polyglot", "Learn 3 different skills", "🌍"),
140
+ "perfectionist": Achievement("perfectionist", "Perfectionist", "Score 100% on 5 quizzes", "💯"),
141
+ "speed": Achievement("speed", "Speed Learner", "Complete lesson in under 3 minutes", "⚡"),
142
+ "consistent": Achievement("consistent", "Consistent", "Learn for 7 days in a row", "📅"),
143
+ "explorer": Achievement("explorer", "Explorer", "Try voice narration feature", "🎧"),
144
+ }
145
+
146
+ def get_user_stats(self, user_id: str) -> UserStats:
147
+ """Get or create user stats"""
148
+ if user_id not in self.user_stats:
149
+ self.user_stats[user_id] = UserStats(user_id=user_id)
150
+ return self.user_stats[user_id]
151
+
152
+ def check_achievements(self, user_id: str, progress: EnhancedUserProgress) -> List[Achievement]:
153
+ """Check and unlock achievements"""
154
+ stats = self.get_user_stats(user_id)
155
+ newly_unlocked = []
156
+
157
+ # Check each achievement
158
+ achievements_to_check = [
159
+ ("first_steps", stats.total_lessons >= 1),
160
+ ("quiz_master", any(score == 100 for score in progress.quiz_scores)),
161
+ ("persistent", stats.total_lessons >= 5),
162
+ ("scholar", stats.total_lessons >= 10),
163
+ ("expert", stats.total_lessons >= 20),
164
+ ("perfectionist", sum(1 for score in progress.quiz_scores if score == 100) >= 5),
165
+ ("consistent", stats.streak_days >= 7),
166
+ ]
167
+
168
+ for achievement_id, condition in achievements_to_check:
169
+ if condition and achievement_id not in stats.achievements:
170
+ stats.achievements.append(achievement_id)
171
+ newly_unlocked.append(self.achievements[achievement_id])
172
+ stats.add_points(50) # Bonus points for achievements
173
+
174
+ return newly_unlocked
175
+
176
  # Create global instances
177
  app_instance = AgenticSkillBuilder()
178
+ gamification = GamificationManager()
179
+
180
+ def generate_voice_narration(text: str, voice_name: str = VOICE_NAME) -> Optional[str]:
181
+ """Generate voice narration using Azure Speech Services"""
182
+ if not SPEECH_SDK_AVAILABLE or not VOICE_KEY:
183
+ logger.warning("Voice narration not available - missing Speech SDK or API key")
184
+ return None
185
+
186
+ try:
187
+ # Configure speech service
188
+ speech_config = speechsdk.SpeechConfig(subscription=VOICE_KEY, region=VOICE_REGION)
189
+ speech_config.speech_synthesis_voice_name = voice_name
190
+
191
+ # Generate filename
192
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
193
+ audio_filename = f"narration_{timestamp}.wav"
194
+
195
+ # Configure audio output
196
+ audio_config = speechsdk.audio.AudioOutputConfig(filename=audio_filename)
197
+
198
+ # Create synthesizer
199
+ speech_synthesizer = speechsdk.SpeechSynthesizer(
200
+ speech_config=speech_config,
201
+ audio_config=audio_config
202
+ )
203
+
204
+ # Create SSML for educational content
205
+ ssml_text = f"""
206
+ <speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="en-US">
207
+ <voice name="{voice_name}">
208
+ <prosody rate="0.9" pitch="medium">
209
+ {text}
210
+ </prosody>
211
+ </voice>
212
+ </speak>
213
+ """
214
+
215
+ # Synthesize speech
216
+ result = speech_synthesizer.speak_ssml_async(ssml_text).get()
217
+
218
+ if result.reason == speechsdk.ResultReason.SynthesizingAudioCompleted:
219
+ logger.info(f"Voice narration generated: {audio_filename}")
220
+ return audio_filename
221
+ else:
222
+ logger.error(f"Speech synthesis failed: {result.reason}")
223
+ return None
224
+
225
+ except Exception as e:
226
+ logger.error(f"Error generating voice narration: {e}")
227
+ return None
228
 
229
  # ===== MCP SERVER INTEGRATION =====
230
 
231
  # FastAPI app for MCP endpoints
232
  mcp_app = FastAPI(
233
+ title="SkillSprout MCP Server",
234
  description="Model Context Protocol endpoints for microlearning integration - Hackathon 2025",
235
  version="1.0.0"
236
  )
 
251
  async def root():
252
  """Root endpoint with hackathon information"""
253
  return {
254
+ "name": "SkillSprout MCP Server",
255
  "version": "1.0.0",
256
  "hackathon": "Gradio Agents & MCP Hackathon 2025",
257
  "track": "mcp-server-track",
 
298
  "user_context": {
299
  "user_id": request.user_id,
300
  "current_difficulty": progress.current_difficulty,
301
+ "lessons_completed": progress.lessons_completed },
302
+ "mcp_server": "SkillSprout",
 
303
  "timestamp": datetime.now().isoformat()
304
  }
305
 
 
318
  "user_id": progress.user_id,
319
  "skill": progress.skill,
320
  "lessons_completed": progress.lessons_completed,
321
+ "average_score": progress.get_average_score(), "current_difficulty": progress.current_difficulty,
 
322
  "recommendations": recommendation,
323
+ "mcp_server": "SkillSprout"
324
  }
325
  else:
326
  user_progress_data = {}
 
336
 
337
  return {
338
  "user_id": user_id,
339
+ "skills_progress": user_progress_data, "total_skills_learning": len(user_progress_data),
340
+ "mcp_server": "SkillSprout",
 
341
  "timestamp": datetime.now().isoformat()
342
  }
343
 
 
384
  "updated_progress": {
385
  "lessons_completed": progress.lessons_completed,
386
  "average_score": progress.get_average_score(),
387
+ "current_difficulty": progress.current_difficulty },
 
388
  "recommendation": recommendation,
389
+ "mcp_server": "SkillSprout",
390
  "timestamp": datetime.now().isoformat()
391
  }
392
 
 
399
  """Create the Gradio interface with enhanced hackathon features"""
400
 
401
  with gr.Blocks(
402
+ title="SkillSprout - MCP Hackathon 2025",
403
  theme=gr.themes.Soft(),
404
  css="""
405
  .gradio-container {
 
419
  # Enhanced Header for Hackathon
420
  gr.HTML("""
421
  <div class="hackathon-header">
422
+ <h1>🌱 SkillSprout</h1>
423
  <h3>AI-Powered Microlearning with MCP Integration</h3>
424
  <p><strong>🏆 Gradio Agents & MCP Hackathon 2025 Submission</strong></p>
425
  <p>Track: MCP Server/Tool • Demonstrating Agentic AI Workflows</p>
 
449
  )
450
 
451
  start_btn = gr.Button("🚀 Start Learning", variant="primary", size="lg")
452
+ # Learning content areas
 
453
  lesson_output = gr.Markdown(visible=False)
454
+
455
+ # Voice narration controls
456
+ with gr.Row(visible=False) as voice_controls:
457
+ voice_btn = gr.Button("🎧 Generate Voice Narration", variant="secondary")
458
+ voice_audio = gr.Audio(label="Lesson Audio", visible=False)
459
+
460
  lesson_btn = gr.Button("Complete Lesson", visible=False)
461
 
462
  quiz_output = gr.Markdown(visible=False)
 
523
 
524
  with gr.Column():
525
  mcp_output = gr.JSON(label="MCP Server Response")
526
+ # Event handlers with gamification integration
527
+ def handle_start_learning(skill_choice, custom_skill_input, user_id="default"):
528
+ """Enhanced learning session handler with gamification"""
529
  skill = custom_skill_input.strip() if custom_skill_input.strip() else skill_choice
530
+ if not skill: return [
 
531
  gr.update(value="⚠️ Please select or enter a skill to continue."),
532
+ gr.update(visible=False), # voice_controls
533
  gr.update(visible=False),
534
  gr.update(visible=False),
535
  skill
536
  ] + [gr.update(visible=False, value="") for _ in range(5)]
537
 
538
+ try:
539
+ # Start lesson using the app instance (sync call)
540
+ loop = asyncio.new_event_loop()
541
+ asyncio.set_event_loop(loop)
542
+ lesson_content, btn_text, _ = loop.run_until_complete(app_instance.start_lesson(skill))
543
+ app_instance.current_user = user_id
544
+
545
+ # Update user stats
546
+ stats = gamification.get_user_stats(user_id)
547
+ stats.total_lessons += 1
548
+ stats.add_points(10) # Points for starting lesson
549
+
550
+ # Check for achievements
551
+ progress = EnhancedUserProgress(user_id=user_id, skill=skill)
552
+ progress.lessons_completed = stats.total_lessons
553
+ newly_unlocked = gamification.check_achievements(user_id, progress)
554
+ return [
555
+ gr.update(value=lesson_content),
556
+ gr.update(visible=True), # voice_controls
557
+ gr.update(value=btn_text, visible=True),
558
+ gr.update(visible=False),
559
+ skill
560
+ ] + [gr.update(visible=False, value="") for _ in range(5)]
561
+
562
+ except Exception as e:
563
+ logger.error(f"Error starting lesson: {e}")
564
+ return [
565
+ gr.update(value=f"❌ Error starting lesson: {str(e)}"),
566
+ gr.update(visible=False), # voice_controls
567
+ gr.update(visible=False),
568
+ gr.update(visible=False),
569
+ skill
570
+ ] + [gr.update(visible=False, value="") for _ in range(5)]
571
+
572
def handle_complete_lesson(user_id="default"):
    """Complete the current lesson, award points, and reveal the quiz.

    Args:
        user_id: Gamification identity for stats.

    Returns:
        Updates for [lesson_btn, quiz_output, quiz_submit_btn, restart_btn]
        followed by exactly ``len(quiz_inputs)`` quiz-input updates.
    """
    try:
        # Complete lesson and generate quiz on a dedicated loop.
        # Fix: the loop was previously never closed (event-loop leak).
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            quiz_content, btn_text, _ = loop.run_until_complete(
                app_instance.complete_lesson_and_start_quiz()
            )
        finally:
            loop.close()

        # Points for completing the lesson.
        stats = gamification.get_user_stats(user_id)
        stats.add_points(20)

        # Build exactly one update per quiz input slot. Fix: the old loop
        # appended one update per *question*, which could exceed the number
        # of wired outputs when a quiz had more questions than input slots.
        quiz_updates = []
        if app_instance.current_quiz:
            questions = app_instance.current_quiz.questions
            for i in range(len(quiz_inputs)):
                if i < len(questions):
                    label = f"Q{i+1}: {questions[i]['question'][:50]}..."
                    quiz_updates.append(gr.update(label=label, visible=True))
                else:
                    quiz_updates.append(gr.update(visible=False))
        else:
            quiz_updates = [gr.update(visible=False) for _ in range(len(quiz_inputs))]

        return [
            gr.update(visible=False),
            gr.update(value=quiz_content, visible=True),
            gr.update(value=btn_text, visible=True),
            gr.update(visible=False)
        ] + quiz_updates

    except Exception as e:
        logger.error(f"Error completing lesson: {e}")
        return [
            gr.update(visible=False),
            gr.update(value=f"❌ Error completing lesson: {str(e)}", visible=True),
            gr.update(visible=False),
            gr.update(visible=False)
        ] + [gr.update(visible=False) for _ in range(len(quiz_inputs))]
611
+
612
def handle_submit_quiz(*answers, user_id="default"):
    """Grade the submitted quiz answers and update gamification stats.

    Args:
        *answers: Raw values from the quiz input components.
        user_id: Gamification identity for stats.

    Returns:
        Updates for [quiz_submit_btn, results_output, restart_btn,
        quiz_output] followed by one hide-update per quiz input.
    """
    try:
        # Discard empty/None entries before grading.
        answered = [a for a in answers if a is not None and a != ""]
        results_content, btn_text, _ = app_instance.submit_quiz(*answered)

        # Count the quiz attempt.
        stats = gamification.get_user_stats(user_id)
        stats.total_quizzes += 1

        if app_instance.current_quiz and app_instance.current_quiz.questions:
            question_count = len(app_instance.current_quiz.questions)
            # Base points: flat award per answer given.
            stats.add_points(len(answered) * 20)

            if "100%" in results_content or "Perfect" in results_content:
                # Perfect score: credit every question plus a bonus.
                stats.correct_answers += question_count
                stats.add_points(50)
            else:
                # Rough estimate: assume about half the given answers were right.
                stats.correct_answers += max(1, len(answered) // 2)

        return [
            gr.update(visible=False),
            gr.update(value=results_content, visible=True),
            gr.update(value=btn_text, visible=True),
            gr.update(visible=False)
        ] + [gr.update(visible=False) for _ in range(len(quiz_inputs))]

    except Exception as e:
        logger.error(f"Error submitting quiz: {e}")
        return [
            gr.update(visible=False),
            gr.update(value=f"❌ Error submitting quiz: {str(e)}", visible=True),
            gr.update(visible=False),
            gr.update(visible=False)
        ] + [gr.update(visible=False) for _ in range(len(quiz_inputs))]
652
def handle_restart():
    """Reset the UI back to the skill-selection state.

    Returns:
        Hide-updates for the six main panels, an empty current-skill value,
        and cleared+hidden updates for every quiz input.
    """
    hidden_panels = [gr.update(visible=False) for _ in range(6)]
    cleared_inputs = [gr.update(visible=False, value="") for _ in range(len(quiz_inputs))]
    return hidden_panels + [""] + cleared_inputs
 
662
  def update_progress_display():
663
  if not app_instance.progress_agent.user_data:
664
  return "**No learning data yet.** Complete some lessons to see your progress!"
 
675
  """
676
  return progress_content
677
 
678
def handle_voice_generation(lesson_content, user_id="default"):
    """Create an audio narration for the current lesson content.

    Args:
        lesson_content: Markdown text of the lesson being shown.
        user_id: Gamification identity (earns the "explorer" achievement).

    Returns:
        (audio component update, status message string).
    """
    if not lesson_content or lesson_content == "":
        return gr.update(value=None, visible=False), "❌ No lesson content to narrate"

    try:
        import re

        # Strip markdown markers and collapse newlines for smoother speech.
        spoken_text = re.sub(r'[#*`]', '', lesson_content)
        spoken_text = spoken_text.replace('\n', ' ').strip()

        # Cap the narration length.
        if len(spoken_text) > 1000:
            spoken_text = spoken_text[:1000] + "..."

        audio_file = generate_voice_narration(spoken_text)
        if not audio_file:
            return gr.update(value=None, visible=False), "❌ Voice narration not available"

        # First use of the voice feature unlocks the "explorer" achievement.
        stats = gamification.get_user_stats(user_id)
        if "explorer" not in stats.achievements:
            stats.achievements.append("explorer")
            stats.add_points(25)

        return gr.update(value=audio_file, visible=True), "🎧 Voice narration generated!"

    except Exception as e:
        logger.error(f"Error generating voice: {e}")
        return gr.update(value=None, visible=False), f"❌ Error: {str(e)}"
711
+
712
  async def test_mcp_endpoint(skill, user_id):
713
  """Test MCP endpoint directly from the interface"""
714
  try:
 
739
  "user_context": {
740
  "user_id": user_id,
741
  "current_difficulty": progress.current_difficulty,
742
+ "lessons_completed": progress.lessons_completed },
743
+ "mcp_server": "SkillSprout",
 
744
  "status": "success"
745
  }
746
  }
 
753
  "error": str(e),
754
  "status": "error"
755
  }
756
+ # Wire up events
 
757
  start_btn.click(
758
  handle_start_learning,
759
  inputs=[skill_dropdown, custom_skill],
760
+ outputs=[lesson_output, voice_controls, lesson_btn, quiz_output, current_skill] + quiz_inputs[:5]
761
+ )
762
+
763
+ voice_btn.click(
764
+ handle_voice_generation,
765
+ inputs=[lesson_output],
766
+ outputs=[voice_audio, lesson_output]
767
  )
768
 
769
  lesson_btn.click(
 
776
  inputs=quiz_inputs,
777
  outputs=[quiz_submit_btn, results_output, restart_btn, quiz_output] + quiz_inputs
778
  )
 
779
  restart_btn.click(
780
  handle_restart,
781
+ outputs=[lesson_output, voice_controls, quiz_output, results_output, lesson_btn, restart_btn, current_skill] + quiz_inputs
782
  )
783
 
784
  refresh_progress_btn.click(
test_env.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Quick test to verify environment variables are loaded correctly
3
+ """
4
+ import os
5
+ from dotenv import load_dotenv
6
+
7
+ load_dotenv()
8
+
9
def test_env_vars():
    """Print a report of the required Azure OpenAI environment variables.

    Returns:
        True when every required variable is set to a non-blank value,
        False otherwise.
    """
    required_vars = [
        "AZURE_OPENAI_ENDPOINT",
        "AZURE_OPENAI_KEY",
        "AZURE_OPENAI_API_VERSION",
        "AZURE_OPENAI_LLM_DEPLOYMENT",
        "AZURE_OPENAI_LLM_MODEL"
    ]

    print("🔍 Environment Variables Check")
    print("=" * 40)

    missing = []
    for var_name in required_vars:
        # Blank or whitespace-only values count as missing.
        if (os.getenv(var_name) or "").strip():
            print(f"✅ {var_name}: Present")
        else:
            print(f"❌ {var_name}: Missing or empty")
            missing.append(var_name)

    print("=" * 40)
    if not missing:
        print("🎉 All environment variables are properly configured!")
        print("✅ Your app should work in Hugging Face Spaces")
    else:
        print("⚠️ Some environment variables are missing")
        print("🔧 Set them in Hugging Face Spaces Repository Secrets")

    return not missing
40
+
41
+ if __name__ == "__main__":
42
+ test_env_vars()
tests/conftest.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Simple pytest configuration for SkillSprout tests
3
+ """
4
+ import pytest
5
+ import os
6
+ import sys
7
+
8
+ # Add parent directory to path
9
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
10
+
11
@pytest.fixture
def enhanced_user_progress():
    """Return an EnhancedUserProgress pre-populated with sample history."""
    from space_app import EnhancedUserProgress

    return EnhancedUserProgress(
        user_id="test_user",
        skill="Python Programming",
        lessons_completed=3,
        quiz_scores=[80.0, 90.0, 70.0],
        mastery_level=75.0,
    )
22
+
23
@pytest.fixture
def gamification_manager():
    """Return a fresh GamificationManager for each test."""
    from space_app import GamificationManager

    return GamificationManager()
28
+
29
+ # Pytest configuration
30
def pytest_configure(config):
    """Register the project's custom markers so pytest does not warn."""
    for marker_spec in (
        "unit: marks tests as unit tests",
        "integration: marks tests as integration tests",
        "slow: marks tests as slow",
    ):
        config.addinivalue_line("markers", marker_spec)
tests/test_environment.py ADDED
@@ -0,0 +1,505 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Enhanced Environment Validation Tests for SkillSprout
3
+ """
4
+ import pytest
5
+ import os
6
+ import tempfile
7
+ import json
8
+ from unittest.mock import patch, Mock
9
+ from dataclasses import dataclass
10
+ from typing import Dict, List, Optional
11
+
12
+
13
@dataclass
class EnvironmentCheck:
    """Outcome of validating a single environment variable."""

    name: str                             # variable name, e.g. "AZURE_OPENAI_KEY"
    required: bool                        # whether the app needs this variable
    present: bool                         # whether os.environ defines it
    value: Optional[str] = None           # raw value when present
    validation_result: bool = True        # False when any rule failed
    error_message: Optional[str] = None   # human-readable failure reason
22
+
23
+
24
class EnvironmentValidator:
    """Enhanced environment validation utility.

    Validates the Azure OpenAI / Speech configuration variables and
    produces per-variable ``EnvironmentCheck`` results plus an aggregate
    report suitable for health checks.
    """

    REQUIRED_VARS = {
        'AZURE_OPENAI_ENDPOINT': {
            'required': True,
            'pattern': r'https://.*\.openai\.azure\.com/',
            'description': 'Azure OpenAI endpoint URL'
        },
        'AZURE_OPENAI_KEY': {
            'required': True,
            'min_length': 20,
            'description': 'Azure OpenAI API key'
        },
        'AZURE_OPENAI_API_VERSION': {
            'required': True,
            'pattern': r'\d{4}-\d{2}-\d{2}(-preview)?',
            'description': 'Azure OpenAI API version'
        },
        'AZURE_OPENAI_LLM_DEPLOYMENT': {
            'required': True,
            'description': 'Azure OpenAI LLM deployment name'
        },
        'AZURE_OPENAI_LLM_MODEL': {
            'required': True,
            'description': 'Azure OpenAI LLM model name'
        }
    }

    OPTIONAL_VARS = {
        'AZURE_SPEECH_KEY': {
            'required': False,
            'min_length': 20,
            'description': 'Azure Speech Services key (for voice narration)'
        },
        'AZURE_SPEECH_REGION': {
            'required': False,
            'description': 'Azure Speech Services region'
        },
        'GRADIO_ANALYTICS_ENABLED': {
            'required': False,
            'valid_values': ['true', 'false', '1', '0'],
            'description': 'Enable/disable Gradio analytics'
        },
        'PYTHONPATH': {
            'required': False,
            'description': 'Python path for module resolution'
        }
    }

    def validate_all(self) -> List[EnvironmentCheck]:
        """Validate every known variable (required first, then optional)."""
        results = []
        for var_name, config in self.REQUIRED_VARS.items():
            results.append(self._validate_variable(var_name, config, required=True))
        for var_name, config in self.OPTIONAL_VARS.items():
            results.append(self._validate_variable(var_name, config, required=False))
        return results

    def _validate_variable(self, var_name: str, config: Dict, required: bool) -> EnvironmentCheck:
        """Validate a single environment variable against its config rules.

        Fix: pattern checks now use ``re.fullmatch`` instead of ``re.match``.
        ``re.match`` only anchors at the start, so values with trailing
        garbage (e.g. ``2024-12-01-previewXYZ``) previously passed.
        """
        import re

        value = os.getenv(var_name)
        present = value is not None
        validation_result = True
        error_message = None

        if required and not present:
            validation_result = False
            error_message = f"Required environment variable {var_name} is not set"
        elif present:
            # Pattern rule: the whole value must match.
            if 'pattern' in config and not re.fullmatch(config['pattern'], value):
                validation_result = False
                error_message = f"{var_name} does not match expected pattern"

            # Minimum-length rule (used for keys/secrets).
            if 'min_length' in config and len(value) < config['min_length']:
                validation_result = False
                error_message = f"{var_name} is too short (minimum {config['min_length']} characters)"

            # Closed-set rule (case-insensitive).
            if 'valid_values' in config and value.lower() not in config['valid_values']:
                validation_result = False
                error_message = f"{var_name} must be one of: {config['valid_values']}"

        return EnvironmentCheck(
            name=var_name,
            required=required,
            present=present,
            value=value if present else None,
            validation_result=validation_result,
            error_message=error_message
        )

    def get_validation_report(self) -> Dict:
        """Build an aggregate report: overall status, counts, and details.

        Failures on required variables flip ``overall_status`` to 'FAIL'
        and land in ``errors``; failures on optional variables are counted
        as warnings only.
        """
        results = self.validate_all()

        report = {
            'overall_status': 'PASS',
            'total_checks': len(results),
            'passed': 0,
            'failed': 0,
            'warnings': 0,
            'details': [],
            'errors': [],
            'warnings_list': []
        }

        for result in results:
            detail = {
                'name': result.name,
                'required': result.required,
                'present': result.present,
                'status': 'PASS' if result.validation_result else 'FAIL',
                # Report only the length, never the value, to avoid leaking secrets.
                'value_length': len(result.value) if result.value else 0,
                'description': self._get_description(result.name)
            }
            if result.error_message:
                detail['error'] = result.error_message
            report['details'].append(detail)

            if result.validation_result:
                report['passed'] += 1
            else:
                report['failed'] += 1
                if result.required:
                    report['overall_status'] = 'FAIL'
                    report['errors'].append(result.error_message)
                else:
                    report['warnings'] += 1
                    report['warnings_list'].append(result.error_message)

        return report

    def _get_description(self, var_name: str) -> str:
        """Return the human-readable description for a variable name."""
        all_vars = {**self.REQUIRED_VARS, **self.OPTIONAL_VARS}
        return all_vars.get(var_name, {}).get('description', 'No description available')
177
+
178
+
179
@pytest.mark.unit
class TestEnvironmentValidator:
    """Unit tests for the EnvironmentValidator utility."""

    def test_validator_initialization(self):
        """Validator exposes non-empty required/optional variable tables."""
        validator = EnvironmentValidator()
        assert hasattr(validator, 'REQUIRED_VARS')
        assert hasattr(validator, 'OPTIONAL_VARS')
        assert len(validator.REQUIRED_VARS) > 0

    def test_validate_required_variable_present(self):
        """A well-formed required variable validates cleanly."""
        validator = EnvironmentValidator()
        env = {'AZURE_OPENAI_ENDPOINT': 'https://test.openai.azure.com/'}
        with patch.dict(os.environ, env):
            check = validator._validate_variable(
                'AZURE_OPENAI_ENDPOINT',
                validator.REQUIRED_VARS['AZURE_OPENAI_ENDPOINT'],
                required=True,
            )
            assert check.name == 'AZURE_OPENAI_ENDPOINT'
            assert check.required is True
            assert check.present is True
            assert check.validation_result is True
            assert check.error_message is None

    def test_validate_required_variable_missing(self):
        """A missing required variable fails with a clear message."""
        validator = EnvironmentValidator()
        with patch.dict(os.environ, {}, clear=True):
            check = validator._validate_variable(
                'AZURE_OPENAI_KEY',
                validator.REQUIRED_VARS['AZURE_OPENAI_KEY'],
                required=True,
            )
            assert check.name == 'AZURE_OPENAI_KEY'
            assert check.required is True
            assert check.present is False
            assert check.validation_result is False
            assert "Required environment variable" in check.error_message

    def test_validate_variable_pattern_match(self):
        """Pattern rules accept conforming values and reject others."""
        validator = EnvironmentValidator()
        version_config = validator.REQUIRED_VARS['AZURE_OPENAI_API_VERSION']

        # A conforming version string passes.
        with patch.dict(os.environ, {'AZURE_OPENAI_API_VERSION': '2024-12-01-preview'}):
            check = validator._validate_variable(
                'AZURE_OPENAI_API_VERSION', version_config, required=True
            )
            assert check.validation_result is True

        # A malformed version string fails the pattern rule.
        with patch.dict(os.environ, {'AZURE_OPENAI_API_VERSION': 'invalid-version'}):
            check = validator._validate_variable(
                'AZURE_OPENAI_API_VERSION', version_config, required=True
            )
            assert check.validation_result is False
            assert "does not match expected pattern" in check.error_message

    def test_validate_variable_min_length(self):
        """Minimum-length rules accept long values and reject short ones."""
        validator = EnvironmentValidator()
        key_config = validator.REQUIRED_VARS['AZURE_OPENAI_KEY']

        # 25 characters clears the 20-character minimum.
        with patch.dict(os.environ, {'AZURE_OPENAI_KEY': 'a' * 25}):
            check = validator._validate_variable(
                'AZURE_OPENAI_KEY', key_config, required=True
            )
            assert check.validation_result is True

        # A 5-character key is rejected.
        with patch.dict(os.environ, {'AZURE_OPENAI_KEY': 'short'}):
            check = validator._validate_variable(
                'AZURE_OPENAI_KEY', key_config, required=True
            )
            assert check.validation_result is False
            assert "is too short" in check.error_message
273
+
274
+
275
@pytest.mark.integration
class TestEnvironmentValidation:
    """Integration tests exercising full validation runs and reports."""

    def test_complete_validation_all_present(self):
        """With a complete environment, every required check passes."""
        validator = EnvironmentValidator()
        full_env = {
            'AZURE_OPENAI_ENDPOINT': 'https://test.openai.azure.com/',
            'AZURE_OPENAI_KEY': 'a' * 30,
            'AZURE_OPENAI_API_VERSION': '2024-12-01-preview',
            'AZURE_OPENAI_LLM_DEPLOYMENT': 'gpt-4',
            'AZURE_OPENAI_LLM_MODEL': 'gpt-4',
            'AZURE_SPEECH_KEY': 'b' * 25,
            'AZURE_SPEECH_REGION': 'eastus',
        }

        with patch.dict(os.environ, full_env):
            checks = validator.validate_all()

            # One result per defined variable; at least the five required ones.
            assert len(checks) >= 5

            # Every required variable validates.
            assert all(c.validation_result for c in checks if c.required)

    def test_validation_report_generation(self):
        """The report carries status, counters, and per-variable details."""
        validator = EnvironmentValidator()
        env = {
            'AZURE_OPENAI_ENDPOINT': 'https://test.openai.azure.com/',
            'AZURE_OPENAI_KEY': 'a' * 30,
            'AZURE_OPENAI_API_VERSION': '2024-12-01-preview',
            'AZURE_OPENAI_LLM_DEPLOYMENT': 'gpt-4',
            'AZURE_OPENAI_LLM_MODEL': 'gpt-4',
        }

        with patch.dict(os.environ, env):
            report = validator.get_validation_report()

            for field in ('overall_status', 'total_checks', 'passed', 'failed', 'details'):
                assert field in report

            # All required vars are present, so the report passes clean.
            assert report['overall_status'] == 'PASS'
            assert report['failed'] == 0

    def test_validation_report_with_failures(self):
        """Missing/invalid required variables produce a FAIL report."""
        validator = EnvironmentValidator()
        broken_env = {
            'AZURE_OPENAI_ENDPOINT': 'https://test.openai.azure.com/',
            'AZURE_OPENAI_KEY': 'short',  # below the minimum length
            # Remaining required variables intentionally absent.
        }

        with patch.dict(os.environ, broken_env, clear=True):
            report = validator.get_validation_report()

            assert report['overall_status'] == 'FAIL'
            assert report['failed'] > 0
            assert len(report['errors']) > 0
345
+
346
+
347
@pytest.mark.unit
class TestEnvironmentConfiguration:
    """Tests around loading configuration from .env files and os.environ."""

    def test_dotenv_file_loading(self):
        """Variables written to a .env file are loaded into the process."""
        env_content = """
AZURE_OPENAI_ENDPOINT=https://test.openai.azure.com/
AZURE_OPENAI_KEY=test_key_12345678901234567890
AZURE_OPENAI_API_VERSION=2024-12-01-preview
AZURE_OPENAI_LLM_DEPLOYMENT=gpt-4
AZURE_OPENAI_LLM_MODEL=gpt-4
"""
        with tempfile.NamedTemporaryFile(mode='w', suffix='.env', delete=False) as handle:
            handle.write(env_content.strip())
            env_path = handle.name

        try:
            from dotenv import load_dotenv
            load_dotenv(env_path)

            assert os.getenv('AZURE_OPENAI_ENDPOINT') == 'https://test.openai.azure.com/'
            assert os.getenv('AZURE_OPENAI_KEY') == 'test_key_12345678901234567890'
        finally:
            # Always remove the temp file, even if assertions fail.
            os.unlink(env_path)

    def test_environment_priority(self):
        """Process environment wins over values from a .env file."""
        with patch.dict(os.environ, {'AZURE_OPENAI_ENDPOINT': 'https://env.openai.azure.com/'}):
            assert os.getenv('AZURE_OPENAI_ENDPOINT') == 'https://env.openai.azure.com/'

    def test_configuration_export(self):
        """Exported config redacts key material while keeping other values."""
        env = {
            'AZURE_OPENAI_ENDPOINT': 'https://test.openai.azure.com/',
            'AZURE_OPENAI_KEY': 'secret_key_12345678901234567890',
            'AZURE_OPENAI_API_VERSION': '2024-12-01-preview',
        }

        with patch.dict(os.environ, env):
            exported = {}
            for name in ('AZURE_OPENAI_ENDPOINT', 'AZURE_OPENAI_KEY', 'AZURE_OPENAI_API_VERSION'):
                raw = os.getenv(name)
                if raw and 'KEY' in name:
                    # Keep the first 8 characters, star out the rest.
                    exported[name] = raw[:8] + '*' * (len(raw) - 8)
                else:
                    exported[name] = raw

            assert exported['AZURE_OPENAI_ENDPOINT'] == 'https://test.openai.azure.com/'
            assert exported['AZURE_OPENAI_KEY'].startswith('secret_k')
            assert exported['AZURE_OPENAI_KEY'].endswith('*' * 24)
406
+
407
+
408
@pytest.mark.integration
class TestApplicationEnvironmentIntegration:
    """Tests that the app's components pick up environment configuration."""

    _FULL_ENV = {
        'AZURE_OPENAI_ENDPOINT': 'https://test.openai.azure.com/',
        'AZURE_OPENAI_KEY': 'test_key_12345678901234567890',
        'AZURE_OPENAI_API_VERSION': '2024-12-01-preview',
        'AZURE_OPENAI_LLM_DEPLOYMENT': 'gpt-4',
        'AZURE_OPENAI_LLM_MODEL': 'gpt-4',
    }

    def test_azure_client_initialization(self):
        """The app constructs its Azure client from environment variables."""
        with patch.dict(os.environ, self._FULL_ENV):
            with patch('openai.AzureOpenAI') as mock_client:
                from app import client  # triggers client construction

                mock_client.assert_called_once()
                kwargs = mock_client.call_args[1]
                assert kwargs['azure_endpoint'] == self._FULL_ENV['AZURE_OPENAI_ENDPOINT']
                assert kwargs['api_key'] == self._FULL_ENV['AZURE_OPENAI_KEY']
                assert kwargs['api_version'] == self._FULL_ENV['AZURE_OPENAI_API_VERSION']

    def test_missing_environment_error_handling(self):
        """With an empty environment, the app fails gracefully or loudly."""
        with patch.dict(os.environ, {}, clear=True):
            try:
                from app import client
                # No exception means the app handles a missing env itself.
                assert True
            except Exception as e:
                # An exception is acceptable only if the message is informative.
                assert 'environment' in str(e).lower() or 'key' in str(e).lower()

    def test_application_health_check(self):
        """With a complete environment, validation passes and imports work."""
        with patch.dict(os.environ, self._FULL_ENV):
            validator = EnvironmentValidator()
            report = validator.get_validation_report()
            assert report['overall_status'] == 'PASS'

            try:
                from app import AgenticSkillBuilder, LessonAgent, QuizAgent, ProgressAgent
                assert True  # all components import cleanly
            except ImportError as e:
                pytest.fail(f"Failed to import application components: {e}")
469
+
470
+
471
@pytest.mark.unit
class TestEnvironmentSecurityValidation:
    """Security-oriented checks: secrets must not leak through validation."""

    def test_secret_redaction_in_logs(self):
        """Redacted secrets keep a short prefix and their original length."""
        secret_value = "secret_key_abcdefghijklmnopqrstuvwxyz"

        def redact_secret(value: str, show_chars: int = 8) -> str:
            # Fully mask values shorter than the visible prefix.
            if len(value) <= show_chars:
                return '*' * len(value)
            return value[:show_chars] + '*' * (len(value) - show_chars)

        masked = redact_secret(secret_value)

        assert masked.startswith('secret_k')
        assert '*' in masked
        assert len(masked) == len(secret_value)

    def test_environment_variable_validation_security(self):
        """Validating a secret records presence without test-output leaks."""
        validator = EnvironmentValidator()

        with patch.dict(os.environ, {'AZURE_OPENAI_KEY': 'very_secret_key_123456789'}):
            check = validator._validate_variable(
                'AZURE_OPENAI_KEY',
                validator.REQUIRED_VARS['AZURE_OPENAI_KEY'],
                required=True,
            )

            assert check.present is True
            assert check.validation_result is True
            # Intentionally no assertion on check.value: keeps the secret
            # out of pytest failure output.
tests/test_gamification.py ADDED
@@ -0,0 +1,372 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for the gamification system in SkillSprout
3
+ """
4
+ import pytest
5
+ from unittest.mock import Mock, patch
6
+ import sys
7
+ import os
8
+
9
+ # Add parent directory to path
10
+ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
11
+
12
+ from space_app import (
13
+ GamificationManager, Achievement, UserStats, EnhancedUserProgress
14
+ )
15
+
16
class TestAchievement:
    """Tests for the Achievement data class."""

    def test_achievement_creation(self):
        """All explicitly passed fields are stored as given."""
        badge = Achievement(
            id="test_achievement",
            name="Test Achievement",
            description="A test achievement",
            icon="🏆",
            unlocked=False,
            unlock_condition="Complete 1 lesson",
        )

        assert badge.id == "test_achievement"
        assert badge.name == "Test Achievement"
        assert badge.unlocked is False
        assert badge.icon == "🏆"

    def test_achievement_defaults(self):
        """Omitted fields fall back to locked / empty condition."""
        badge = Achievement(
            id="simple",
            name="Simple",
            description="Simple achievement",
            icon="⭐",
        )

        assert badge.unlocked is False
        assert badge.unlock_condition == ""
46
+
47
class TestUserStats:
    """Tests for the UserStats points/level/accuracy logic."""

    def test_user_stats_creation(self):
        """A fresh stats object starts from zero at level 1."""
        stats = UserStats(user_id="test_user")

        assert stats.user_id == "test_user"
        assert stats.total_points == 0
        assert stats.level == 1
        assert stats.achievements == []
        assert stats.streak_days == 0
        assert stats.total_lessons == 0
        assert stats.total_quizzes == 0
        assert stats.correct_answers == 0

    def test_add_points_no_level_up(self):
        """Small awards accumulate without changing the level."""
        stats = UserStats(user_id="test_user")
        stats.add_points(50)

        assert stats.total_points == 50
        assert stats.level == 1  # below the level-2 threshold

    def test_add_points_level_up(self):
        """Crossing the first threshold bumps the user to level 2."""
        stats = UserStats(user_id="test_user")
        stats.add_points(150)

        assert stats.total_points == 150
        assert stats.level == 2

    def test_add_points_multiple_levels(self):
        """A single large award can jump several levels at once."""
        stats = UserStats(user_id="test_user")
        stats.add_points(450)

        assert stats.total_points == 450
        assert stats.level == 5

    def test_add_points_max_level(self):
        """Level is capped at 10 no matter how many points are earned."""
        stats = UserStats(user_id="test_user")
        stats.add_points(2000)

        assert stats.level == 10

    def test_get_accuracy_no_quizzes(self):
        """Accuracy is 0.0 before any quiz has been taken (no div-by-zero)."""
        stats = UserStats(user_id="test_user")

        assert stats.get_accuracy() == 0.0

    def test_get_accuracy_with_quizzes(self):
        """Accuracy is correct answers over quizzes, as a percentage."""
        stats = UserStats(user_id="test_user")
        stats.total_quizzes = 10
        stats.correct_answers = 8

        assert stats.get_accuracy() == 80.0
124
+
125
class TestEnhancedUserProgress:
    """Tests for the EnhancedUserProgress defaults."""

    def test_enhanced_progress_creation(self):
        """A new progress record starts empty for the given user/skill."""
        record = EnhancedUserProgress(
            user_id="test_user",
            skill="Python Programming",
        )

        assert record.user_id == "test_user"
        assert record.skill == "Python Programming"
        assert record.lessons_completed == 0
        assert record.quiz_scores == []
        assert record.time_spent == []
        assert record.mastery_level == 0.0
141
+
142
+ class TestGamificationManager:
143
+ """Test cases for GamificationManager class"""
144
+
145
+ @pytest.fixture
146
+ def gamification_manager(self):
147
+ """Create a GamificationManager instance for testing"""
148
+ return GamificationManager()
149
+
150
+ def test_gamification_manager_initialization(self, gamification_manager):
151
+ """Test GamificationManager initialization"""
152
+ assert len(gamification_manager.user_stats) == 0
153
+ assert len(gamification_manager.achievements) > 0
154
+
155
+ # Check that all required achievements exist
156
+ required_achievements = [
157
+ "first_steps", "quiz_master", "persistent", "scholar",
158
+ "expert", "polyglot", "perfectionist", "speed",
159
+ "consistent", "explorer"
160
+ ]
161
+
162
+ for achievement_id in required_achievements:
163
+ assert achievement_id in gamification_manager.achievements
164
+
165
+ def test_get_user_stats_new_user(self, gamification_manager):
166
+ """Test getting stats for a new user"""
167
+ # Act
168
+ stats = gamification_manager.get_user_stats("new_user")
169
+
170
+ # Assert
171
+ assert isinstance(stats, UserStats)
172
+ assert stats.user_id == "new_user"
173
+ assert stats.total_points == 0
174
+ assert stats.level == 1
175
+
176
+ def test_get_user_stats_existing_user(self, gamification_manager):
177
+ """Test getting stats for existing user"""
178
+ # Arrange
179
+ user_id = "existing_user"
180
+ stats1 = gamification_manager.get_user_stats(user_id)
181
+ stats1.total_points = 100
182
+
183
+ # Act
184
+ stats2 = gamification_manager.get_user_stats(user_id)
185
+
186
+ # Assert
187
+ assert stats2.total_points == 100
188
+ assert stats1 is stats2 # Should be same object
189
+
190
+ def test_check_achievements_first_steps(self, gamification_manager):
191
+ """Test unlocking first steps achievement"""
192
+ # Arrange
193
+ user_id = "test_user"
194
+ progress = EnhancedUserProgress(user_id=user_id, skill="Python")
195
+
196
+ # Set up conditions for first steps achievement
197
+ stats = gamification_manager.get_user_stats(user_id)
198
+ stats.total_lessons = 1
199
+
200
+ # Act
201
+ newly_unlocked = gamification_manager.check_achievements(user_id, progress)
202
+
203
+ # Assert
204
+ assert len(newly_unlocked) > 0
205
+ achievement_ids = [a.id for a in newly_unlocked]
206
+ assert "first_steps" in achievement_ids
207
+ assert "first_steps" in stats.achievements
208
+
209
+ def test_check_achievements_quiz_master(self, gamification_manager):
210
+ """Test unlocking quiz master achievement"""
211
+ # Arrange
212
+ user_id = "quiz_master_user"
213
+ progress = EnhancedUserProgress(user_id=user_id, skill="Python")
214
+ progress.quiz_scores = [100, 80, 100] # Has perfect score
215
+
216
+ # Act
217
+ newly_unlocked = gamification_manager.check_achievements(user_id, progress)
218
+
219
+ # Assert
220
+ achievement_ids = [a.id for a in newly_unlocked]
221
+ assert "quiz_master" in achievement_ids
222
+
223
+ def test_check_achievements_persistent(self, gamification_manager):
224
+ """Test unlocking persistent learner achievement"""
225
+ # Arrange
226
+ user_id = "persistent_user"
227
+ progress = EnhancedUserProgress(user_id=user_id, skill="Python")
228
+
229
+ stats = gamification_manager.get_user_stats(user_id)
230
+ stats.total_lessons = 5
231
+
232
+ # Act
233
+ newly_unlocked = gamification_manager.check_achievements(user_id, progress)
234
+
235
+ # Assert
236
+ achievement_ids = [a.id for a in newly_unlocked]
237
+ assert "persistent" in achievement_ids
238
+
239
+ def test_check_achievements_no_new_unlocks(self, gamification_manager):
240
+ """Test checking achievements when none should be unlocked"""
241
+ # Arrange
242
+ user_id = "minimal_user"
243
+ progress = EnhancedUserProgress(user_id=user_id, skill="Python")
244
+ # User has minimal progress, shouldn't unlock anything
245
+
246
+ # Act
247
+ newly_unlocked = gamification_manager.check_achievements(user_id, progress)
248
+
249
+ # Assert
250
+ assert len(newly_unlocked) == 0
251
+
252
+ def test_check_achievements_already_unlocked(self, gamification_manager):
253
+ """Test that already unlocked achievements aren't returned again"""
254
+ # Arrange
255
+ user_id = "repeat_user"
256
+ progress = EnhancedUserProgress(user_id=user_id, skill="Python")
257
+
258
+ stats = gamification_manager.get_user_stats(user_id)
259
+ stats.total_lessons = 1
260
+ stats.achievements = ["first_steps"] # Already unlocked
261
+
262
+ # Act
263
+ newly_unlocked = gamification_manager.check_achievements(user_id, progress)
264
+
265
+ # Assert
266
+ achievement_ids = [a.id for a in newly_unlocked]
267
+ assert "first_steps" not in achievement_ids
268
+
269
+ def test_check_achievements_points_awarded(self, gamification_manager):
270
+ """Test that bonus points are awarded for achievements"""
271
+ # Arrange
272
+ user_id = "points_user"
273
+ progress = EnhancedUserProgress(user_id=user_id, skill="Python")
274
+
275
+ stats = gamification_manager.get_user_stats(user_id)
276
+ initial_points = stats.total_points
277
+ stats.total_lessons = 1 # Will unlock first_steps
278
+
279
+ # Act
280
+ newly_unlocked = gamification_manager.check_achievements(user_id, progress)
281
+
282
+ # Assert
283
+ assert len(newly_unlocked) > 0
284
+ assert stats.total_points > initial_points # Should have bonus points
285
+
286
+ def test_achievement_perfectionist(self, gamification_manager):
287
+ """Test perfectionist achievement (5 perfect scores)"""
288
+ # Arrange
289
+ user_id = "perfectionist_user"
290
+ progress = EnhancedUserProgress(user_id=user_id, skill="Python")
291
+ progress.quiz_scores = [100, 100, 100, 100, 100, 90] # 5 perfect scores
292
+
293
+ # Act
294
+ newly_unlocked = gamification_manager.check_achievements(user_id, progress)
295
+
296
+ # Assert
297
+ achievement_ids = [a.id for a in newly_unlocked]
298
+ assert "perfectionist" in achievement_ids
299
+
300
+ def test_achievement_consistent(self, gamification_manager):
301
+ """Test consistent achievement (7-day streak)"""
302
+ # Arrange
303
+ user_id = "consistent_user"
304
+ progress = EnhancedUserProgress(user_id=user_id, skill="Python")
305
+
306
+ stats = gamification_manager.get_user_stats(user_id)
307
+ stats.streak_days = 7
308
+
309
+ # Act
310
+ newly_unlocked = gamification_manager.check_achievements(user_id, progress)
311
+
312
+ # Assert
313
+ achievement_ids = [a.id for a in newly_unlocked]
314
+ assert "consistent" in achievement_ids
315
+
316
+ @pytest.mark.integration
317
+ class TestGamificationIntegration:
318
+ """Integration tests for gamification with other systems"""
319
+
320
+ def test_gamification_with_lesson_completion(self):
321
+ """Test gamification integration when completing lessons"""
322
+ # This would test the integration between the main app and gamification
323
+ # Requires importing the enhanced app components
324
+ pass
325
+
326
+ def test_gamification_with_quiz_submission(self):
327
+ """Test gamification integration when submitting quizzes"""
328
+ pass
329
+
330
+ @pytest.mark.unit
331
+ class TestAchievementConditions:
332
+ """Test specific achievement unlock conditions"""
333
+
334
+ @pytest.fixture
335
+ def sample_progress(self):
336
+ """Create sample progress data for testing"""
337
+ return EnhancedUserProgress(
338
+ user_id="test_user",
339
+ skill="Python Programming",
340
+ lessons_completed=0,
341
+ quiz_scores=[],
342
+ time_spent=[],
343
+ mastery_level=0.0
344
+ )
345
+
346
+ def test_scholar_achievement_condition(self, gamification_manager, enhanced_user_progress):
347
+ """Test scholar achievement (10 lessons)"""
348
+ # Arrange
349
+ stats = gamification_manager.get_user_stats("test_user")
350
+ stats.total_lessons = 10
351
+
352
+ # Act
353
+ newly_unlocked = gamification_manager.check_achievements("test_user", enhanced_user_progress)
354
+
355
+ # Assert
356
+ achievement_ids = [a.id for a in newly_unlocked]
357
+ assert "scholar" in achievement_ids
358
+
359
+ def test_expert_achievement_condition(self, gamification_manager, enhanced_user_progress):
360
+ """Test expert achievement (20 lessons)"""
361
+ # Arrange
362
+ stats = gamification_manager.get_user_stats("test_user")
363
+ stats.total_lessons = 20
364
+
365
+ # Act
366
+ newly_unlocked = gamification_manager.check_achievements("test_user", enhanced_user_progress)
367
+
368
+ # Assert
369
+ achievement_ids = [a.id for a in newly_unlocked]
370
+ assert "expert" in achievement_ids
371
+ assert "scholar" in achievement_ids # Should also unlock scholar
372
+ assert "persistent" in achievement_ids # Should also unlock persistent
tests/test_mcp_endpoints.py ADDED
@@ -0,0 +1,371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for MCP server endpoints in SkillSprout
3
+ """
4
+ import pytest
5
+ import requests
6
+ import json
7
+ import asyncio
8
+ import threading
9
+ import time
10
+ import subprocess
11
+ import sys
12
+ from typing import Dict, Any
13
+
14
+ @pytest.mark.integration
15
+ class TestMCPEndpoints:
16
+ """Integration tests for MCP server endpoints"""
17
+
18
+ @pytest.fixture(scope="class", autouse=True)
19
+ def setup_test_server(self, test_server_url):
20
+ """Start MCP server for testing"""
21
+ # Try to connect to existing server first
22
+ try:
23
+ response = requests.get(test_server_url, timeout=2)
24
+ if response.status_code == 200:
25
+ print(f"✅ MCP server already running at {test_server_url}")
26
+ yield
27
+ return
28
+ except:
29
+ pass
30
+
31
+ # Start server if not running
32
+ print(f"🚀 Starting MCP server for testing...")
33
+ server_process = subprocess.Popen(
34
+ [sys.executable, "space_app.py"],
35
+ stdout=subprocess.PIPE,
36
+ stderr=subprocess.PIPE
37
+ )
38
+
39
+ # Wait for server to start
40
+ max_attempts = 30
41
+ for attempt in range(max_attempts):
42
+ try:
43
+ response = requests.get(test_server_url, timeout=1)
44
+ if response.status_code == 200:
45
+ print(f"✅ MCP server started successfully")
46
+ break
47
+ except:
48
+ time.sleep(1)
49
+ else:
50
+ server_process.terminate()
51
+ pytest.fail("Failed to start MCP server for testing")
52
+
53
+ yield
54
+
55
+ # Cleanup
56
+ server_process.terminate()
57
+ server_process.wait()
58
+
59
+ def test_root_endpoint(self, test_server_url):
60
+ """Test root endpoint returns server information"""
61
+ # Act
62
+ response = requests.get(test_server_url, timeout=10)
63
+
64
+ # Assert
65
+ assert response.status_code == 200
66
+ data = response.json()
67
+ assert "name" in data
68
+ assert "SkillSprout" in data["name"]
69
+ assert "version" in data
70
+ assert "hackathon" in data
71
+ assert data["track"] == "mcp-server-track"
72
+
73
+ def test_get_available_skills(self, test_server_url):
74
+ """Test getting list of available skills"""
75
+ # Act
76
+ response = requests.get(f"{test_server_url}/mcp/skills", timeout=10)
77
+
78
+ # Assert
79
+ assert response.status_code == 200
80
+ data = response.json()
81
+ assert "predefined_skills" in data
82
+ skills = data["predefined_skills"]
83
+ assert isinstance(skills, list)
84
+ assert len(skills) > 0
85
+ assert "Python Programming" in skills
86
+
87
+ def test_generate_lesson_valid_request(self, test_server_url):
88
+ """Test lesson generation with valid request"""
89
+ # Arrange
90
+ lesson_data = {
91
+ "skill": "Python Programming",
92
+ "user_id": "test_user",
93
+ "difficulty": "beginner"
94
+ }
95
+
96
+ # Act
97
+ response = requests.post(
98
+ f"{test_server_url}/mcp/lesson/generate",
99
+ json=lesson_data,
100
+ timeout=30
101
+ )
102
+
103
+ # Assert
104
+ assert response.status_code == 200
105
+ data = response.json()
106
+ assert "lesson" in data
107
+ lesson = data["lesson"]
108
+ assert lesson["skill"] == "Python Programming"
109
+ assert lesson["difficulty"] == "beginner"
110
+ assert "title" in lesson
111
+ assert "content" in lesson
112
+ assert "mcp_server" in data
113
+ assert data["mcp_server"] == "SkillSprout"
114
+
115
+ def test_generate_lesson_custom_skill(self, test_server_url):
116
+ """Test lesson generation with custom skill"""
117
+ # Arrange
118
+ lesson_data = {
119
+ "skill": "Custom Cooking Skill",
120
+ "user_id": "chef_user",
121
+ "difficulty": "intermediate"
122
+ }
123
+
124
+ # Act
125
+ response = requests.post(
126
+ f"{test_server_url}/mcp/lesson/generate",
127
+ json=lesson_data,
128
+ timeout=30
129
+ )
130
+
131
+ # Assert
132
+ assert response.status_code == 200
133
+ data = response.json()
134
+ assert data["lesson"]["skill"] == "Custom Cooking Skill"
135
+ assert data["lesson"]["difficulty"] == "intermediate"
136
+
137
+ def test_generate_lesson_missing_skill(self, test_server_url):
138
+ """Test lesson generation with missing skill parameter"""
139
+ # Arrange
140
+ lesson_data = {
141
+ "user_id": "test_user",
142
+ "difficulty": "beginner"
143
+ }
144
+
145
+ # Act
146
+ response = requests.post(
147
+ f"{test_server_url}/mcp/lesson/generate",
148
+ json=lesson_data,
149
+ timeout=10
150
+ )
151
+
152
+ # Assert
153
+ assert response.status_code == 422 # Validation error
154
+
155
+ def test_get_user_progress_new_user(self, test_server_url):
156
+ """Test getting progress for new user"""
157
+ # Act
158
+ response = requests.get(
159
+ f"{test_server_url}/mcp/progress/new_test_user",
160
+ timeout=10
161
+ )
162
+
163
+ # Assert
164
+ assert response.status_code == 200
165
+ data = response.json()
166
+ assert data["user_id"] == "new_test_user"
167
+ assert "skills_progress" in data
168
+ assert "total_skills_learning" in data
169
+ assert "mcp_server" in data
170
+ assert data["mcp_server"] == "SkillSprout"
171
+
172
+ def test_get_user_progress_with_skill_filter(self, test_server_url):
173
+ """Test getting progress filtered by skill"""
174
+ # Act
175
+ response = requests.get(
176
+ f"{test_server_url}/mcp/progress/test_user?skill=Python%20Programming",
177
+ timeout=10
178
+ )
179
+
180
+ # Assert
181
+ assert response.status_code == 200
182
+ data = response.json()
183
+ assert data["user_id"] == "test_user"
184
+
185
+ def test_submit_quiz_results(self, test_server_url):
186
+ """Test submitting quiz results"""
187
+ # Arrange
188
+ quiz_data = {
189
+ "user_id": "test_user",
190
+ "skill": "Python Programming",
191
+ "lesson_title": "Variables and Data Types",
192
+ "answers": ["A storage container", "True", "int"]
193
+ }
194
+
195
+ # Act
196
+ response = requests.post(
197
+ f"{test_server_url}/mcp/quiz/submit",
198
+ json=quiz_data,
199
+ timeout=15
200
+ )
201
+
202
+ # Assert
203
+ assert response.status_code == 200
204
+ data = response.json()
205
+ assert "score" in data
206
+ assert "feedback" in data
207
+ assert data["user_id"] == "test_user"
208
+ assert "mcp_server" in data
209
+ assert data["mcp_server"] == "SkillSprout"
210
+
211
+ def test_submit_quiz_empty_answers(self, test_server_url):
212
+ """Test submitting quiz with empty answers"""
213
+ # Arrange
214
+ quiz_data = {
215
+ "user_id": "test_user",
216
+ "skill": "Data Science",
217
+ "lesson_title": "Introduction to Data",
218
+ "answers": []
219
+ }
220
+
221
+ # Act
222
+ response = requests.post(
223
+ f"{test_server_url}/mcp/quiz/submit",
224
+ json=quiz_data,
225
+ timeout=10
226
+ )
227
+
228
+ # Assert
229
+ assert response.status_code == 200
230
+ data = response.json()
231
+ assert data["score"] == 0
232
+
233
+ def test_submit_quiz_invalid_data(self, test_server_url):
234
+ """Test submitting quiz with invalid data"""
235
+ # Arrange
236
+ quiz_data = {
237
+ "user_id": "test_user",
238
+ # Missing required fields
239
+ }
240
+
241
+ # Act
242
+ response = requests.post(
243
+ f"{test_server_url}/mcp/quiz/submit",
244
+ json=quiz_data,
245
+ timeout=10
246
+ )
247
+
248
+ # Assert
249
+ assert response.status_code == 422 # Validation error
250
+
251
+ @pytest.mark.integration
252
+ class TestMCPEndpointsSlow:
253
+ """Slower integration tests that require more setup"""
254
+
255
+ @pytest.mark.slow
256
+ def test_end_to_end_learning_flow(self, test_server_url):
257
+ """Test complete learning flow through MCP endpoints"""
258
+ user_id = "e2e_test_user"
259
+ skill = "JavaScript"
260
+
261
+ # Step 1: Get available skills
262
+ response = requests.get(f"{test_server_url}/mcp/skills")
263
+ assert response.status_code == 200
264
+
265
+ # Step 2: Generate a lesson
266
+ lesson_data = {
267
+ "skill": skill,
268
+ "user_id": user_id,
269
+ "difficulty": "beginner"
270
+ }
271
+ response = requests.post(
272
+ f"{test_server_url}/mcp/lesson/generate",
273
+ json=lesson_data,
274
+ timeout=30
275
+ )
276
+ assert response.status_code == 200
277
+ lesson_response = response.json()
278
+ lesson_title = lesson_response["lesson"]["title"]
279
+
280
+ # Step 3: Submit quiz results
281
+ quiz_data = {
282
+ "user_id": user_id,
283
+ "skill": skill,
284
+ "lesson_title": lesson_title,
285
+ "answers": ["answer1", "answer2"]
286
+ }
287
+ response = requests.post(
288
+ f"{test_server_url}/mcp/quiz/submit",
289
+ json=quiz_data,
290
+ timeout=15
291
+ )
292
+ assert response.status_code == 200
293
+
294
+ # Step 4: Check progress
295
+ response = requests.get(f"{test_server_url}/mcp/progress/{user_id}")
296
+ assert response.status_code == 200
297
+ progress_data = response.json()
298
+ assert skill in progress_data["skills_progress"]
299
+
300
+ @pytest.mark.slow
301
+ def test_multiple_concurrent_requests(self, test_server_url):
302
+ """Test server handling multiple concurrent requests"""
303
+ import concurrent.futures
304
+
305
+ def make_request(user_id):
306
+ """Make a lesson generation request"""
307
+ lesson_data = {
308
+ "skill": "Python Programming",
309
+ "user_id": f"concurrent_user_{user_id}",
310
+ "difficulty": "beginner"
311
+ }
312
+ response = requests.post(
313
+ f"{test_server_url}/mcp/lesson/generate",
314
+ json=lesson_data,
315
+ timeout=30
316
+ )
317
+ return response.status_code == 200
318
+
319
+ # Act - Make 5 concurrent requests
320
+ with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
321
+ futures = [executor.submit(make_request, i) for i in range(5)]
322
+ results = [future.result() for future in concurrent.futures.as_completed(futures)]
323
+
324
+ # Assert - All requests should succeed
325
+ assert all(results)
326
+
327
+ @pytest.mark.unit
328
+ class TestMCPErrorHandling:
329
+ """Unit tests for MCP error handling scenarios"""
330
+
331
+ def test_invalid_endpoint(self, test_server_url):
332
+ """Test accessing invalid endpoint"""
333
+ try:
334
+ response = requests.get(f"{test_server_url}/invalid/endpoint", timeout=5)
335
+ assert response.status_code == 404
336
+ except requests.exceptions.ConnectionError:
337
+ pytest.skip("MCP server not running")
338
+
339
+ def test_malformed_json_request(self, test_server_url):
340
+ """Test sending malformed JSON"""
341
+ try:
342
+ response = requests.post(
343
+ f"{test_server_url}/mcp/lesson/generate",
344
+ data="invalid json",
345
+ headers={"Content-Type": "application/json"},
346
+ timeout=5
347
+ )
348
+ assert response.status_code in [400, 422]
349
+ except requests.exceptions.ConnectionError:
350
+ pytest.skip("MCP server not running")
351
+
352
+ class TestMCPHelpers:
353
+ """Helper functions for MCP testing"""
354
+
355
+ @staticmethod
356
+ def is_server_running(url: str) -> bool:
357
+ """Check if MCP server is running"""
358
+ try:
359
+ response = requests.get(url, timeout=2)
360
+ return response.status_code == 200
361
+ except:
362
+ return False
363
+
364
+ @staticmethod
365
+ def wait_for_server(url: str, timeout: int = 30) -> bool:
366
+ """Wait for server to become available"""
367
+ for _ in range(timeout):
368
+ if TestMCPHelpers.is_server_running(url):
369
+ return True
370
+ time.sleep(1)
371
+ return False
tests/test_real_functionality.py ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Integration test using real Azure OpenAI API - run this when you want to test the full stack
3
+ """
4
+ import pytest
5
+ import asyncio
6
+ import os
7
+ from space_app import AgenticSkillBuilder
8
+
9
+ @pytest.mark.integration
10
+ @pytest.mark.skipif(not os.getenv("AZURE_OPENAI_KEY"), reason="No Azure API key")
11
+ class TestRealIntegration:
12
+ """Integration tests using real Azure OpenAI API"""
13
+
14
+ def test_full_lesson_flow(self):
15
+ """Test the complete lesson flow with real AI"""
16
+ app = AgenticSkillBuilder()
17
+
18
+ # This will make real API calls - that's OK for integration tests!
19
+ skill = "Basic Addition"
20
+
21
+ try:
22
+ # Test lesson generation
23
+ loop = asyncio.new_event_loop()
24
+ asyncio.set_event_loop(loop)
25
+
26
+ lesson_content, _, _ = loop.run_until_complete(app.start_lesson(skill))
27
+
28
+ # Basic validation
29
+ assert "addition" in lesson_content.lower() or "add" in lesson_content.lower()
30
+ assert len(lesson_content) > 100 # Should be substantial content
31
+
32
+ print(f"✅ Real lesson generated for '{skill}'")
33
+ print(f"Content length: {len(lesson_content)} characters")
34
+
35
+ except Exception as e:
36
+ pytest.skip(f"Integration test failed (this is OK): {e}")
37
+
38
+ @pytest.mark.unit
39
+ class TestWithoutMocks:
40
+ """Unit tests focusing on business logic without complex mocks"""
41
+
42
+ def test_app_initialization(self):
43
+ """Test that the app initializes correctly"""
44
+ app = AgenticSkillBuilder()
45
+
46
+ assert app.predefined_skills is not None
47
+ assert len(app.predefined_skills) > 0
48
+ assert "Python Programming" in app.predefined_skills
49
+
50
+ def test_progress_tracking(self):
51
+ """Test progress tracking logic"""
52
+ from space_app import EnhancedUserProgress
53
+
54
+ progress = EnhancedUserProgress("test_user", "Math")
55
+
56
+ # Test initial state
57
+ assert progress.lessons_completed == 0
58
+ assert progress.mastery_level == 0.0
59
+ assert len(progress.quiz_scores) == 0
60
+
61
+ # Test adding quiz scores
62
+ progress.quiz_scores = [0.8, 0.9, 0.7]
63
+ progress.lessons_completed = 3
64
+
65
+ # Test mastery calculation
66
+ mastery = progress.calculate_mastery()
67
+ assert mastery > 0
68
+ assert mastery <= 100
69
+
70
+ def test_gamification_basics(self):
71
+ """Test gamification without complex mocks"""
72
+ from space_app import GamificationManager, UserStats
73
+
74
+ gm = GamificationManager()
75
+
76
+ # Test user stats creation
77
+ stats = gm.get_user_stats("test_user")
78
+ assert isinstance(stats, UserStats)
79
+ assert stats.user_id == "test_user"
80
+ assert stats.total_points == 0
81
+
82
+ # Test point addition
83
+ stats.add_points(100)
84
+ assert stats.total_points == 100
85
+
86
+ # Test level calculation
87
+ initial_level = stats.level
88
+ stats.add_points(500) # Should trigger level up
89
+ assert stats.level >= initial_level
validate_hackathon.py DELETED
@@ -1,235 +0,0 @@
1
- """
2
- Hackathon Validation Script
3
- Tests the MCP server functionality for submission requirements
4
- """
5
-
6
- import asyncio
7
- import requests
8
- import time
9
- import subprocess
10
- import threading
11
- from datetime import datetime
12
-
13
def test_mcp_server_endpoints():
    """Smoke-test the MCP server's GET endpoints for hackathon compliance.

    Probes a locally running MCP server on port 8001 and prints a
    pass/fail line per endpoint plus a summary.

    Returns:
        list[tuple[str, bool, int | str]]: one ``(test_name, passed,
        status)`` entry per endpoint, where ``status`` is the HTTP status
        code or the string ``"Connection Error"``.
    """
    print("🧪 HACKATHON VALIDATION - MCP SERVER TESTING")
    print("=" * 60)

    base_url = "http://localhost:8001"  # MCP server port

    tests = [
        ("Root endpoint", "GET", "/"),
        ("Skills list", "GET", "/mcp/skills"),
        ("Progress endpoint", "GET", "/mcp/progress/test_user"),
    ]

    print(f"🌐 Testing MCP server at {base_url}")
    print(f"📋 Running {len(tests)} endpoint tests...\n")

    results = []

    for test_name, method, endpoint in tests:
        try:
            url = f"{base_url}{endpoint}"
            print(f"🔄 Testing {test_name}: {method} {endpoint}")

            if method == "GET":
                response = requests.get(url, timeout=5)
            elif method == "POST":
                response = requests.post(url, json={}, timeout=5)
            else:
                # Fail fast on an unsupported verb: the original left
                # `response` unbound here and crashed later with NameError.
                raise ValueError(f"Unsupported HTTP method: {method}")

            if response.status_code == 200:
                print(f" ✅ SUCCESS: {response.status_code}")
                try:
                    data = response.json()
                    if "mcp" in str(data).lower() or "agentic" in str(data).lower():
                        print(f" 🎯 MCP-compliant response detected")
                except ValueError:
                    # Body was not JSON (requests raises a ValueError
                    # subclass); the 200 status check above is enough.
                    pass
                results.append((test_name, True, response.status_code))
            else:
                print(f" ❌ FAILED: {response.status_code}")
                results.append((test_name, False, response.status_code))

        except requests.exceptions.RequestException as e:
            print(f" ❌ CONNECTION ERROR: {e}")
            results.append((test_name, False, "Connection Error"))

        print()

    # Summary
    print("📊 TEST SUMMARY")
    print("-" * 40)
    passed = sum(1 for _, success, _ in results if success)
    total = len(results)
    print(f"✅ Passed: {passed}/{total}")

    if passed == total:
        print("🎉 ALL MCP ENDPOINT TESTS PASSED!")
    else:
        print("⚠️ Some tests failed. Check the MCP server.")

    return results
73
-
74
def _post_check(base_url, label, endpoint, payload, start_msg,
                success_key=None, success_note=None):
    """POST `payload` to `endpoint` and print pass/fail lines for `label`.

    Args:
        base_url: Server root, e.g. ``http://localhost:8001``.
        label: Human-readable name used in the result messages.
        endpoint: Path appended to ``base_url``.
        payload: JSON-serializable request body.
        start_msg: Line printed before the request is sent.
        success_key: Optional key that must appear in the JSON response.
        success_note: Line printed when ``success_key`` is present.
    """
    try:
        print(start_msg)
        response = requests.post(f"{base_url}{endpoint}", json=payload, timeout=10)
        if response.status_code == 200:
            print(f" ✅ {label} successful")
            if success_key is not None:
                data = response.json()
                if success_key in data:
                    print(success_note)
        else:
            print(f" ❌ {label} failed: {response.status_code}")
    # Narrowed from `except Exception`: covers transport failures and
    # non-JSON bodies (requests' JSON error is a ValueError subclass).
    except (requests.exceptions.RequestException, ValueError) as e:
        print(f" ❌ {label} error: {e}")


def test_post_endpoints():
    """Exercise the MCP POST endpoints with sample payloads.

    Sends one lesson-generation and one quiz-submission request to the
    local MCP server; results are printed, nothing is returned.
    """
    print("\n🧪 TESTING POST ENDPOINTS")
    print("=" * 40)

    base_url = "http://localhost:8001"

    # Test lesson generation
    _post_check(
        base_url,
        "Lesson generation",
        "/mcp/lesson/generate",
        {
            "skill": "Python Programming",
            "user_id": "test_user",
            "difficulty": "beginner",
        },
        start_msg="🔄 Testing lesson generation...",
        success_key="lesson",
        success_note=" 🎯 Lesson data structure valid",
    )

    # Test quiz submission
    _post_check(
        base_url,
        "Quiz submission",
        "/mcp/quiz/submit",
        {
            "user_id": "test_user",
            "skill": "Python Programming",
            "lesson_title": "Variables and Data Types",
            "answers": ["correct", "incorrect", "correct"],
        },
        start_msg="🔄 Testing quiz submission...",
    )
116
-
117
def validate_hackathon_requirements():
    """Check the file-based hackathon submission requirements.

    Verifies that README.md carries the 'mcp-server-track' tag and a demo
    video link, that space_app.py contains the MCP server integration, and
    that requirements.txt lists the key dependencies. Prints a checklist
    and an overall score.

    Returns:
        list[tuple[str, bool]]: ``(requirement_name, passed)`` entries in
        check order.
    """
    print("\n🏆 HACKATHON SUBMISSION VALIDATION")
    print("=" * 50)

    requirements = []

    # Read README.md once; the original re-opened it for each check.
    try:
        with open("README.md", "r", encoding="utf-8") as f:
            readme_content = f.read()
    except OSError:
        readme_content = None

    # Check README.md for required tag
    if readme_content is None:
        print("❌ README.md not found")
        requirements.append(("README file", False))
    elif "mcp-server-track" in readme_content:
        print("✅ README.md contains 'mcp-server-track' tag")
        requirements.append(("README tag", True))
    else:
        print("❌ README.md missing 'mcp-server-track' tag")
        requirements.append(("README tag", False))

    # Check for demo video link (same label as the original's bare-except path)
    if readme_content is None:
        requirements.append(("Demo video check", False))
    elif ("demo-video-link.com" in readme_content
          or "your-demo-video-link.com" in readme_content):
        print("⚠️ Demo video link is placeholder - needs actual video")
        requirements.append(("Demo video", False))
    elif any(video_keyword in readme_content.lower()
             for video_keyword in ["youtube.com", "vimeo.com", "loom.com", "demo video"]):
        print("✅ Demo video link appears to be present")
        requirements.append(("Demo video", True))
    else:
        print("❌ Demo video link not found")
        requirements.append(("Demo video", False))

    # Check space_app.py exists and has MCP endpoints
    try:
        with open("space_app.py", "r", encoding="utf-8") as f:
            app_content = f.read()
    except OSError:
        print("❌ space_app.py not found")
        requirements.append(("Main app file", False))
    else:
        if "FastAPI" in app_content and "@mcp_app" in app_content:
            print("✅ space_app.py has MCP server integration")
            requirements.append(("MCP integration", True))
        else:
            print("❌ space_app.py missing MCP server integration")
            requirements.append(("MCP integration", False))

    # Check requirements.txt
    try:
        with open("requirements.txt", "r", encoding="utf-8") as f:
            reqs = f.read()
    except OSError:
        print("❌ requirements.txt not found")
        requirements.append(("Requirements file", False))
    else:
        if "gradio" in reqs and "fastapi" in reqs:
            print("✅ requirements.txt has necessary dependencies")
            requirements.append(("Dependencies", True))
        else:
            print("❌ requirements.txt missing key dependencies")
            requirements.append(("Dependencies", False))

    print("\n📋 SUBMISSION CHECKLIST")
    print("-" * 30)
    passed = sum(1 for _, success in requirements if success)
    total = len(requirements)

    for req_name, success in requirements:
        status = "✅" if success else "❌"
        print(f"{status} {req_name}")

    print(f"\n📊 Overall Score: {passed}/{total}")

    if passed == total:
        print("🎉 READY FOR HACKATHON SUBMISSION!")
    else:
        print("⚠️ Please address the failed requirements above.")

    return requirements
199
-
200
def main():
    """Run the full hackathon validation.

    Performs the file-based checks, then — if an MCP server is listening
    on localhost:8001 — runs the live endpoint tests, and finally prints
    the next steps for submission.
    """
    print("🚀 AGENTIC SKILL BUILDER - HACKATHON VALIDATION")
    print("=" * 60)
    print(f"⏰ Validation Time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print()

    # First validate file-based requirements
    validate_hackathon_requirements()

    print("\n" + "=" * 60)
    print("🌐 MCP SERVER TESTING")
    print("To test MCP endpoints, the server should be running on localhost:8001")
    print("You can start it with: python space_app.py")

    try:
        # Probe whether the server is up before running endpoint tests.
        requests.get("http://localhost:8001", timeout=2)
    except requests.exceptions.RequestException:
        # Narrowed from a bare `except:` so Ctrl-C and real bugs still surface.
        print("⚠️ Server not detected. Start the server to test MCP endpoints.")
        print(" Command: python space_app.py")
    else:
        print("✅ Server detected running!")
        test_mcp_server_endpoints()
        test_post_endpoints()

    print("\n" + "=" * 60)
    print("🎯 NEXT STEPS FOR HACKATHON SUBMISSION:")
    print("1. 📹 Record demo video showing MCP server in action")
    print("2. 🔗 Update README.md with actual demo video link")
    print("3. 🚀 Upload to Hugging Face Spaces under Agents-MCP-Hackathon org")
    print("4. ✅ Ensure all MCP endpoints work in the deployed Space")
    print("=" * 60)


if __name__ == "__main__":
    main()