Amal Nimmy Lal committed on
Commit
35765b5
·
1 Parent(s): 6f735fa

feat : Project Memory

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .dockerignore +51 -0
  2. .gitignore +212 -0
  3. LICENSE +21 -0
  4. README.md +103 -1
  5. backend/CHANGELOG.md +419 -0
  6. backend/Dockerfile +28 -0
  7. backend/app/__init__.py +1 -0
  8. backend/app/database.py +37 -0
  9. backend/app/llm.py +486 -0
  10. backend/app/main.py +544 -0
  11. backend/app/mcp_server.py +256 -0
  12. backend/app/model_router.py +414 -0
  13. backend/app/models.py +146 -0
  14. backend/app/schemas.py +173 -0
  15. backend/app/smart_query.py +684 -0
  16. backend/app/tools/__init__.py +1 -0
  17. backend/app/tools/memory.py +242 -0
  18. backend/app/tools/projects.py +176 -0
  19. backend/app/tools/tasks.py +139 -0
  20. backend/app/vectorstore.py +192 -0
  21. backend/pytest.ini +9 -0
  22. backend/requirements.txt +13 -0
  23. backend/tests/conftest.py +58 -0
  24. backend/tests/test_complete_api.py +219 -0
  25. backend/tests/test_database.py +69 -0
  26. backend/tests/test_devb.py +234 -0
  27. backend/tests/test_model_router.py +408 -0
  28. backend/tests/test_models.py +70 -0
  29. backend/tests/test_projects.py +171 -0
  30. backend/tests/test_scenarios.py +148 -0
  31. backend/tests/test_schemas.py +198 -0
  32. backend/tests/test_smart_query.py +656 -0
  33. backend/tests/test_tools.py +100 -0
  34. backend/tests/test_user_models.py +189 -0
  35. backend/tests/test_vector_query.py +410 -0
  36. docker-compose.yml +30 -0
  37. frontend/.gitignore +24 -0
  38. frontend/Dockerfile +32 -0
  39. frontend/README.md +73 -0
  40. frontend/eslint.config.js +23 -0
  41. frontend/index.html +13 -0
  42. frontend/nginx.conf +33 -0
  43. frontend/package-lock.json +0 -0
  44. frontend/package.json +33 -0
  45. frontend/public/vite.svg +1 -0
  46. frontend/src/App.tsx +107 -0
  47. frontend/src/api/client.ts +146 -0
  48. frontend/src/assets/react.svg +1 -0
  49. frontend/src/components/TaskCompleteModal.tsx +109 -0
  50. frontend/src/context/ProjectContext.tsx +56 -0
.dockerignore ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Git
2
+ .git
3
+ .gitignore
4
+
5
+ # Python
6
+ __pycache__
7
+ *.pyc
8
+ *.pyo
9
+ *.pyd
10
+ .Python
11
+ pip-log.txt
12
+ pip-delete-this-directory.txt
13
+ .venv/
14
+ venv/
15
+ ENV/
16
+ env/
17
+ *.egg-info/
18
+ .pytest_cache/
19
+
20
+ # Node
21
+ node_modules/
22
+ npm-debug.log
23
+ yarn-error.log
24
+
25
+ # IDE
26
+ .vscode/
27
+ .idea/
28
+ *.swp
29
+ *.swo
30
+ *~
31
+
32
+ # OS
33
+ .DS_Store
34
+ Thumbs.db
35
+
36
+ # Project specific
37
+ *.db
38
+ *.sqlite
39
+ *.log
40
+ .env
41
+ .env.local
42
+ .env.*.local
43
+
44
+ # Build outputs
45
+ dist/
46
+ build/
47
+
48
+ # Documentation
49
+ *.md
50
+ LICENSE
51
+ docs/
.gitignore ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #Agent
2
+ .claude/
3
+
4
+ #DB
5
+ backend/project_memory.db
6
+ # Byte-compiled / optimized / DLL files
7
+ __pycache__/
8
+ *.py[codz]
9
+ *$py.class
10
+
11
+ # C extensions
12
+ *.so
13
+
14
+ # Distribution / packaging
15
+ .Python
16
+ build/
17
+ develop-eggs/
18
+ dist/
19
+ downloads/
20
+ eggs/
21
+ .eggs/
22
+ lib/
23
+ lib64/
24
+ parts/
25
+ sdist/
26
+ var/
27
+ wheels/
28
+ share/python-wheels/
29
+ *.egg-info/
30
+ .installed.cfg
31
+ *.egg
32
+ MANIFEST
33
+
34
+ # PyInstaller
35
+ # Usually these files are written by a python script from a template
36
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
37
+ *.manifest
38
+ *.spec
39
+
40
+ # Installer logs
41
+ pip-log.txt
42
+ pip-delete-this-directory.txt
43
+
44
+ # Unit test / coverage reports
45
+ htmlcov/
46
+ .tox/
47
+ .nox/
48
+ .coverage
49
+ .coverage.*
50
+ .cache
51
+ nosetests.xml
52
+ coverage.xml
53
+ *.cover
54
+ *.py.cover
55
+ .hypothesis/
56
+ .pytest_cache/
57
+ cover/
58
+
59
+ # Translations
60
+ *.mo
61
+ *.pot
62
+
63
+ # Django stuff:
64
+ *.log
65
+ local_settings.py
66
+ db.sqlite3
67
+ db.sqlite3-journal
68
+
69
+ # Flask stuff:
70
+ instance/
71
+ .webassets-cache
72
+
73
+ # Scrapy stuff:
74
+ .scrapy
75
+
76
+ # Sphinx documentation
77
+ docs/_build/
78
+
79
+ # PyBuilder
80
+ .pybuilder/
81
+ target/
82
+
83
+ # Jupyter Notebook
84
+ .ipynb_checkpoints
85
+
86
+ # IPython
87
+ profile_default/
88
+ ipython_config.py
89
+
90
+ # pyenv
91
+ # For a library or package, you might want to ignore these files since the code is
92
+ # intended to run in multiple environments; otherwise, check them in:
93
+ # .python-version
94
+
95
+ # pipenv
96
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
97
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
98
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
99
+ # install all needed dependencies.
100
+ #Pipfile.lock
101
+
102
+ # UV
103
+ # Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
104
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
105
+ # commonly ignored for libraries.
106
+ #uv.lock
107
+
108
+ # poetry
109
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
110
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
111
+ # commonly ignored for libraries.
112
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
113
+ #poetry.lock
114
+ #poetry.toml
115
+
116
+ # pdm
117
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
118
+ # pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
119
+ # https://pdm-project.org/en/latest/usage/project/#working-with-version-control
120
+ #pdm.lock
121
+ #pdm.toml
122
+ .pdm-python
123
+ .pdm-build/
124
+
125
+ # pixi
126
+ # Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
127
+ #pixi.lock
128
+ # Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
129
+ # in the .venv directory. It is recommended not to include this directory in version control.
130
+ .pixi
131
+
132
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
133
+ __pypackages__/
134
+
135
+ # Celery stuff
136
+ celerybeat-schedule
137
+ celerybeat.pid
138
+
139
+ # SageMath parsed files
140
+ *.sage.py
141
+
142
+ # Environments
143
+ .env
144
+ .envrc
145
+ .venv
146
+ env/
147
+ venv/
148
+ ENV/
149
+ env.bak/
150
+ venv.bak/
151
+
152
+ # Spyder project settings
153
+ .spyderproject
154
+ .spyproject
155
+
156
+ # Rope project settings
157
+ .ropeproject
158
+
159
+ # mkdocs documentation
160
+ /site
161
+
162
+ # mypy
163
+ .mypy_cache/
164
+ .dmypy.json
165
+ dmypy.json
166
+
167
+ # Pyre type checker
168
+ .pyre/
169
+
170
+ # pytype static type analyzer
171
+ .pytype/
172
+
173
+ # Cython debug symbols
174
+ cython_debug/
175
+
176
+ # PyCharm
177
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
178
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
179
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
180
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
181
+ #.idea/
182
+
183
+ # Abstra
184
+ # Abstra is an AI-powered process automation framework.
185
+ # Ignore directories containing user credentials, local state, and settings.
186
+ # Learn more at https://abstra.io/docs
187
+ .abstra/
188
+
189
+ # Visual Studio Code
190
+ # Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
191
+ # that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
192
+ # and can be added to the global gitignore or merged into this file. However, if you prefer,
193
+ # you could uncomment the following to ignore the entire vscode folder
194
+ # .vscode/
195
+
196
+ # Ruff stuff:
197
+ .ruff_cache/
198
+
199
+ # PyPI configuration file
200
+ .pypirc
201
+
202
+ # Cursor
203
+ # Cursor is an AI-powered code editor. `.cursorignore` specifies files/directories to
204
+ # exclude from AI features like autocomplete and code analysis. Recommended for sensitive data
205
+ # refer to https://docs.cursor.com/context/ignore-files
206
+ .cursorignore
207
+ .cursorindexingignore
208
+
209
+ # Marimo
210
+ marimo/_static/
211
+ marimo/_lsp/
212
+ __marimo__/
LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2025 arymandeshwal
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
README.md CHANGED
@@ -7,6 +7,108 @@ sdk: docker
7
  pinned: false
8
  license: mit
9
  short_description: Semantic, shared AI project memory.
 
 
10
  ---
11
 
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7
  pinned: false
8
  license: mit
9
  short_description: Semantic, shared AI project memory.
10
+ tags:
11
+ - building-mcp-track-enterprise
12
  ---
13
 
14
+
15
+ ## 🎯 Track 1: Building MCP - Enterprise Category
16
+
17
+ **Project Memory** is a multi-user, multi-project AI memory system powered by MCP (Model Context Protocol). It creates shared project memory where every action gets logged and becomes searchable via semantic search and AI chat.
18
+
19
+ ## 🚀 What We Built
20
+
21
+ An MCP server that extends LLM capabilities for enterprise teams by:
22
+ - **Persistent Project Memory**: Every task completion generates AI documentation that becomes searchable knowledge
23
+ - **Semantic Search**: Vector-based memory retrieval across all project activities
24
+ - **MCP Tool Integration**: Exposes project management capabilities as MCP tools
25
+ - **Multi-User Collaboration**: Teams can share and search collective knowledge
26
+
27
+ ## 🛠️ MCP Tools Exposed
28
+
29
+ Our MCP server provides these tools:
30
+ - `create_project`: Initialize a new project workspace
31
+ - `list_projects`: View all available projects
32
+ - `join_project`: Join an existing project
33
+ - `list_tasks`: Get project tasks with status
34
+ - `complete_task`: Mark task as done with AI-generated documentation
35
+ - `memory_search`: Semantic search across project history
36
+ - `list_activity`: View project activity feed
37
+
38
+ ## 📹 Demo Video
39
+
40
+ [Watch our 3-minute demo showing MCP integration with Claude Desktop](#) *(link to be added)*
41
+
42
+ ## 🏗️ Architecture
43
+
44
+ ```
45
+ ┌─────────────────┐ ┌─────────────────┐
46
+ │ Web Frontend │────▶│ FastAPI Backend │
47
+ │ (React) │ │ (MCP Client) │
48
+ └─────────────────┘ └─────────────────┘
49
+
50
+
51
+ ┌─────────────────┐
52
+ │ MCP Server │
53
+ │ (Python) │
54
+ └─────────────────┘
55
+
56
+
57
+ ┌─────────────────┐
58
+ │ SQLite + Vec │
59
+ │ (Embeddings) │
60
+ └─────────────────┘
61
+ ```
62
+
63
+ ## 💡 Key Features
64
+
65
+ 1. **Task Completion Pipeline**: Transforms user work into searchable documentation
66
+ 2. **Vector Search**: Semantic retrieval using sqlite-vec embeddings
67
+ 3. **Chat Interface**: Natural language queries using MCP tools
68
+ 4. **Activity Feed**: Real-time project activity tracking
69
+ 5. **Multi-Project Support**: Manage multiple projects with isolated memory
70
+
71
+ ## 🔧 Technical Stack
72
+
73
+ - **MCP Server**: Python with the official `mcp` SDK
74
+ - **Backend**: FastAPI (Python) as MCP client
75
+ - **Frontend**: React + Vite + Tailwind CSS
76
+ - **Database**: SQLite with sqlite-vec for embeddings
77
+ - **AI**: Google Generative AI (Gemini) for documentation generation
78
+ - **Deployment**: Docker container for Hugging Face Spaces
79
+
80
+ ## 🎮 How to Use
81
+
82
+ 1. **Create or Join a Project**: Start by creating a new project or joining an existing one
83
+ 2. **Complete Tasks**: Mark tasks as done and provide context about your work
84
+ 3. **AI Documentation**: System automatically generates searchable documentation
85
+ 4. **Search Memory**: Use semantic search to find any past work or decision
86
+ 5. **Chat with Memory**: Ask questions about project history using natural language
87
+
88
+ ## 🚢 Deployment
89
+
90
+ This Space runs as a Docker container combining:
91
+ - FastAPI backend serving as MCP client
92
+ - React frontend for user interface
93
+ - MCP server handling all tool operations
94
+ - SQLite database with vector search capabilities
95
+
96
+ ## 🔐 Environment Variables
97
+
98
+ Configure in Space settings:
99
+ - `GOOGLE_API_KEY`: For Gemini AI integration
100
+ - `DATABASE_URL`: (Optional) Custom database connection
101
+
102
+ ## 👥 Team
103
+
104
+ *Add team member names here*
105
+
106
+ ## 📝 License
107
+
108
+ MIT License - See LICENSE file for details
109
+
110
+ ## 🔗 Links
111
+
112
+ - [GitHub Repository](https://github.com/YOUR_USERNAME/project-memory)
113
+ - [MCP Documentation](https://modelcontextprotocol.io)
114
+ - [Hackathon Page](https://huggingface.co/MCP-1st-Birthday)
backend/CHANGELOG.md ADDED
@@ -0,0 +1,419 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Project Memory Backend - Changelog
2
+
3
+ ## Current Status: Production Ready
4
+
5
+ ---
6
+
7
+ ## Phase 4: User & Project ID Simplification (2025-11-30)
8
+
9
+ ### Breaking Changes
10
+
11
+ **User Model - Removed Email**
12
+ | Before | After |
13
+ |--------|-------|
14
+ | `name`, `email` | `first_name`, `last_name` |
15
+ | UUID for ID | `first_name[:3]` + 4 random digits |
16
+ | Email required | No email needed |
17
+
18
+ **Example User ID:** `Amal Nimmy` → `ama1234`
19
+
20
+ **Project Model - Name as ID**
21
+ | Before | After |
22
+ |--------|-------|
23
+ | UUID for ID | Project name used as ID |
24
+ | ID auto-generated | User chooses name (must be unique) |
25
+
26
+ **Example Project ID:** `fastgate` → ID is `fastgate`
27
+
28
+ ### New Features
29
+
30
+ | Feature | Description |
31
+ |---------|-------------|
32
+ | `check_project_id_available()` | Check if project ID/name is taken |
33
+ | `GET /api/projects/check/{id}` | API endpoint to check availability |
34
+ | `generate_user_id(first_name)` | Generate user ID from first name |
35
+
36
+ ### Updated Files
37
+
38
+ | File | Changes |
39
+ |------|---------|
40
+ | `app/models.py` | User: removed `email`, added `first_name`/`last_name`, new ID generation |
41
+ | `app/schemas.py` | UserBase/UserCreate: `firstName`, `lastName` (no email) |
42
+ | `app/main.py` | Updated user endpoints, added project ID check endpoint |
43
+ | `app/tools/projects.py` | Project uses name as ID, added `check_project_id_available()` |
44
+
45
+ ### New Unit Tests
46
+
47
+ | Test File | Coverage |
48
+ |-----------|----------|
49
+ | `tests/test_database.py` | Database init, session management, engine config |
50
+ | `tests/test_user_models.py` | User ID generation, User/Project/Task models, enums |
51
+ | `tests/test_projects.py` | Project creation, joining, ID availability check |
52
+ | `tests/test_schemas.py` | All Pydantic schemas validation |
53
+
54
+ ### Test Results
55
+ ```
56
+ 57 passed (new unit tests)
57
+ 3 passed, 1 xfailed (scenario tests)
58
+ ```
59
+
60
+ ### Database Migration
61
+
62
+ **Important:** Delete the old database to apply schema changes:
63
+ ```bash
64
+ del project_memory.db
65
+ # Database will be recreated on next startup
66
+ ```
67
+
68
+ ### API Changes
69
+
70
+ **Create User (Updated):**
71
+ ```bash
72
+ curl -X POST http://localhost:8000/api/users \
73
+ -H "Content-Type: application/json" \
74
+ -d '{"firstName": "Amal", "lastName": "Nimmy"}'
75
+ # Response: {"id": "ama1234", "firstName": "Amal", "lastName": "Nimmy", ...}
76
+ ```
77
+
78
+ **Check Project ID Availability (New):**
79
+ ```bash
80
+ curl http://localhost:8000/api/projects/check/fastgate
81
+ # Response: {"available": true, "project_id": "fastgate"}
82
+ ```
83
+
84
+ **Create Project (Updated):**
85
+ ```bash
86
+ curl -X POST http://localhost:8000/api/projects \
87
+ -H "Content-Type: application/json" \
88
+ -d '{"name": "fastgate", "description": "Fast API Gateway", "userId": "ama1234"}'
89
+ # Response: {"id": "fastgate", "name": "fastgate", ...}
90
+ ```
91
+
92
+ ---
93
+
94
+ ## Architecture
95
+
96
+ ```
97
+ backend/
98
+ ├── app/
99
+ │ ├── database.py # SQLite + SQLAlchemy setup
100
+ │ ├── models.py # User, Project, Task, LogEntry models
101
+ │ ├── schemas.py # Pydantic request/response models
102
+ │ ├── main.py # FastAPI REST API
103
+ │ ├── mcp_server.py # MCP server with all tools
104
+ │ ├── model_router.py # Multi-model rotation + multi-key support
105
+ │ ├── llm.py # Gemini LLM client (uses model_router)
106
+ │ ├── vectorstore.py # sqlite-vec vector storage
107
+ │ ├── smart_query.py # Natural language query processing
108
+ │ └── tools/
109
+ │ ├── projects.py # create_project, list_projects, join_project
110
+ │ ├── tasks.py # create_task, list_tasks, list_activity
111
+ │ └── memory.py # complete_task, memory_search
112
+ ├── test_*.py # Test scripts
113
+ ├── requirements.txt
114
+ └── .env
115
+ ```
116
+
117
+ ---
118
+
119
+ ## Phase 1: Foundation + Intelligence Layer
120
+
121
+ ### Database Models
122
+
123
+ | Model | Fields |
124
+ |-------|--------|
125
+ | `User` | id, first_name, last_name, avatar_url, created_at |
126
+ | `Project` | id (=name), name, description, created_by, created_at |
127
+ | `ProjectMembership` | project_id, user_id, role (owner/member) |
128
+ | `Task` | id, project_id, title, description, status, assigned_to |
129
+ | `LogEntry` | id, project_id, task_id, user_id, raw_input, generated_doc, tags |
130
+
131
+ ### Enums
132
+
133
+ | Enum | Values |
134
+ |------|--------|
135
+ | `TaskStatus` | todo, in_progress, done |
136
+ | `ActorType` | human, agent |
137
+ | `ActionType` | task_completed, doc_generated, query_answered |
138
+
139
+ ### MCP Tools
140
+
141
+ | Tool | Description |
142
+ |------|-------------|
143
+ | `create_project` | Create project + add owner membership |
144
+ | `list_projects` | List user's projects with roles |
145
+ | `join_project` | Add user to existing project |
146
+ | `create_task` | Create task in project |
147
+ | `list_tasks` | List tasks with optional status filter |
148
+ | `list_activity` | Get recent LogEntries for project |
149
+ | `complete_task` | Mark done + generate docs + create embedding |
150
+ | `memory_search` | Semantic search + LLM-synthesized answer |
151
+ | `smart_query` | Natural language queries with context awareness |
152
+
153
+ ### LLM Integration (`llm.py`)
154
+
155
+ | Function | Purpose |
156
+ |----------|---------|
157
+ | `generate_documentation()` | Generate docs from task completion |
158
+ | `synthesize_answer()` | Create answer from search context |
159
+ | `get_embedding()` | Get 768-dim embedding vector |
160
+
161
+ **Models:** Gemini 2.0 Flash (text) + text-embedding-004 (embeddings)
162
+
163
+ ### Vector Store (`vectorstore.py`)
164
+
165
+ | Function | Purpose |
166
+ |----------|---------|
167
+ | `init_vectorstore()` | Create tables at startup |
168
+ | `add_embedding()` | Store embedding with metadata |
169
+ | `search()` | Vector similarity search |
170
+ | `delete_by_project()` | Cleanup vectors |
171
+
172
+ ---
173
+
174
+ ## Phase 2: FastAPI REST API
175
+
176
+ ### Endpoints
177
+
178
+ | Method | Endpoint | Description |
179
+ |--------|----------|-------------|
180
+ | GET | `/` | Health check |
181
+ | GET | `/api/projects?userId={id}` | List user's projects |
182
+ | POST | `/api/projects` | Create new project |
183
+ | POST | `/api/projects/{id}/join` | Join existing project |
184
+ | GET | `/api/projects/{id}/tasks?status={s}` | List project tasks |
185
+ | POST | `/api/projects/{id}/tasks` | Create new task |
186
+ | POST | `/api/tasks/{id}/complete` | Complete task with docs |
187
+ | GET | `/api/projects/{id}/activity?limit={n}` | Get activity feed |
188
+ | POST | `/api/projects/{id}/search` | Semantic search |
189
+ | POST | `/api/chat` | Chat with AI |
190
+
191
+ ### API Examples
192
+
193
+ **Create Project:**
194
+ ```bash
195
+ curl -X POST http://localhost:8000/api/projects \
196
+ -H "Content-Type: application/json" \
197
+ -d '{"name": "My Project", "description": "Description", "userId": "user-id"}'
198
+ ```
199
+
200
+ **Complete Task:**
201
+ ```bash
202
+ curl -X POST http://localhost:8000/api/tasks/{task-id}/complete \
203
+ -H "Content-Type: application/json" \
204
+ -d '{"userId": "user-id", "whatIDid": "Implemented login API", "codeSnippet": "..."}'
205
+ ```
206
+
207
+ **Search Memory:**
208
+ ```bash
209
+ curl -X POST http://localhost:8000/api/projects/{project-id}/search \
210
+ -H "Content-Type: application/json" \
211
+ -d '{"query": "What did we do yesterday?"}'
212
+ ```
213
+
214
+ ### Features
215
+
216
+ - FastAPI with automatic OpenAPI docs (`/docs`, `/redoc`)
217
+ - CORS configured for frontend (localhost:5173, :3000)
218
+ - Pydantic schema validation
219
+ - Error handling with meaningful messages
220
+ - Database initialization on startup
221
+ - Async support for LLM calls
222
+
223
+ ---
224
+
225
+ ## Quick Start
226
+
227
+ ```bash
228
+ cd backend
229
+
230
+ # Setup
231
+ python -m venv .venv
232
+ .venv\Scripts\activate # Windows
233
+ pip install -r requirements.txt
234
+
235
+ # Configure
236
+ # Create .env with GEMINI_API_KEY
237
+
238
+ # Run
239
+ uvicorn app.main:app --reload
240
+ ```
241
+
242
+ **Server:** http://localhost:8000
243
+ **API Docs:** http://localhost:8000/docs
244
+
245
+ ---
246
+
247
+ ## Environment Variables
248
+
249
+ ```env
250
+ # Multi-key support (recommended) - comma-separated for 2x rate limits
251
+ GEMINI_API_KEYS=key1,key2
252
+
253
+ # Single key fallback (backward compatible)
254
+ GEMINI_API_KEY=your-key-here
255
+
256
+ DATABASE_URL=sqlite:///./project_memory.db
257
+ FRONTEND_URL=http://localhost:5173
258
+ ```
259
+
260
+ ---
261
+
262
+ ## Dependencies
263
+
264
+ ```
265
+ fastapi==0.115.0
266
+ uvicorn[standard]==0.32.0
267
+ sqlalchemy==2.0.36
268
+ python-dotenv==1.0.1
269
+ google-generativeai
270
+ sqlite-vec
271
+ numpy
272
+ pydantic==2.9.2
273
+ httpx==0.27.2
274
+ python-multipart==0.0.12
275
+ mcp
276
+ ```
277
+
278
+ ---
279
+
280
+ ## Troubleshooting
281
+
282
+ **Port in use:**
283
+ ```bash
284
+ netstat -ano | findstr :8000
285
+ taskkill /PID <pid> /F
286
+ ```
287
+
288
+ **Import errors:**
289
+ ```bash
290
+ .venv\Scripts\activate
291
+ pip install -r requirements.txt
292
+ ```
293
+
294
+ **Database issues:**
295
+ ```bash
296
+ del project_memory.db
297
+ .venv\Scripts\python test_api.py
298
+ ```
299
+
300
+ ---
301
+
302
+ ## Test Commands
303
+
304
+ ```bash
305
+ # Run all pytest unit tests (recommended)
306
+ pytest tests/ -v
307
+
308
+ # Run specific test files
309
+ pytest tests/test_database.py tests/test_user_models.py tests/test_projects.py tests/test_schemas.py -v
310
+ pytest tests/test_scenarios.py -v
311
+
312
+ # Legacy manual test scripts (not pytest)
313
+ .venv\Scripts\python tests/test_tools.py
314
+ .venv\Scripts\python tests/test_devb.py
315
+ .venv\Scripts\python tests/test_complete_api.py # Requires running server
316
+ .venv\Scripts\python tests/test_model_router.py
317
+ ```
318
+
319
+ ---
320
+
321
+ ## Phase 3: Multi-Model Rotation & Rate Limit Scaling
322
+
323
+ ### Problem
324
+ Gemini 2.0 Flash has only 15 RPM (requests per minute), limiting concurrent users to ~5.
325
+
326
+ ### Solution
327
+ Implemented multi-model rotation with multi-key support to scale from 15 RPM to 330+ RPM.
328
+
329
+ ### New File: `app/model_router.py`
330
+
331
+ | Component | Purpose |
332
+ |-----------|---------|
333
+ | `ModelRouter` class | Manages model rotation, rate limiting, caching, multi-key support |
334
+ | `_load_api_keys()` | Load API keys from environment (backward compatible) |
335
+ | `_get_next_key()` | Round-robin key selection with health tracking |
336
+ | `_check_rate_limit()` | Per-key, per-model rate limit checking |
337
+ | `generate()` | Main generation with automatic fallback |
338
+ | `get_stats()` | Usage statistics for monitoring |
339
+
340
+ ### Available Models
341
+
342
+ | Model | RPM/Key | Quality | Use Case |
343
+ |-------|---------|---------|----------|
344
+ | gemini-2.0-flash | 15 | Best | Complex tasks (chat, smart_query) |
345
+ | gemini-2.0-flash-lite | 30 | Good | Documentation generation |
346
+ | gemma-3-27b-it | 30 | Good | Documentation, synthesis |
347
+ | gemma-3-12b-it | 30 | Medium | Simple tasks |
348
+ | gemma-3-4b-it | 30 | Lower | Fallback |
349
+ | gemma-3-1b-it | 30 | Lowest | Emergency fallback |
350
+
351
+ **Total per key: 165 RPM**
352
+
353
+ ### Task-Based Model Priority
354
+
355
+ | Task Type | Priority (best → fallback) |
356
+ |-----------|----------------------------|
357
+ | `chat` | gemini-2.0-flash → flash-lite → gemma-3-27b |
358
+ | `smart_query` | gemini-2.0-flash → gemma-3-27b → gemma-3-12b |
359
+ | `documentation` | flash-lite → gemma-3-27b → gemma-3-12b |
360
+ | `synthesis` | gemma-3-27b → gemma-3-12b → gemma-3-4b |
361
+
362
+ ### Multi-Key Support
363
+
364
+ Configure multiple API keys for doubled rate limits:
365
+
366
+ ```env
367
+ # Option 1: Comma-separated (recommended)
368
+ GEMINI_API_KEYS=key1,key2
369
+
370
+ # Option 2: Single key (backward compatible)
371
+ GEMINI_API_KEY=your-key
372
+ ```
373
+
374
+ ### Key Health Management
375
+
376
+ | Error Type | Action | Cooldown |
377
+ |------------|--------|----------|
378
+ | Rate limit (429) | Mark unhealthy, try next key | 60 sec |
379
+ | Auth error (401/403) | Mark unhealthy | 24 hours |
380
+ | Quota exceeded | Mark unhealthy, try next key | 60 sec |
381
+ | Other errors | Short cooldown, try next model | 30 sec |
382
+
383
+ ### Response Caching
384
+
385
+ - **TTL:** 5 minutes
386
+ - **Key format:** `hash(task_type:user_id:prompt[:200])`
387
+ - **Auto-cleanup:** Expired entries removed periodically
388
+
389
+ ### Capacity Summary
390
+
391
+ | Metric | Before | 1 Key | 2 Keys |
392
+ |--------|--------|-------|--------|
393
+ | Base RPM | 15 | 165 | 330 |
394
+ | With cache | 15 | 300+ | 600+ |
395
+ | Concurrent users | ~5 | ~100 | ~200 |
396
+ | Fallback | None | Auto | Auto + failover |
397
+
398
+ ### Updated Files
399
+
400
+ | File | Changes |
401
+ |------|---------|
402
+ | `app/model_router.py` | NEW - Multi-model rotation + caching + multi-key |
403
+ | `app/llm.py` | Uses router for `generate_documentation()`, `synthesize_answer()`, `chat_with_tools()` |
404
+ | `app/smart_query.py` | Uses router for model selection |
405
+ | `test_model_router.py` | NEW - 20 test cases for router functionality |
406
+ | `.env` | Added `GEMINI_API_KEYS` for multi-key support |
407
+
408
+ ### Architecture Update
409
+
410
+ ```
411
+ backend/
412
+ ├── app/
413
+ │ ├── model_router.py # NEW: Multi-model rotation + multi-key support
414
+ │ ├── llm.py # Updated: Uses model router
415
+ │ ├── smart_query.py # Updated: Uses model router
416
+ │ └── ...
417
+ ├── test_model_router.py # NEW: Router test cases
418
+ └── ...
419
+ ```
backend/Dockerfile ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Backend Dockerfile
2
+ FROM python:3.11-slim
3
+
4
+ # Set working directory
5
+ WORKDIR /app
6
+
7
+ # Install system dependencies
8
+ RUN apt-get update && apt-get install -y \
9
+ gcc \
10
+ && rm -rf /var/lib/apt/lists/*
11
+
12
+ # Copy requirements first for better caching
13
+ COPY requirements.txt .
14
+
15
+ # Install Python dependencies
16
+ RUN pip install --no-cache-dir -r requirements.txt
17
+
18
+ # Copy application code
19
+ COPY . .
20
+
21
+ # Create directory for database
22
+ RUN mkdir -p /app/data
23
+
24
+ # Expose port
25
+ EXPOSE 8000
26
+
27
+ # Run the application
28
+ CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000"]
backend/app/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Project Memory Backend
backend/app/database.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Database connection and session management for Project Memory."""
2
+
3
+ from sqlalchemy import create_engine
4
+ from sqlalchemy.orm import sessionmaker, declarative_base
5
+ from pathlib import Path
6
+
7
+ # Database file location
8
+ DB_PATH = Path(__file__).parent.parent / "project_memory.db"
9
+ DATABASE_URL = f"sqlite:///{DB_PATH}"
10
+
11
+ # Create engine with SQLite-specific settings
12
+ engine = create_engine(
13
+ DATABASE_URL,
14
+ connect_args={"check_same_thread": False}, # Needed for SQLite
15
+ echo=False, # Set to True for SQL debugging
16
+ )
17
+
18
+ # Session factory
19
+ SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
20
+
21
+ # Base class for models
22
+ Base = declarative_base()
23
+
24
+
25
+ def get_db():
26
+ """Dependency that provides a database session."""
27
+ db = SessionLocal()
28
+ try:
29
+ yield db
30
+ finally:
31
+ db.close()
32
+
33
+
34
+ def init_db():
35
+ """Initialize database tables."""
36
+ from app import models # noqa: F401 - Import models to register them
37
+ Base.metadata.create_all(bind=engine)
backend/app/llm.py ADDED
@@ -0,0 +1,486 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Module-level Gemini configuration: loads the API key from the
environment and fails fast at import time if it is missing."""

import google.generativeai as genai
import json
import os
from dotenv import load_dotenv

# Load environment variables
load_dotenv()

# Validate required environment variable; raising here makes a missing key
# an import-time error rather than a confusing runtime failure later.
GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
if not GEMINI_API_KEY:
    raise ValueError(
        "GEMINI_API_KEY environment variable is required. "
        "Please set it in your .env file or environment."
    )

# Configure Gemini
genai.configure(api_key=GEMINI_API_KEY)

# Import model router for multi-model rotation
# (imported after genai.configure so the router sees a configured client)
from app.model_router import generate as router_generate, generate_with_info
22
+
23
+
24
async def generate_documentation(task_title: str, what_i_did: str, code_snippet: str | None = None) -> dict:
    """Generate docs for completed task. Returns {summary, details, tags}.

    Args:
        task_title: Title of the completed task.
        what_i_did: User's free-text description of the work done.
        code_snippet: Optional code associated with the work.

    Returns:
        dict parsed from the model's JSON response, with keys
        "summary", "details" and "tags".

    Raises:
        ValueError: if the model response cannot be parsed as JSON.
            (json.JSONDecodeError is a ValueError subclass, so callers
            that previously caught either type keep working.)
    """
    prompt = f"""
Generate technical documentation for this completed work.

Task: {task_title}
What was done: {what_i_did}
Code: {code_snippet or 'N/A'}

Return ONLY valid JSON with:
- "summary": one-line summary
- "details": 2-3 paragraph technical documentation
- "tags": array of 3-7 relevant tags

Response must be pure JSON, no markdown.
"""

    # Use model router for multi-model rotation
    text = await router_generate(prompt, task_type="documentation")

    # Clean response: strip a surrounding markdown fence (``` or ```json)
    # if the model added one despite the "no markdown" instruction.
    text = text.strip()
    if text.startswith("```"):
        parts = text.split("```")
        # Content between the first pair of fences; guard against a
        # degenerate response that is nothing but backticks.
        text = parts[1] if len(parts) > 1 else text
        if text.startswith("json"):
            # Drop the language tag that follows the opening fence.
            text = text[len("json"):]

    try:
        return json.loads(text.strip())
    except json.JSONDecodeError as e:
        # Surface a descriptive error, consistent with generate_tasks,
        # instead of leaking a bare decode error to the API layer.
        raise ValueError(f"Failed to parse LLM response as JSON: {e}")
50
+
51
+
52
async def synthesize_answer(context: str, query: str) -> str:
    """Answer a user query from retrieved project-memory context.

    Args:
        context: Concatenated memory entries relevant to the query.
        query: The user's natural-language question.

    Returns:
        The model-generated answer text.
    """
    prompt = f"""
Based on this project memory:
{context}

Answer: {query}

Cite specific entries. If info not found, say so.
"""
    # Route through the model router so rate-limited models rotate.
    answer = await router_generate(prompt, task_type="synthesis")
    return answer
65
+
66
+
67
async def get_embedding(text: str) -> list[float]:
    """Embed the given text with Gemini's text-embedding-004 model.

    Args:
        text: Arbitrary text to embed.

    Returns:
        The embedding vector as a list of floats.
    """
    response = genai.embed_content(model="models/text-embedding-004", content=text)
    return response['embedding']
74
+
75
+
76
async def generate_tasks(project_name: str, project_description: str, count: int = 50) -> list[dict]:
    """Generate demo tasks for a project using LLM.

    Args:
        project_name: Name of the project
        project_description: Description of the project
        count: Number of tasks to generate (clamped to the range 1..50)

    Returns:
        List of tasks, each a dict with "title" (truncated to 100 chars)
        and "description". Malformed entries in the model output are
        silently dropped.

    Raises:
        ValueError: if the LLM response cannot be parsed as a JSON list.
    """
    # Clamp to a sane range: previously only the upper bound was enforced,
    # so count <= 0 produced a prompt asking for zero/negative tasks.
    count = max(1, min(count, 50))

    prompt = f"""
You are a project manager creating demo tasks for a hackathon project.

Project: {project_name}
Description: {project_description}

Generate exactly {count} simple, demo-friendly tasks for this software project. Each task should be:
- Simple and quick to complete (5-30 minutes each)
- Suitable for a demo or hackathon setting
- Cover typical software development activities (setup, coding, testing, docs, UI)

Include a mix of:
- Setup tasks (environment, dependencies, config)
- Feature implementation (simple features)
- Bug fixes (minor issues)
- Documentation (README, comments)
- Testing (basic tests)
- UI/UX improvements

Return ONLY a valid JSON array with objects containing:
- "title": short task title (max 100 chars)
- "description": brief description (1 sentence)

Example:
[
  {{"title": "Set up development environment", "description": "Install dependencies and configure local dev environment."}},
  {{"title": "Add user login button", "description": "Create a login button component in the header."}}
]

Return ONLY the JSON array, no markdown or extra text.
"""

    # Use model router for generation
    text = await router_generate(prompt, task_type="documentation")

    # Clean response: drop a surrounding ```/```json fence if present.
    text = text.strip()
    if text.startswith("```"):
        lines = text.split("\n")
        # Remove first and last lines (```json and ```). The old follow-up
        # check `if text.startswith("json")` was dead code: the "json" tag
        # lives on the fence line that was just removed.
        text = "\n".join(lines[1:-1])

    try:
        tasks = json.loads(text.strip())
        # Validate structure
        if not isinstance(tasks, list):
            raise ValueError("Response is not a list")

        # Ensure each task has required fields; coerce to strings and
        # enforce the 100-char title limit promised in the prompt.
        validated_tasks = []
        for task in tasks:
            if isinstance(task, dict) and "title" in task:
                validated_tasks.append({
                    "title": str(task.get("title", ""))[:100],
                    "description": str(task.get("description", ""))
                })

        return validated_tasks
    except json.JSONDecodeError as e:
        raise ValueError(f"Failed to parse LLM response as JSON: {e}")
152
+
153
+
154
async def chat_with_tools(messages: list[dict], project_id: str) -> str:
    """Chat with AI using MCP tools for function calling.

    Implements a single round of text-protocol tool use: the model is told
    to emit "TOOL:"/"ARGS:" lines, at most one tool call is parsed and
    executed, the result is fed back, and the follow-up answer is returned.

    Args:
        messages: List of chat messages [{'role': 'user/assistant', 'content': '...'}]
        project_id: Project ID for context

    Returns:
        AI response string

    Raises:
        Exception: when every model in the router is rate limited.
    """
    from app.tools.projects import list_projects, create_project, join_project
    from app.tools.tasks import list_tasks, create_task, list_activity
    from app.tools.memory import complete_task, memory_search
    from app.model_router import router

    # Define tools for Gemini function calling (JSON-schema style specs
    # that are serialized into the prompt, not native function calling).
    tools = [
        {
            "name": "list_projects",
            "description": "List all projects for a user",
            "parameters": {
                "type": "object",
                "properties": {
                    "userId": {"type": "string", "description": "User ID"}
                },
                "required": ["userId"]
            }
        },
        {
            "name": "list_tasks",
            "description": "List all tasks for a project",
            "parameters": {
                "type": "object",
                "properties": {
                    "projectId": {"type": "string", "description": "Project ID"},
                    "status": {"type": "string", "enum": ["todo", "in_progress", "done"]}
                },
                "required": ["projectId"]
            }
        },
        {
            "name": "list_activity",
            "description": "Get recent activity for a project",
            "parameters": {
                "type": "object",
                "properties": {
                    "projectId": {"type": "string", "description": "Project ID"},
                    "limit": {"type": "number", "default": 20}
                },
                "required": ["projectId"]
            }
        },
        {
            "name": "memory_search",
            "description": "Semantic search across project memory",
            "parameters": {
                "type": "object",
                "properties": {
                    "projectId": {"type": "string", "description": "Project ID"},
                    "query": {"type": "string", "description": "Search query"}
                },
                "required": ["projectId", "query"]
            }
        }
    ]

    # Build system message with project context
    system_message = f"""
You are an AI assistant helping users understand their project memory.
Current Project ID: {project_id}

You have access to these tools:
- list_projects: List user's projects
- list_tasks: List tasks in a project
- list_activity: Get recent activity
- memory_search: Search project memory semantically

Use these tools to answer user questions accurately.
"""

    # Convert messages to Gemini format; a caller-supplied system message
    # (last one wins) replaces the default built above.
    chat_messages = []
    for msg in messages:
        if msg["role"] == "system":
            system_message = msg["content"]
        else:
            chat_messages.append(msg)

    # Build the prompt with tool descriptions
    tool_prompt = f"""
{system_message}

To use tools, format your response as:
TOOL: tool_name
ARGS: {{"arg1": "value1"}}

Available tools:
{json.dumps(tools, indent=2)}
"""

    # Add system context to first message
    full_messages = [{"role": "user", "content": tool_prompt}] + chat_messages

    # Convert to Gemini chat format
    chat_history = []
    for msg in full_messages[:-1]:  # All except last
        chat_history.append({
            "role": "user" if msg["role"] == "user" else "model",
            "parts": [msg["content"]]
        })

    # Get best available model from router for chat
    model_name = router.get_model_for_task("chat")
    if not model_name:
        raise Exception("All models are rate limited. Please try again in a minute.")

    model = router.models[model_name]
    router._record_usage(model_name)

    # Start chat session with selected model
    chat = model.start_chat(history=chat_history)

    # Send last message
    last_message = full_messages[-1]["content"]
    response = chat.send_message(last_message)

    # Check if response contains tool call
    response_text = response.text

    # Simple tool detection; only one TOOL/ARGS pair is honored per turn
    # (later occurrences overwrite earlier ones).
    if "TOOL:" in response_text and "ARGS:" in response_text:
        # Parse tool call
        lines = response_text.split("\n")
        tool_name = None
        args = None

        for line in lines:
            if line.startswith("TOOL:"):
                tool_name = line.replace("TOOL:", "").strip()
            elif line.startswith("ARGS:"):
                # NOTE(review): malformed ARGS JSON raises JSONDecodeError
                # here and bubbles up to the API as a 500 — confirm whether
                # it should be tolerated like task_chat does.
                args = json.loads(line.replace("ARGS:", "").strip())

        # Execute tool if found
        if tool_name and args:
            tool_result = None

            if tool_name == "list_projects":
                tool_result = list_projects(user_id=args["userId"])
            elif tool_name == "list_tasks":
                tool_result = list_tasks(
                    project_id=args["projectId"],
                    status=args.get("status")
                )
            elif tool_name == "list_activity":
                tool_result = list_activity(
                    project_id=args["projectId"],
                    limit=args.get("limit", 20)
                )
            elif tool_name == "memory_search":
                tool_result = await memory_search(
                    project_id=args["projectId"],
                    query=args["query"]
                )

            # Send tool result back to model
            # NOTE(review): truthiness check — an empty list/dict result is
            # treated the same as "no tool ran" and skips the follow-up.
            if tool_result:
                follow_up = f"Tool {tool_name} returned: {json.dumps(tool_result)}\n\nBased on this, answer the user's question."
                final_response = chat.send_message(follow_up)
                return final_response.text

    # No tool call detected (or tool produced nothing): return the raw reply.
    return response_text
325
+
326
+
327
async def task_chat(
    task_id: str,
    task_title: str,
    task_description: str,
    project_id: str,
    user_id: str,
    message: str,
    history: list[dict],
    current_datetime: str
) -> dict:
    """Chat with AI agent while working on a task.

    The agent can:
    - Answer questions and give coding advice
    - Search project memory for context
    - Complete the task when user indicates they're done

    Args:
        task_id: ID of the task being worked on
        task_title: Title of the task
        task_description: Description of the task
        project_id: Project ID
        user_id: User ID working on the task
        message: User's message
        history: Conversation history
        current_datetime: Current timestamp

    Returns:
        {message: str, taskCompleted?: bool, taskStatus?: str}
        (taskCompleted/taskStatus are always present except in the
        all-models-rate-limited early return, which only has "message")
    """
    from app.tools.memory import complete_task, memory_search
    from app.model_router import router

    # System prompt with task context
    system_prompt = f"""You are an AI assistant helping a developer work on a task.

CURRENT TASK:
- Title: {task_title}
- Description: {task_description or 'No description'}
- Task ID: {task_id}

USER: {user_id}
PROJECT: {project_id}
CURRENT TIME: {current_datetime}

YOUR CAPABILITIES:
1. Answer questions and give coding advice related to the task
2. Search project memory for relevant context (completed tasks, documentation)
3. Complete the task when the user EXPLICITLY CONFIRMS

TASK COMPLETION FLOW:
When the user indicates they've finished (e.g., "I'm done", "finished it", describes what they did):
1. Briefly acknowledge what they accomplished
2. ASK SIMPLY: "Would you like me to mark this task as complete?" (just this question, nothing more)
3. WAIT for user confirmation (e.g., "yes", "mark it", "complete it", "sure")
4. ONLY after explicit confirmation, call the complete_task tool

IMPORTANT:
- Do NOT call complete_task until the user explicitly confirms
- Do NOT ask for additional details or descriptions when confirming - just ask yes/no
- The user has already told you what they did - use that information for the complete_task tool

To use tools, format your response as:
TOOL: tool_name
ARGS: {{"arg1": "value1"}}
RESULT_PENDING

After I provide the tool result, give your final response to the user.

Available tools:
- memory_search: Search project memory. Args: {{"query": "search terms"}}
- complete_task: Mark task as complete. Args: {{"what_i_did": "description of work done", "code_snippet": "optional code"}}

Be helpful, concise, and focused on helping complete the task."""

    # Build conversation for the model
    chat_messages = []

    # Add history (convert role names: assistant -> model for Gemini)
    for msg in history:
        role = "model" if msg["role"] == "assistant" else "user"
        chat_messages.append({
            "role": role,
            "parts": [msg["content"]]
        })

    # Get best available model
    model_name = router.get_model_for_task("chat")
    if not model_name:
        return {"message": "All AI models are temporarily unavailable. Please try again in a minute."}

    model = router.models[model_name]
    router._record_usage(model_name)

    # Start chat with system context in first message (system prompt is
    # smuggled in as a user turn plus a canned model acknowledgement).
    first_message = f"{system_prompt}\n\nUser's first message will follow."
    chat_history = [{"role": "user", "parts": [first_message]}, {"role": "model", "parts": ["Understood. I'm ready to help you work on this task. What would you like to know or do?"]}]

    # Add conversation history
    chat_history.extend(chat_messages)

    chat = model.start_chat(history=chat_history)

    # Send user's message
    response = chat.send_message(message)
    response_text = response.text

    # Check for tool calls
    task_completed = False
    task_status = "in_progress"

    if "TOOL:" in response_text and "ARGS:" in response_text:
        lines = response_text.split("\n")
        tool_name = None
        args = None

        for line in lines:
            if line.startswith("TOOL:"):
                tool_name = line.replace("TOOL:", "").strip()
            elif line.startswith("ARGS:"):
                try:
                    args = json.loads(line.replace("ARGS:", "").strip())
                except json.JSONDecodeError:
                    # Malformed ARGS line: ignore it and keep scanning.
                    continue

        if tool_name and args:
            tool_result = None

            if tool_name == "memory_search":
                tool_result = await memory_search(
                    project_id=project_id,
                    query=args.get("query", "")
                )
            elif tool_name == "complete_task":
                # Fall back to the user's own message if the model did not
                # supply a what_i_did summary.
                what_i_did = args.get("what_i_did", message)
                code_snippet = args.get("code_snippet")

                tool_result = await complete_task(
                    task_id=task_id,
                    project_id=project_id,
                    user_id=user_id,
                    what_i_did=what_i_did,
                    code_snippet=code_snippet
                )

                # Only flip status when the tool reports no error.
                if "error" not in tool_result:
                    task_completed = True
                    task_status = "done"

            # Get follow-up response with tool result
            # NOTE(review): truthiness check — a falsy-but-valid tool result
            # (e.g. empty search hits) skips the follow-up turn.
            if tool_result:
                follow_up = f"Tool {tool_name} returned: {json.dumps(tool_result)}\n\nProvide your response to the user."
                final_response = chat.send_message(follow_up)
                response_text = final_response.text

    return {
        "message": response_text,
        "taskCompleted": task_completed,
        "taskStatus": task_status
    }
backend/app/main.py ADDED
@@ -0,0 +1,544 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""FastAPI application for Project Memory - API layer calling MCP tools."""

from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from contextlib import asynccontextmanager
from typing import List, Optional
import os
from dotenv import load_dotenv

from app import schemas
from app.schemas import (
    ProjectCreate, ProjectJoin, Project,
    TaskCreate, Task, TaskCompleteRequest, TaskCompleteResponse,
    ActivityResponse, SearchRequest, SearchResponse,
    SmartQueryRequest, SmartQueryResponse,
    ChatRequest, ChatResponse, ErrorResponse,
    UserCreate, User
)

from app.tools.projects import create_project, list_projects, join_project, check_project_id_available
from app.tools.tasks import create_task, list_tasks, list_activity
from app.tools.memory import complete_task, memory_search

# Load environment variables
load_dotenv()


@asynccontextmanager
async def lifespan(app: FastAPI):
    """Initialize database and vector store on startup.

    Runs once before the app starts serving; no shutdown work after yield.
    Imports are local to avoid import-time side effects at module load.
    """
    from app.database import init_db
    from app.vectorstore import init_vectorstore

    init_db()
    init_vectorstore()
    print("[OK] Database and vector store initialized")
    yield


# Initialize FastAPI app
app = FastAPI(
    title="Project Memory API",
    description="Multi-user, multi-project AI memory system powered by MCP",
    version="1.0.0",
    lifespan=lifespan
)

# Configure CORS: allow the configured frontend plus common local dev ports.
frontend_url = os.getenv("FRONTEND_URL", "http://localhost:5173")
app.add_middleware(
    CORSMiddleware,
    allow_origins=[frontend_url, "http://localhost:5173", "http://localhost:3000"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# ==================== Health Check ====================
@app.get("/")
async def root():
    """Health check endpoint."""
    return {"status": "ok", "message": "Project Memory API is running"}
65
+
66
+
67
# ==================== User Endpoints ====================
@app.post("/api/users", response_model=schemas.User)
async def create_user(user: schemas.UserCreate):
    """Create a new user.

    Generates a unique id (first 3 letters of first name + 4 random digits),
    retrying on collision, and persists the user.
    """
    from app.database import get_db
    from app.models import User, generate_user_id

    # Sessions from next(get_db()) are not auto-closed (the generator's
    # finally only runs when it is exhausted), so close explicitly —
    # matching the try/finally pattern used by the other endpoints.
    db = next(get_db())
    try:
        # Generate unique user ID (first 3 letters of firstname + 4 random digits)
        user_id = generate_user_id(user.firstName)

        # Ensure ID is unique (regenerate if collision)
        while db.query(User).filter(User.id == user_id).first():
            user_id = generate_user_id(user.firstName)

        # Create new user
        new_user = User(
            id=user_id,
            first_name=user.firstName,
            last_name=user.lastName,
            avatar_url=user.avatar_url
        )
        db.add(new_user)
        db.commit()
        db.refresh(new_user)

        return {
            "id": new_user.id,
            "firstName": new_user.first_name,
            "lastName": new_user.last_name,
            "avatar_url": new_user.avatar_url,
            "created_at": new_user.created_at
        }
    finally:
        db.close()


@app.get("/api/users/{user_id}", response_model=schemas.User)
async def get_user(user_id: str):
    """Get user by ID.

    Raises:
        HTTPException: 404 if no user with the given id exists.
    """
    from app.database import get_db
    from app.models import User

    db = next(get_db())
    try:
        user = db.query(User).filter(User.id == user_id).first()

        if not user:
            raise HTTPException(status_code=404, detail="User not found")

        return {
            "id": user.id,
            "firstName": user.first_name,
            "lastName": user.last_name,
            "avatar_url": user.avatar_url,
            "created_at": user.created_at
        }
    finally:
        db.close()


@app.get("/api/users")
async def list_users():
    """List all users (id and name only)."""
    from app.database import get_db
    from app.models import User

    db = next(get_db())
    try:
        users = db.query(User).all()
        return [{"id": u.id, "firstName": u.first_name, "lastName": u.last_name} for u in users]
    finally:
        db.close()
133
+
134
+
135
# ==================== Project Endpoints ====================
@app.get("/api/projects/check/{project_id}")
async def check_project_availability(project_id: str):
    """Check if a project ID is available (thin wrapper over the tool)."""
    try:
        result = check_project_id_available(project_id=project_id)
        return result
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/api/projects")
async def get_projects(userId: str):
    """List all projects for a user."""
    try:
        result = list_projects(user_id=userId)
        return result
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/projects", response_model=Project)
async def create_new_project(project: ProjectCreate):
    """Create a new project; tool-level errors map to 400, the rest to 500."""
    try:
        result = create_project(
            name=project.name,
            description=project.description,
            user_id=project.userId
        )
        if "error" in result:
            raise HTTPException(status_code=400, detail=result["error"])
        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/projects/{project_id}/join")
async def join_existing_project(project_id: str, request: ProjectJoin):
    """Join an existing project as the given user."""
    try:
        result = join_project(
            project_id=project_id,
            user_id=request.userId
        )
        if "error" in result:
            raise HTTPException(status_code=400, detail=result["error"])
        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.get("/api/projects/{project_id}/members")
async def get_project_members(project_id: str):
    """Get all members of a project with their role and join date.

    Raises:
        HTTPException: 404 if the project does not exist.
    """
    # NOTE(review): this local Project import shadows the schemas.Project
    # name used as response_model above — confirm that is intentional.
    from app.database import get_db
    from app.models import Project, ProjectMembership, User

    db = next(get_db())
    try:
        # Check project exists
        project = db.query(Project).filter(Project.id == project_id).first()
        if not project:
            raise HTTPException(status_code=404, detail="Project not found")

        # Get all memberships with user details
        memberships = db.query(ProjectMembership, User).join(
            User, ProjectMembership.user_id == User.id
        ).filter(ProjectMembership.project_id == project_id).all()

        members = [
            {
                "id": user.id,
                "firstName": user.first_name,
                "lastName": user.last_name,
                "avatar_url": user.avatar_url,
                "role": membership.role,
                "joined_at": membership.joined_at.isoformat() if membership.joined_at else None
            }
            for membership, user in memberships
        ]

        return {"members": members}
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        db.close()
228
+
229
+
230
# ==================== Task Endpoints ====================
@app.get("/api/projects/{project_id}/tasks")
async def get_project_tasks(project_id: str, status: Optional[str] = None):
    """Get all tasks for a project, optionally filtered by status."""
    try:
        result = list_tasks(
            project_id=project_id,
            status=status
        )
        return result
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/projects/{project_id}/tasks", response_model=Task)
async def create_new_task(project_id: str, task: TaskCreate):
    """Create a new task in a project; tool-level errors map to 400."""
    try:
        result = create_task(
            project_id=project_id,
            title=task.title,
            description=task.description,
            assigned_to=task.assignedTo
        )
        if "error" in result:
            raise HTTPException(status_code=400, detail=result["error"])
        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


@app.post("/api/projects/{project_id}/tasks/generate")
async def generate_project_tasks(project_id: str, request: dict = None):
    """Generate demo tasks for a project using AI.

    Does NOT save to database - returns generated tasks for user to edit.
    Max 50 tasks.

    Raises:
        HTTPException: 404 if the project is missing, 400 if the LLM
            response could not be parsed (ValueError), 500 otherwise.
    """
    from app.llm import generate_tasks
    from app.database import get_db
    from app.models import Project

    db = next(get_db())
    try:
        # Get project details
        project = db.query(Project).filter(Project.id == project_id).first()
        if not project:
            raise HTTPException(status_code=404, detail="Project not found")

        # Get count from request, default 50, max 50
        count = min(request.get("count", 50) if request else 50, 50)

        # Generate tasks using LLM (no user prompt needed)
        tasks = await generate_tasks(
            project_name=project.name,
            project_description=project.description,
            count=count
        )

        return {"tasks": tasks}
    except HTTPException:
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        db.close()
300
+
301
+
302
@app.post("/api/tasks/{task_id}/complete", response_model=TaskCompleteResponse)
async def complete_existing_task(task_id: str, request: TaskCompleteRequest):
    """Complete a task with documentation. Generates AI docs and stores embeddings.

    Raises:
        HTTPException: 404 if the task is missing, 400 on tool-level error.
    """
    from app.database import get_db
    from app.models import Task as TaskModel

    db = next(get_db())
    try:
        task = db.query(TaskModel).filter(TaskModel.id == task_id).first()

        if not task:
            raise HTTPException(status_code=404, detail="Task not found")

        # Delegate the heavy lifting (doc generation, embedding, status
        # change) to the memory tool.
        result = await complete_task(
            task_id=task_id,
            project_id=task.project_id,
            user_id=request.userId,
            what_i_did=request.whatIDid,
            code_snippet=request.codeSnippet
        )

        if "error" in result:
            raise HTTPException(status_code=400, detail=result["error"])

        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        db.close()


@app.patch("/api/tasks/{task_id}/status")
async def update_task_status(task_id: str, request: dict):
    """Update task status (for kanban board).

    Accepts {"status": "todo" | "in_progress" | "done"} and returns the
    updated task as a plain dict.
    """
    from app.database import get_db
    from app.models import Task as TaskModel, TaskStatus

    db = next(get_db())
    try:
        task = db.query(TaskModel).filter(TaskModel.id == task_id).first()

        if not task:
            raise HTTPException(status_code=404, detail="Task not found")

        new_status = request.get("status")
        if new_status not in ["todo", "in_progress", "done"]:
            raise HTTPException(status_code=400, detail="Invalid status. Must be: todo, in_progress, or done")

        task.status = TaskStatus(new_status)
        db.commit()
        db.refresh(task)

        return {
            "id": str(task.id),
            "project_id": task.project_id,
            "title": task.title,
            "description": task.description,
            "status": task.status.value,
            "assigned_to": task.assigned_to,
            "created_at": task.created_at.isoformat() if task.created_at else None
        }
    except HTTPException:
        raise
    except Exception as e:
        # Roll back the partial status change before surfacing the error.
        db.rollback()
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        db.close()
372
+
373
+
374
@app.post("/api/tasks/{task_id}/chat")
async def chat_with_task_agent(task_id: str, request: dict):
    """Chat with AI agent while working on a task.

    The agent can answer questions, search project memory, and complete tasks.
    Expects a JSON body with userId, message, and optional projectId,
    history, currentDatetime.

    Raises:
        HTTPException: 404 if the task is missing, 400 when userId or
            message is absent from the body.
    """
    from app.database import get_db
    from app.models import Task as TaskModel
    from app.llm import task_chat

    db = next(get_db())
    try:
        # Get task details
        task = db.query(TaskModel).filter(TaskModel.id == task_id).first()
        if not task:
            raise HTTPException(status_code=404, detail="Task not found")

        # Extract request data; projectId falls back to the task's own project.
        project_id = request.get("projectId", task.project_id)
        user_id = request.get("userId")
        message = request.get("message")
        history = request.get("history", [])
        current_datetime = request.get("currentDatetime", "")

        if not user_id:
            raise HTTPException(status_code=400, detail="userId is required")
        if not message:
            raise HTTPException(status_code=400, detail="message is required")

        # Call the task chat function
        result = await task_chat(
            task_id=task_id,
            task_title=task.title,
            task_description=task.description or "",
            project_id=project_id,
            user_id=user_id,
            message=message,
            history=history,
            current_datetime=current_datetime
        )

        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        db.close()
422
+
423
+
424
# ==================== Activity Feed Endpoint ====================
@app.get("/api/projects/{project_id}/activity")
async def get_project_activity(project_id: str, limit: int = 20):
    """Get recent activity for a project."""
    try:
        result = list_activity(
            project_id=project_id,
            limit=limit
        )
        return result
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


# ==================== Search Endpoint ====================
@app.post("/api/projects/{project_id}/search", response_model=SearchResponse)
async def search_project_memory(project_id: str, request: SearchRequest):
    """Semantic search across project memory."""
    try:
        result = await memory_search(
            project_id=project_id,
            query=request.query,
            filters=request.filters.dict() if request.filters else None
        )

        if "error" in result:
            raise HTTPException(status_code=400, detail=result["error"])

        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


# ==================== Smart Query Endpoint ====================
@app.post("/api/projects/{project_id}/smart-query", response_model=SmartQueryResponse)
async def smart_query_project(project_id: str, request: SmartQueryRequest):
    """Natural language query with context awareness.

    Understands queries like:
    - "What did I do yesterday?"
    - "What did Alice do today?"
    - "How does the auth system work?"
    - "Task 13 status?"
    """
    try:
        from app.smart_query import smart_query

        result = await smart_query(
            project_id=project_id,
            query=request.query,
            current_user_id=request.currentUserId,
            current_datetime=request.currentDatetime
        )

        # NOTE(review): substring check — ANY answer containing the word
        # "error" is rejected with a 400, even legitimate answers about
        # errors. Consider a dedicated error key in smart_query's result;
        # its contract is not visible from this file, so left unchanged.
        if "error" in result.get("answer", ""):
            raise HTTPException(status_code=400, detail=result["answer"])

        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
488
+
489
+
490
# ==================== Chat Endpoint ====================
@app.post("/api/chat", response_model=ChatResponse)
async def chat_with_ai(request: ChatRequest):
    """Chat with AI using MCP tools."""
    try:
        # Import here to avoid circular dependency
        from app.llm import chat_with_tools

        # Convert messages to dict format
        messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]

        # Call the chat function with tool support
        result = await chat_with_tools(
            messages=messages,
            project_id=request.projectId
        )

        return {"message": result}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))


# ==================== Error Handlers ====================
@app.exception_handler(HTTPException)
async def http_exception_handler(request, exc):
    """Custom HTTP exception handler: renders errors as {error, status_code}."""
    return JSONResponse(
        status_code=exc.status_code,
        content={
            "error": exc.detail,
            "status_code": exc.status_code
        }
    )


@app.exception_handler(Exception)
async def general_exception_handler(request, exc):
    """General exception handler: catch-all mapping to a 500 payload.

    NOTE(review): str(exc) is returned to the client — confirm this does
    not leak internals in production.
    """
    return JSONResponse(
        status_code=500,
        content={
            "error": "Internal server error",
            "detail": str(exc)
        }
    )


# Dev entry point: run with auto-reload when executed directly.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(
        "app.main:app",
        host="0.0.0.0",
        port=8000,
        reload=True
    )
backend/app/mcp_server.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""MCP Server for Project Memory - The Brain of the system."""

import asyncio
import json
from mcp.server import Server
from mcp.server.stdio import stdio_server
from mcp.types import Tool, TextContent

from app.database import init_db
from app.tools.projects import create_project, list_projects, join_project
from app.tools.tasks import create_task, list_tasks, list_activity
from app.tools.memory import complete_task, memory_search
from app.smart_query import smart_query
from app.vectorstore import init_vectorstore

# Initialize the MCP server
server = Server("project-memory")


# Tool definitions.
# Static registry served verbatim by handle_list_tools; each entry's
# inputSchema is a JSON Schema describing the `arguments` dict that
# handle_call_tool receives. Argument names are camelCase on the wire and
# mapped to snake_case Python kwargs in the dispatcher.
TOOLS = [
    # Project Tools
    Tool(
        name="create_project",
        description="Create a new project and add the creator as owner",
        inputSchema={
            "type": "object",
            "properties": {
                "name": {"type": "string", "description": "Project name"},
                "description": {"type": "string", "description": "Project description"},
                "userId": {"type": "string", "description": "ID of the user creating the project"}
            },
            "required": ["name", "description", "userId"]
        }
    ),
    Tool(
        name="list_projects",
        description="List all projects for a user",
        inputSchema={
            "type": "object",
            "properties": {
                "userId": {"type": "string", "description": "ID of the user"}
            },
            "required": ["userId"]
        }
    ),
    Tool(
        name="join_project",
        description="Add a user to an existing project",
        inputSchema={
            "type": "object",
            "properties": {
                "projectId": {"type": "string", "description": "ID of the project to join"},
                "userId": {"type": "string", "description": "ID of the user joining"}
            },
            "required": ["projectId", "userId"]
        }
    ),
    # Task Tools
    Tool(
        name="create_task",
        description="Create a new task in a project",
        inputSchema={
            "type": "object",
            "properties": {
                "projectId": {"type": "string", "description": "ID of the project"},
                "title": {"type": "string", "description": "Task title"},
                "description": {"type": "string", "description": "Task description"},
                "assignedTo": {"type": "string", "description": "User ID to assign the task to"}
            },
            "required": ["projectId", "title"]
        }
    ),
    Tool(
        name="list_tasks",
        description="List all tasks for a project, optionally filtered by status",
        inputSchema={
            "type": "object",
            "properties": {
                "projectId": {"type": "string", "description": "ID of the project"},
                "status": {
                    "type": "string",
                    "enum": ["todo", "in_progress", "done"],
                    "description": "Optional status filter"
                }
            },
            "required": ["projectId"]
        }
    ),
    Tool(
        name="list_activity",
        description="Get recent activity (log entries) for a project",
        inputSchema={
            "type": "object",
            "properties": {
                "projectId": {"type": "string", "description": "ID of the project"},
                "limit": {"type": "number", "default": 20, "description": "Maximum number of entries"}
            },
            "required": ["projectId"]
        }
    ),
    # Memory Tools (placeholders for Dev B to implement)
    Tool(
        name="complete_task",
        description="Mark a task as complete with documentation. Generates AI documentation and stores embeddings.",
        inputSchema={
            "type": "object",
            "properties": {
                "taskId": {"type": "string", "description": "ID of the task to complete"},
                "projectId": {"type": "string", "description": "ID of the project"},
                "userId": {"type": "string", "description": "ID of the user completing the task"},
                "whatIDid": {"type": "string", "description": "Description of what was done"},
                "codeSnippet": {"type": "string", "description": "Optional code snippet"}
            },
            "required": ["taskId", "projectId", "userId", "whatIDid"]
        }
    ),
    Tool(
        name="memory_search",
        description="Semantic search across project memory. Returns relevant log entries and AI-synthesized answer.",
        inputSchema={
            "type": "object",
            "properties": {
                "projectId": {"type": "string", "description": "ID of the project to search"},
                "query": {"type": "string", "description": "Search query"},
                "filters": {
                    "type": "object",
                    "properties": {
                        "userId": {"type": "string"},
                        "dateFrom": {"type": "string"},
                        "dateTo": {"type": "string"},
                        "tags": {"type": "array", "items": {"type": "string"}}
                    },
                    "description": "Optional filters"
                }
            },
            "required": ["projectId", "query"]
        }
    ),
    # Smart Query Tool - LLM-first natural language queries
    Tool(
        name="smart_query",
        description="Natural language query with context awareness. Understands 'yesterday', 'I', user names, task references. Use for questions like 'What did I do yesterday?' or 'How does auth work?'",
        inputSchema={
            "type": "object",
            "properties": {
                "projectId": {"type": "string", "description": "Project ID to query"},
                "query": {"type": "string", "description": "Natural language query"},
                "currentUserId": {"type": "string", "description": "ID of user making the query"},
                "currentDatetime": {"type": "string", "description": "Current datetime ISO format (optional)"}
            },
            "required": ["projectId", "query", "currentUserId"]
        }
    )
]
156
+
157
+
158
@server.list_tools()
async def handle_list_tools() -> list[Tool]:
    """Return all available tools.

    Serves the static module-level TOOLS registry; MCP clients call this
    to discover the tool catalogue and input schemas.
    """
    return TOOLS
162
+
163
+
164
@server.call_tool()
async def handle_call_tool(name: str, arguments: dict) -> list[TextContent]:
    """Handle tool calls by dispatching to the appropriate function.

    Maps the camelCase wire arguments onto snake_case Python kwargs,
    awaiting the async memory/smart-query tools and calling the project
    and task tools synchronously. Results (or an {"error": ...} dict on
    failure) are returned as a single JSON-encoded TextContent item.
    """
    try:
        if name == "create_project":
            result = create_project(
                name=arguments["name"],
                description=arguments["description"],
                user_id=arguments["userId"],
            )
        elif name == "list_projects":
            result = list_projects(user_id=arguments["userId"])
        elif name == "join_project":
            result = join_project(
                project_id=arguments["projectId"],
                user_id=arguments["userId"],
            )
        elif name == "create_task":
            result = create_task(
                project_id=arguments["projectId"],
                title=arguments["title"],
                description=arguments.get("description"),
                assigned_to=arguments.get("assignedTo"),
            )
        elif name == "list_tasks":
            result = list_tasks(
                project_id=arguments["projectId"],
                status=arguments.get("status"),
            )
        elif name == "list_activity":
            result = list_activity(
                project_id=arguments["projectId"],
                limit=arguments.get("limit", 20),
            )
        elif name == "complete_task":
            # Async: generates documentation and stores embeddings.
            result = await complete_task(
                task_id=arguments["taskId"],
                project_id=arguments["projectId"],
                user_id=arguments["userId"],
                what_i_did=arguments["whatIDid"],
                code_snippet=arguments.get("codeSnippet"),
            )
        elif name == "memory_search":
            result = await memory_search(
                project_id=arguments["projectId"],
                query=arguments["query"],
                filters=arguments.get("filters"),
            )
        elif name == "smart_query":
            result = await smart_query(
                project_id=arguments["projectId"],
                query=arguments["query"],
                current_user_id=arguments["currentUserId"],
                current_datetime=arguments.get("currentDatetime"),
            )
        else:
            result = {"error": f"Unknown tool: {name}"}

        # default=str keeps datetimes and other non-JSON types serializable.
        return [TextContent(type="text", text=json.dumps(result, default=str))]

    except Exception as e:
        error_result = {"error": str(e)}
        return [TextContent(type="text", text=json.dumps(error_result))]
237
+
238
+
239
async def main():
    """Run the MCP server over stdio.

    Initializes the database tables and the vector store, then serves
    MCP requests until the client closes the stream.
    """
    # Initialize database tables
    init_db()
    # Initialize vector store for embeddings
    init_vectorstore()

    # FIX: write the startup banner to stderr, not stdout. The MCP stdio
    # transport reserves stdout exclusively for JSON-RPC protocol frames;
    # any stray text on stdout can corrupt the stream and break clients.
    import sys
    print("Project Memory MCP Server starting...", file=sys.stderr, flush=True)

    async with stdio_server() as (read_stream, write_stream):
        await server.run(
            read_stream,
            write_stream,
            server.create_initialization_options()
        )


if __name__ == "__main__":
    asyncio.run(main())
backend/app/model_router.py ADDED
@@ -0,0 +1,414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Model Router for multi-model rotation with rate limiting and caching."""
2
+
3
+ import google.generativeai as genai
4
+ import time
5
+ import hashlib
6
+ import os
7
+ from datetime import datetime, timedelta
8
+ from typing import Optional
9
+ from collections import deque
10
+ import asyncio
11
+ from dotenv import load_dotenv
12
+
13
# Read GEMINI_API_KEY(S) and friends from a local .env file if present.
load_dotenv()

# Cooldown durations in seconds
KEY_COOLDOWN_RATE_LIMIT = 60  # For 429/quota errors (key is throttled; back off a full minute)
KEY_COOLDOWN_OTHER = 30  # For other transient errors
18
+
19
+
20
+ def _load_api_keys() -> list[str]:
21
+ """Load API keys from environment (backward compatible)."""
22
+ keys_str = os.getenv("GEMINI_API_KEYS", "")
23
+ if keys_str:
24
+ return [k.strip() for k in keys_str.split(",") if k.strip()]
25
+ single_key = os.getenv("GEMINI_API_KEY")
26
+ return [single_key] if single_key else []
27
+
28
+
29
# Model configurations with RPM limits and quality tiers.
# `rpm` is a per-API-key requests-per-minute budget enforced locally by
# ModelRouter._check_rate_limit; `quality` ranks models (lower = better).
MODEL_CONFIGS = {
    "gemini-2.0-flash": {"rpm": 15, "quality": 1},
    "gemini-2.0-flash-lite": {"rpm": 30, "quality": 2},
    "gemma-3-27b-it": {"rpm": 30, "quality": 3},
    "gemma-3-12b-it": {"rpm": 30, "quality": 4},
    "gemma-3-4b-it": {"rpm": 30, "quality": 5},
    "gemma-3-1b-it": {"rpm": 30, "quality": 6},
}

# Task type to model priority mapping (lower quality number = better model).
# Order matters: ModelRouter tries each list left-to-right before falling
# back to any remaining model in MODEL_CONFIGS.
TASK_PRIORITIES = {
    "chat": ["gemini-2.0-flash", "gemini-2.0-flash-lite", "gemma-3-27b-it"],
    "smart_query": ["gemini-2.0-flash", "gemma-3-27b-it", "gemma-3-12b-it"],
    "documentation": ["gemini-2.0-flash-lite", "gemma-3-27b-it", "gemma-3-12b-it"],
    "synthesis": ["gemma-3-27b-it", "gemma-3-12b-it", "gemma-3-4b-it"],
    "default": ["gemini-2.0-flash", "gemini-2.0-flash-lite", "gemma-3-27b-it",
                "gemma-3-12b-it", "gemma-3-4b-it", "gemma-3-1b-it"],
}

# Cache TTL in seconds
CACHE_TTL = 300  # 5 minutes

# Retry delay in seconds (sleep after a 429 before rotating to the next key)
RETRY_DELAY = 2.5
54
+
55
+
56
class ModelRouter:
    """Manages model rotation, rate limiting, response caching, and multi-key support.

    State is purely in-process and per-instance:
    - key_health: per-key cooldown bookkeeping (healthy flag + retry_after time)
    - usage: per-key, per-model deques of call timestamps for a sliding
      60-second RPM window
    - cache: prompt-keyed response cache with a CACHE_TTL expiry

    NOTE(review): genai.configure() mutates global SDK state, so interleaved
    use of multiple keys from concurrent coroutines could race — confirm
    single-threaded/serialized usage before relying on multi-key rotation
    under concurrency.
    """

    def __init__(self):
        # Load API keys
        self.api_keys = _load_api_keys()
        if not self.api_keys:
            raise ValueError("No API keys found. Set GEMINI_API_KEYS or GEMINI_API_KEY in .env")

        # Key rotation state
        self.key_index = 0
        self.key_health: dict[int, dict] = {
            i: {"healthy": True, "last_error": None, "retry_after": None}
            for i in range(len(self.api_keys))
        }

        # Track usage per model per key: {key_idx: {model: deque}}
        self.usage: dict[int, dict[str, deque]] = {
            i: {model: deque() for model in MODEL_CONFIGS}
            for i in range(len(self.api_keys))
        }

        # Response cache: {cache_key: {"response": str, "timestamp": datetime, "model": str}}
        self.cache: dict[str, dict] = {}

        # Initialize with first key (models created on-demand for key rotation)
        # NOTE(review): self.models is populated here but generate() builds
        # fresh instances via _get_model_with_key; this dict appears unused
        # within this class — verify before removing.
        self._configure_key(0)
        self.models: dict[str, genai.GenerativeModel] = {
            model: genai.GenerativeModel(model) for model in MODEL_CONFIGS
        }

    def _configure_key(self, key_idx: int):
        """Configure genai with the specified API key (global SDK state)."""
        genai.configure(api_key=self.api_keys[key_idx])

    def _is_key_healthy(self, key_idx: int) -> bool:
        """Check if a key is healthy (not in cooldown).

        Side effect: lazily re-marks a key healthy once its retry_after
        deadline has passed.
        """
        health = self.key_health[key_idx]
        if not health["healthy"] and health["retry_after"]:
            if datetime.now() > health["retry_after"]:
                health["healthy"] = True
                health["last_error"] = None
                health["retry_after"] = None
        return health["healthy"]

    def _mark_key_unhealthy(self, key_idx: int, error: Exception, cooldown_seconds: int):
        """Mark a key as unhealthy with cooldown (records the error text)."""
        self.key_health[key_idx] = {
            "healthy": False,
            "last_error": str(error),
            "retry_after": datetime.now() + timedelta(seconds=cooldown_seconds)
        }

    def _get_next_key(self) -> tuple[int, str]:
        """Get next healthy API key using round-robin.

        Returns (key_index, key_string). If every key is in cooldown, the
        one with the earliest retry_after is forcibly reset and returned so
        callers are never left without a key.
        """
        num_keys = len(self.api_keys)

        # Try each key once
        for _ in range(num_keys):
            idx = self.key_index % num_keys
            self.key_index += 1
            if self._is_key_healthy(idx):
                return idx, self.api_keys[idx]

        # All keys unhealthy - find the one with earliest retry_after
        earliest_idx = 0
        earliest_time = datetime.max
        for idx, health in self.key_health.items():
            if health["retry_after"] and health["retry_after"] < earliest_time:
                earliest_time = health["retry_after"]
                earliest_idx = idx

        # Reset that key and use it
        self.key_health[earliest_idx]["healthy"] = True
        return earliest_idx, self.api_keys[earliest_idx]

    def _get_model_with_key(self, model_name: str, key_idx: int) -> genai.GenerativeModel:
        """Get a model instance configured with the specified key.

        Reconfigures the global SDK key first, so the returned model is tied
        to key_idx only until the next _configure_key call.
        """
        self._configure_key(key_idx)
        return genai.GenerativeModel(model_name)

    def _get_cache_key(self, task_type: str, user_id: Optional[str], prompt: str) -> str:
        """Generate cache key from task type, user, and prompt.

        md5 is used only as a non-cryptographic bucket key; prompts sharing
        their first 200 chars (same task/user) intentionally collide.
        """
        # Use first 200 chars of prompt to keep keys reasonable
        key_string = f"{task_type}:{user_id or 'anon'}:{prompt[:200]}"
        return hashlib.md5(key_string.encode()).hexdigest()

    def _check_cache(self, cache_key: str) -> Optional[str]:
        """Check if response is cached and not expired (expired entries are evicted)."""
        if cache_key in self.cache:
            entry = self.cache[cache_key]
            if datetime.now() - entry["timestamp"] < timedelta(seconds=CACHE_TTL):
                return entry["response"]
            else:
                # Expired, remove it
                del self.cache[cache_key]
        return None

    def _store_cache(self, cache_key: str, response: str, model_used: str):
        """Store response in cache."""
        self.cache[cache_key] = {
            "response": response,
            "timestamp": datetime.now(),
            "model": model_used
        }
        # Clean old cache entries periodically (every 100 entries)
        if len(self.cache) > 100:
            self._clean_cache()

    def _clean_cache(self):
        """Remove expired cache entries."""
        now = datetime.now()
        expired_keys = [
            key for key, entry in self.cache.items()
            if now - entry["timestamp"] >= timedelta(seconds=CACHE_TTL)
        ]
        for key in expired_keys:
            del self.cache[key]

    def _check_rate_limit(self, model_name: str, key_idx: int = 0) -> bool:
        """Check if model is within rate limit for a specific key. Returns True if OK to use.

        Side effect: prunes timestamps older than the 60-second window from
        the usage deque.
        """
        config = MODEL_CONFIGS[model_name]
        rpm_limit = config["rpm"]
        usage_queue = self.usage[key_idx][model_name]

        # Remove timestamps older than 60 seconds
        now = time.time()
        while usage_queue and usage_queue[0] < now - 60:
            usage_queue.popleft()

        # Check if under limit
        return len(usage_queue) < rpm_limit

    def _record_usage(self, model_name: str, key_idx: int = 0):
        """Record a usage for rate limiting."""
        self.usage[key_idx][model_name].append(time.time())

    def get_model_for_task(self, task_type: str) -> Optional[str]:
        """Get the best available model for a task type (checks all keys).

        Returns None only when every model on every healthy key is at its
        RPM limit.
        """
        priorities = TASK_PRIORITIES.get(task_type, TASK_PRIORITIES["default"])

        # Check across all healthy keys
        for key_idx in range(len(self.api_keys)):
            if not self._is_key_healthy(key_idx):
                continue
            for model_name in priorities:
                if self._check_rate_limit(model_name, key_idx):
                    return model_name

        # All preferred models at limit, try any available model on any key
        for key_idx in range(len(self.api_keys)):
            if not self._is_key_healthy(key_idx):
                continue
            for model_name in MODEL_CONFIGS:
                if self._check_rate_limit(model_name, key_idx):
                    return model_name

        return None

    async def generate(
        self,
        prompt: str,
        task_type: str = "default",
        user_id: Optional[str] = None,
        use_cache: bool = True
    ) -> tuple[str, str]:
        """Generate response with model rotation, key rotation, and caching.

        Args:
            prompt: The prompt to send to the model
            task_type: Type of task (chat, smart_query, documentation, synthesis)
            user_id: User ID for cache key differentiation
            use_cache: Whether to use caching (default True)

        Returns:
            Tuple of (response_text, model_used); model_used is the literal
            string "cache" on a cache hit.

        Raises:
            Exception: when every key/model combination fails or is rate
            limited.
        """
        # Check cache first
        if use_cache:
            cache_key = self._get_cache_key(task_type, user_id, prompt)
            cached = self._check_cache(cache_key)
            if cached:
                return cached, "cache"

        # Get prioritized models for this task
        priorities = TASK_PRIORITIES.get(task_type, TASK_PRIORITIES["default"])
        all_models = list(priorities) + [m for m in MODEL_CONFIGS if m not in priorities]

        last_error = None
        tried_combinations = set()

        # Try each key/model combination
        max_attempts = len(self.api_keys) * len(all_models)

        for _ in range(max_attempts):
            # Get next healthy key
            # NOTE(review): api_key is unused below — _get_model_with_key
            # reconfigures by index. Kept for tuple unpacking only.
            key_idx, api_key = self._get_next_key()

            for model_name in all_models:
                combo = (key_idx, model_name)
                if combo in tried_combinations:
                    continue

                # Check rate limit for this key/model
                if not self._check_rate_limit(model_name, key_idx):
                    continue

                tried_combinations.add(combo)

                try:
                    # Get model with this key
                    model = self._get_model_with_key(model_name, key_idx)
                    self._record_usage(model_name, key_idx)

                    response = model.generate_content(prompt)
                    response_text = response.text

                    # Cache the response
                    if use_cache:
                        self._store_cache(cache_key, response_text, model_name)

                    return response_text, model_name

                except Exception as e:
                    error_str = str(e).lower()
                    last_error = e

                    # Determine cooldown based on error type
                    if "429" in str(e) or "resource exhausted" in error_str or "quota" in error_str:
                        # Rate limit - mark key unhealthy, wait briefly, try next
                        self._mark_key_unhealthy(key_idx, e, KEY_COOLDOWN_RATE_LIMIT)
                        await asyncio.sleep(RETRY_DELAY)
                        break  # Try next key

                    elif "401" in str(e) or "403" in str(e) or "invalid" in error_str:
                        # Auth error - mark key permanently unhealthy
                        self._mark_key_unhealthy(key_idx, e, 86400)  # 24 hours
                        break  # Try next key

                    else:
                        # Other error - short cooldown, try next model
                        await asyncio.sleep(0.5)
                        continue

        # All combinations exhausted
        if last_error:
            raise Exception(f"All models/keys exhausted. Last error: {last_error}")
        else:
            raise Exception("All models are rate limited. Please try again in a minute.")

    async def generate_with_model(
        self,
        model_name: str,
        prompt: str,
        user_id: Optional[str] = None,
        use_cache: bool = True
    ) -> str:
        """Generate with a specific model (for chat sessions that need consistency).

        Falls back to other models if specified model is rate limited.

        NOTE(review): model_name is currently ignored — this delegates to
        generate() with the "default" task priorities, so the requested
        model is not tried first. Confirm whether callers rely on model
        stickiness before fixing.
        """
        response, _ = await self.generate(
            prompt=prompt,
            task_type="default",
            user_id=user_id,
            use_cache=use_cache
        )
        return response

    def get_stats(self) -> dict:
        """Get current usage stats for monitoring.

        Returns a dict with per-key health, per-model aggregate usage over
        the trailing 60 seconds (limit = per-key RPM * number of keys), and
        the current cache size.
        """
        now = time.time()
        stats = {
            "keys": {
                "total": len(self.api_keys),
                "healthy": sum(1 for i in range(len(self.api_keys)) if self._is_key_healthy(i)),
                "details": {}
            },
            "models": {},
            "cache_size": len(self.cache)
        }

        # Per-key stats
        for key_idx in range(len(self.api_keys)):
            health = self.key_health[key_idx]
            stats["keys"]["details"][f"key_{key_idx}"] = {
                "healthy": self._is_key_healthy(key_idx),
                "last_error": health["last_error"],
                "retry_after": health["retry_after"].isoformat() if health["retry_after"] else None
            }

        # Aggregate model usage across all keys
        for model_name in MODEL_CONFIGS:
            total_used = 0
            for key_idx in range(len(self.api_keys)):
                usage_queue = self.usage[key_idx][model_name]
                total_used += sum(1 for t in usage_queue if t > now - 60)

            # Limit is per-key, so total limit = per_key_limit * num_keys
            per_key_limit = MODEL_CONFIGS[model_name]["rpm"]
            total_limit = per_key_limit * len(self.api_keys)

            stats["models"][model_name] = {
                "used": total_used,
                "limit": total_limit,
                "available": total_limit - total_used
            }

        return stats
365
+
366
+
367
# Global router instance.
# Instantiated at import time; raises ValueError if no API keys are set,
# so importing this module requires GEMINI_API_KEY(S) in the environment.
router = ModelRouter()


# Convenience functions (thin module-level wrappers around `router`)
async def generate(
    prompt: str,
    task_type: str = "default",
    user_id: Optional[str] = None,
    use_cache: bool = True
) -> str:
    """Generate response using model router.

    Args:
        prompt: The prompt to send
        task_type: One of 'chat', 'smart_query', 'documentation', 'synthesis', 'default'
        user_id: User ID for cache differentiation
        use_cache: Whether to use response cache

    Returns:
        Response text (the model name is discarded; use generate_with_info
        if you need it)
    """
    response, model = await router.generate(prompt, task_type, user_id, use_cache)
    return response


async def generate_with_info(
    prompt: str,
    task_type: str = "default",
    user_id: Optional[str] = None,
    use_cache: bool = True
) -> tuple[str, str]:
    """Generate response and return which model was used.

    Returns:
        Tuple of (response_text, model_name); model_name is "cache" on a
        cache hit.
    """
    return await router.generate(prompt, task_type, user_id, use_cache)


def get_model_for_task(task_type: str) -> Optional[str]:
    """Get best available model for a task type (None if all rate limited)."""
    return router.get_model_for_task(task_type)


def get_stats() -> dict:
    """Get current router stats (key health, model usage, cache size)."""
    return router.get_stats()
backend/app/models.py ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SQLAlchemy models for Project Memory."""
2
+
3
+ from sqlalchemy import Column, String, DateTime, ForeignKey, Text, Enum, JSON
4
+ from sqlalchemy.orm import relationship
5
+ from datetime import datetime
6
+ import uuid
7
+ import enum
8
+ import random
9
+
10
+ from app.database import Base
11
+
12
+
13
def generate_uuid() -> str:
    """Return a fresh random UUID (version 4) in canonical string form."""
    new_id = uuid.uuid4()
    return f"{new_id}"
16
+
17
+
18
def generate_user_id(first_name: str) -> str:
    """Generate user ID: first 3 letters of firstname + 4 random digits.

    Example: 'Amal' -> 'ama1234'

    Note: names shorter than 3 characters yield a shorter prefix.
    """
    digits = ""
    for _ in range(4):
        digits += str(random.randint(0, 9))
    return first_name[:3].lower() + digits
26
+
27
+
28
class ActorType(str, enum.Enum):
    """Who performed the action.

    Inherits from str so values serialize directly to JSON and compare
    equal to their string form.
    """
    human = "human"
    agent = "agent"


class ActionType(str, enum.Enum):
    """Type of action recorded in a LogEntry."""
    task_completed = "task_completed"
    doc_generated = "doc_generated"
    query_answered = "query_answered"


class TaskStatus(str, enum.Enum):
    """Task status states (linear todo -> in_progress -> done lifecycle)."""
    todo = "todo"
    in_progress = "in_progress"
    done = "done"
46
+
47
+
48
class User(Base):
    """User account."""
    __tablename__ = "users"

    id = Column(String, primary_key=True)  # Generated as first_name[:3] + 4 random digits (see generate_user_id)
    first_name = Column(String, nullable=False)
    last_name = Column(String, nullable=False)
    avatar_url = Column(String, nullable=True)  # optional profile image URL
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationships
    memberships = relationship("ProjectMembership", back_populates="user")
    created_projects = relationship("Project", back_populates="creator")
    log_entries = relationship("LogEntry", back_populates="user")

    @property
    def name(self) -> str:
        """Full name for backward compatibility."""
        return f"{self.first_name} {self.last_name}"
67
+
68
+
69
class Project(Base):
    """Project that contains tasks and memory.

    Option A: project name is also the stable project ID. We persist the ID explicitly
    from the tools layer (create_project uses the provided name as id).
    """
    __tablename__ = "projects"

    # ID is provided by callers (equal to the project name); no UUID default
    id = Column(String, primary_key=True)
    name = Column(String, nullable=False)
    description = Column(Text, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    created_by = Column(String, ForeignKey("users.id"), nullable=True)

    # Relationships
    creator = relationship("User", back_populates="created_projects")
    memberships = relationship("ProjectMembership", back_populates="project")
    tasks = relationship("Task", back_populates="project")
    log_entries = relationship("LogEntry", back_populates="project")
89
+
90
+
91
class ProjectMembership(Base):
    """Association between users and projects (many-to-many join table with role)."""
    __tablename__ = "project_memberships"

    id = Column(String, primary_key=True, default=generate_uuid)
    project_id = Column(String, ForeignKey("projects.id"), nullable=False)
    user_id = Column(String, ForeignKey("users.id"), nullable=False)
    role = Column(String, default="member")  # "owner" or "member"
    joined_at = Column(DateTime, default=datetime.utcnow)

    # Relationships
    project = relationship("Project", back_populates="memberships")
    user = relationship("User", back_populates="memberships")
104
+
105
+
106
class Task(Base):
    """Task within a project."""
    __tablename__ = "tasks"

    id = Column(String, primary_key=True, default=generate_uuid)
    project_id = Column(String, ForeignKey("projects.id"), nullable=False)
    title = Column(String, nullable=False)
    description = Column(Text, nullable=True)
    status = Column(Enum(TaskStatus), default=TaskStatus.todo)
    assigned_to = Column(String, nullable=True)  # userId or "agent"; deliberately no FK so "agent" is valid
    created_at = Column(DateTime, default=datetime.utcnow)
    completed_at = Column(DateTime, nullable=True)  # set when status transitions to done

    # Relationships
    project = relationship("Project", back_populates="tasks")
    log_entries = relationship("LogEntry", back_populates="task")
122
+
123
+
124
class LogEntry(Base):
    """
    The core of project memory.
    Records what was done, by whom, and stores LLM-generated documentation.
    """
    __tablename__ = "log_entries"

    id = Column(String, primary_key=True, default=generate_uuid)
    project_id = Column(String, ForeignKey("projects.id"), nullable=False)
    task_id = Column(String, ForeignKey("tasks.id"), nullable=True)  # null for non-task actions
    user_id = Column(String, ForeignKey("users.id"), nullable=True)  # null when the actor is the agent
    actor_type = Column(Enum(ActorType), nullable=False)
    action_type = Column(Enum(ActionType), nullable=False)
    raw_input = Column(Text, nullable=False)  # What user typed
    code_snippet = Column(Text, nullable=True)  # Optional code
    generated_doc = Column(Text, nullable=False)  # LLM-generated documentation
    tags = Column(JSON, default=list)  # Extracted tags (list-of-strings stored as JSON)
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationships
    project = relationship("Project", back_populates="log_entries")
    task = relationship("Task", back_populates="log_entries")
    user = relationship("User", back_populates="log_entries")
backend/app/schemas.py ADDED
@@ -0,0 +1,173 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Pydantic schemas for FastAPI request/response models."""
2
+
3
+ from pydantic import BaseModel
4
+ from typing import Optional, List, Dict, Any
5
+ from datetime import datetime
6
+
7
+
8
# ==================== User Schemas ====================
class UserBase(BaseModel):
    # camelCase field names mirror the frontend/API JSON payloads.
    firstName: str
    lastName: str
    avatar_url: Optional[str] = None


class UserCreate(UserBase):
    # Same shape as UserBase; the server generates `id` and `created_at`.
    pass


class User(UserBase):
    id: str
    created_at: datetime

    class Config:
        # Allow construction directly from SQLAlchemy ORM objects.
        from_attributes = True


# ==================== Project Schemas ====================
class ProjectBase(BaseModel):
    name: str
    description: str


class ProjectCreate(ProjectBase):
    # ID of the creating user; they become the project owner.
    userId: str


class ProjectJoin(BaseModel):
    userId: str


class Project(ProjectBase):
    id: str
    created_by: str
    created_at: datetime

    class Config:
        from_attributes = True


class ProjectWithRole(Project):
    # The requesting user's membership role in this project.
    role: str  # "owner" or "member"
52
+
53
+
54
# ==================== Task Schemas ====================
class TaskBase(BaseModel):
    title: str
    description: Optional[str] = None


class TaskCreate(TaskBase):
    # userId or "agent"; optional — tasks may start unassigned.
    assignedTo: Optional[str] = None


class Task(TaskBase):
    id: str
    project_id: str
    status: str  # "todo", "in_progress", "done"
    assigned_to: Optional[str] = None
    created_at: datetime
    completed_at: Optional[datetime] = None

    class Config:
        from_attributes = True


# ==================== Task Completion Schemas ====================
class TaskCompleteRequest(BaseModel):
    userId: str
    whatIDid: str  # free-text description fed to the doc generator
    codeSnippet: Optional[str] = None


class TaskCompleteResponse(BaseModel):
    success: bool
    log_entry_id: str  # ID of the LogEntry created for this completion


# ==================== Activity Log Schemas ====================
class LogEntry(BaseModel):
    id: str
    project_id: str
    task_id: Optional[str] = None
    user_id: Optional[str] = None
    actor_type: str  # "human" or "agent"
    action_type: str  # "task_completed", "doc_generated", "query_answered"
    raw_input: str
    code_snippet: Optional[str] = None
    generated_doc: str
    tags: List[str] = []  # pydantic copies mutable defaults per instance
    created_at: datetime

    class Config:
        from_attributes = True


class ActivityResponse(BaseModel):
    entries: List[LogEntry]
108
+
109
+
110
# ==================== Search Schemas ====================
class SearchFilters(BaseModel):
    # All filters optional; dates are ISO-format strings.
    userId: Optional[str] = None
    dateFrom: Optional[str] = None
    dateTo: Optional[str] = None
    tags: Optional[List[str]] = None


class SearchRequest(BaseModel):
    query: str
    filters: Optional[SearchFilters] = None


class SearchSource(BaseModel):
    # A log entry cited by the synthesized answer.
    id: str
    summary: str


class SearchResponse(BaseModel):
    answer: str
    sources: List[SearchSource]


# ==================== Smart Query Schemas ====================
class SmartQueryRequest(BaseModel):
    query: str
    currentUserId: str  # lets the LLM resolve "I"/"me" in the query
    currentDatetime: Optional[str] = None  # ISO format; lets it resolve "yesterday" etc.


class SmartQuerySource(BaseModel):
    id: str
    type: str  # "activity", "memory", "task"
    summary: str
    date: Optional[str] = None
    relevance: Optional[float] = None  # similarity score for "memory" sources
    status: Optional[str] = None  # task status for "task" sources


class SmartQueryResponse(BaseModel):
    answer: str
    tools_used: List[str] = []
    sources: List[SmartQuerySource] = []
153
+
154
+
155
# ==================== Chat Schemas ====================
class ChatMessage(BaseModel):
    role: str  # "user", "assistant", "system"
    content: str


class ChatRequest(BaseModel):
    # Full conversation history; the server is stateless between calls.
    messages: List[ChatMessage]
    projectId: str


class ChatResponse(BaseModel):
    message: str  # the assistant's reply text


# ==================== Error Schema ====================
class ErrorResponse(BaseModel):
    error: str
    detail: Optional[str] = None
backend/app/smart_query.py ADDED
@@ -0,0 +1,684 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Smart Query - LLM-First implementation using Gemini function calling.
3
+
4
+ Enables natural language queries like:
5
+ - "What did I do yesterday?"
6
+ - "What did dev_a do today?"
7
+ - "Did dev_b complete task abc?"
8
+ - "How did the xyz api get implemented?"
9
+ - "Task 13 status?"
10
+ """
11
+ from dataclasses import dataclass
12
+ from datetime import datetime, timedelta
13
+ from typing import Optional
14
+ import google.generativeai as genai
15
+ import json
16
+ import os
17
+ from dotenv import load_dotenv
18
+
19
+ # Load environment variables
20
+ load_dotenv()
21
+
22
+ from app.database import SessionLocal
23
+ from app.models import Task, LogEntry, User, TaskStatus, ProjectMembership
24
+ from app.llm import get_embedding
25
+ from app.vectorstore import search
26
+ from app.model_router import router as model_router
27
+
28
+ # Configure Gemini (reuse existing config pattern from llm.py)
29
+ genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
30
+
31
+
32
@dataclass
class QueryContext:
    """Context from frontend for query processing."""
    current_user_id: str  # resolves "I"/"me"/"my" references in the query
    current_datetime: datetime  # anchor for relative dates ("today", "yesterday")
    project_id: str  # every tool query is scoped to this project
38
+
39
+
40
# Gemini Function Declarations.
# Each FunctionDeclaration maps 1:1 to a handler dispatched by execute_tool();
# the model decides which tool(s) to call from the user's natural-language query.
GEMINI_TOOLS = [
    genai.protos.Tool(
        function_declarations=[
            # Activity/log lookup for a user over a date window.
            genai.protos.FunctionDeclaration(
                name="get_user_activity",
                description="Get activity/log entries for a user within a date range",
                parameters=genai.protos.Schema(
                    type=genai.protos.Type.OBJECT,
                    properties={
                        "user_id": genai.protos.Schema(type=genai.protos.Type.STRING, description="User ID (use 'current' for the current user)"),
                        "user_name": genai.protos.Schema(type=genai.protos.Type.STRING, description="User name to look up"),
                        "date_from": genai.protos.Schema(type=genai.protos.Type.STRING, description="Start date ISO format"),
                        "date_to": genai.protos.Schema(type=genai.protos.Type.STRING, description="End date ISO format"),
                    },
                    required=["date_from", "date_to"]
                )
            ),
            # Single-task lookup by ID or fuzzy title.
            genai.protos.FunctionDeclaration(
                name="get_task_status",
                description="Get status and details of a specific task by ID or title",
                parameters=genai.protos.Schema(
                    type=genai.protos.Type.OBJECT,
                    properties={
                        "task_id": genai.protos.Schema(type=genai.protos.Type.STRING, description="Task ID"),
                        "task_title": genai.protos.Schema(type=genai.protos.Type.STRING, description="Task title to search"),
                    }
                )
            ),
            # "Did X finish task Y?" style checks.
            genai.protos.FunctionDeclaration(
                name="check_task_completion",
                description="Check if a task was completed, optionally by a specific user",
                parameters=genai.protos.Schema(
                    type=genai.protos.Type.OBJECT,
                    properties={
                        "task_title": genai.protos.Schema(type=genai.protos.Type.STRING, description="Task title"),
                        "user_name": genai.protos.Schema(type=genai.protos.Type.STRING, description="User to check"),
                    },
                    required=["task_title"]
                )
            ),
            # Vector search over project memory.
            genai.protos.FunctionDeclaration(
                name="semantic_search",
                description="Search project memory semantically for concepts, implementations, or how things work",
                parameters=genai.protos.Schema(
                    type=genai.protos.Type.OBJECT,
                    properties={
                        "search_query": genai.protos.Schema(type=genai.protos.Type.STRING, description="Semantic search query"),
                    },
                    required=["search_query"]
                )
            ),
            # Used by the model to resolve names to IDs before other calls.
            genai.protos.FunctionDeclaration(
                name="list_users",
                description="List all users/members in the project",
                parameters=genai.protos.Schema(
                    type=genai.protos.Type.OBJECT,
                    properties={}
                )
            ),
            # Task listing with optional status/assignee filters.
            genai.protos.FunctionDeclaration(
                name="list_tasks",
                description="List tasks in the project, optionally filtered by status. Use this for queries like 'what tasks are done', 'pending tasks', 'all tasks'",
                parameters=genai.protos.Schema(
                    type=genai.protos.Type.OBJECT,
                    properties={
                        "status": genai.protos.Schema(
                            type=genai.protos.Type.STRING,
                            description="Filter by status: 'todo', 'in_progress', 'done', or 'all' for no filter"
                        ),
                        "assigned_to": genai.protos.Schema(
                            type=genai.protos.Type.STRING,
                            description="Filter by assignee user ID or 'agent'"
                        ),
                        "limit": genai.protos.Schema(
                            type=genai.protos.Type.INTEGER,
                            description="Max number of tasks to return (default 20)"
                        ),
                    }
                )
            ),
        ]
    )
]
124
+
125
+
126
async def smart_query(
    project_id: str,
    query: str,
    current_user_id: str,
    current_datetime: Optional[str] = None
) -> dict:
    """
    Main entry point for LLM-first query processing.

    Runs a Gemini function-calling loop: the model may request tool calls
    (declared in GEMINI_TOOLS), each call is executed via execute_tool(),
    and the results are fed back until the model emits a final text answer
    (bounded at 5 rounds).

    Args:
        project_id: Project to query
        query: Natural language query
        current_user_id: ID of user making the query
        current_datetime: ISO datetime string (optional, defaults to now)

    Returns:
        {answer: str, tools_used: list[str], sources: list[dict]}
    """
    # Parse datetime. BUGFIX: accept a trailing 'Z' (UTC) suffix, which
    # frontends commonly send and which datetime.fromisoformat() rejects
    # before Python 3.11.
    if current_datetime:
        dt = datetime.fromisoformat(current_datetime.replace('Z', '+00:00'))
    else:
        dt = datetime.now()
    context = QueryContext(current_user_id, dt, project_id)

    # Pre-compute anchors for relative-date wording in the prompt.
    yesterday = (dt - timedelta(days=1)).date()
    today = dt.date()

    system_prompt = f"""You are a project memory assistant helping users find information about their project work.

CONTEXT:
- Current User ID: {context.current_user_id}
- Current Date/Time: {dt.isoformat()}
- Project ID: {context.project_id}

RULES:
1. When user says "I", "me", or "my" - use user_id="{context.current_user_id}"
2. "today" = {today}
3. "yesterday" = {yesterday}
4. Convert relative dates to absolute ISO format before calling tools
5. Use the tools to fetch real data - NEVER make up information
6. If you need to resolve a user name to ID, call list_users first
7. Always cite sources in your answer when available
"""

    try:
        # Pick the best available model for this task class; bail out
        # gracefully when everything is rate limited.
        model_name = model_router.get_model_for_task("smart_query")
        if not model_name:
            return {
                "answer": "All models are rate limited. Please try again in a minute.",
                "tools_used": [],
                "sources": []
            }

        # Record usage for rate limiting
        model_router._record_usage(model_name)

        # Create model with function calling using the selected model
        model = genai.GenerativeModel(
            model_name,
            tools=GEMINI_TOOLS,
            system_instruction=system_prompt
        )

        chat = model.start_chat()
        response = chat.send_message(query)

        # Tool-calling loop: execute requested tools, return results, repeat.
        tool_results = []
        max_iterations = 5

        for _ in range(max_iterations):
            # Collect any function calls present in the latest response.
            function_calls = []
            for part in response.candidates[0].content.parts:
                if hasattr(part, 'function_call') and part.function_call.name:
                    function_calls.append(part.function_call)

            if not function_calls:
                break  # No more function calls - the model produced its answer

            # Execute each requested tool and package the result for Gemini.
            function_responses = []
            for fn_call in function_calls:
                result = await execute_tool(fn_call.name, dict(fn_call.args), context)
                tool_results.append({"tool": fn_call.name, "args": dict(fn_call.args), "result": result})

                function_responses.append(
                    genai.protos.Part(
                        function_response=genai.protos.FunctionResponse(
                            name=fn_call.name,
                            # default=str handles datetimes/UUIDs in results
                            response={"result": json.dumps(result, default=str)}
                        )
                    )
                )

            response = chat.send_message(function_responses)

        # Concatenate all text parts of the final response.
        final_answer = ""
        for part in response.candidates[0].content.parts:
            if hasattr(part, 'text'):
                final_answer += part.text

        return {
            "answer": final_answer,
            "tools_used": [tr["tool"] for tr in tool_results],
            "sources": extract_sources(tool_results)
        }

    except Exception as e:
        # Surface the failure as an answer rather than a 500.
        return {
            "answer": f"Error processing query: {str(e)}",
            "tools_used": [],
            "sources": []
        }
243
+
244
+
245
async def execute_tool(name: str, args: dict, context: QueryContext) -> dict:
    """Dispatch a single tool invocation by name.

    Opens a short-lived DB session for the duration of the call and always
    closes it. Unknown tool names return an error dict instead of raising.
    """
    db = SessionLocal()
    try:
        if name == "get_user_activity":
            return _tool_get_user_activity(db, context, args)
        if name == "get_task_status":
            return _tool_get_task_status(db, context.project_id, args)
        if name == "check_task_completion":
            return _tool_check_completion(db, context.project_id, args)
        if name == "semantic_search":
            # The only async tool: it computes an embedding via the LLM layer.
            return await _tool_semantic_search(context.project_id, args)
        if name == "list_users":
            return _tool_list_users(db, context.project_id)
        if name == "list_tasks":
            return _tool_list_tasks(db, context.project_id, args)
        return {"error": f"Unknown tool: {name}"}
    finally:
        db.close()
267
+
268
+
269
def _get_recent_work_hint(db, user_id: str, project_id: str) -> str:
    """Return a one-line hint about the user's most recent log entry.

    Used to help disambiguate between same-named project members.
    """
    latest = (
        db.query(LogEntry)
        .filter(LogEntry.user_id == user_id, LogEntry.project_id == project_id)
        .order_by(LogEntry.created_at.desc())
        .first()
    )

    if latest is None:
        return "no recent activity"

    # Prefer the raw description; fall back to the first tag.
    if latest.raw_input:
        return f"worked on: {latest.raw_input[:50]}..."
    if latest.tags:
        return f"worked on: {latest.tags[0]}"
    return "no recent activity"
283
+
284
+
285
def _resolve_user_in_project(db, project_id: str, user_name: str) -> dict:
    """
    Resolve a user by name within project scope only using first/last name fields.

    Matching is two-pass: a case-insensitive substring match on User.name
    first, then a fallback against first_name/last_name. Both passes join
    through ProjectMembership so only members of this project can match.

    Returns:
        {"found": True, "user_id": "...", "user_name": "..."} - single match
        {"found": False, "reason": "not_found"} - no matches
        {"found": False, "reason": "ambiguous", "options": [...]} - multiple matches
    """
    # Query users who are members of this project and match first or last name
    matches = db.query(User, ProjectMembership).join(
        ProjectMembership, User.id == ProjectMembership.user_id
    ).filter(
        ProjectMembership.project_id == project_id,
        User.name.ilike(f"%{user_name}%")
    ).all()

    if not matches:
        # Try first_name or last_name separately as fallback
        matches = db.query(User, ProjectMembership).join(
            ProjectMembership, User.id == ProjectMembership.user_id
        ).filter(
            ProjectMembership.project_id == project_id,
            (User.first_name.ilike(f"%{user_name}%") | User.last_name.ilike(f"%{user_name}%"))
        ).all()

    if not matches:
        return {
            "found": False,
            "reason": "not_found",
            "message": f"No project member named '{user_name}' found"
        }

    if len(matches) == 1:
        user, membership = matches[0]
        # NOTE(review): this branch reports "first_name last_name" while the
        # ambiguous branch below reports User.name - confirm both fields are
        # always populated, otherwise the two paths disagree.
        return {
            "found": True,
            "user_id": str(user.id),
            "user_name": f"{user.first_name} {user.last_name}"
        }

    # Multiple matches - return disambiguation options
    options = []
    for user, membership in matches:
        options.append({
            "user_id": str(user.id),
            "name": user.name,
            "role": membership.role,
            # Short summary of latest work to help the caller pick one
            "recent_work": _get_recent_work_hint(db, str(user.id), project_id)
        })

    return {
        "found": False,
        "reason": "ambiguous",
        "message": f"Found {len(matches)} project members matching '{user_name}'",
        "options": options
    }
342
+
343
+
344
def _tool_get_user_activity(db, context: QueryContext, args: dict) -> dict:
    """Get user activity within a date range.

    Resolves "current"/user-name references, normalizes the date window, and
    returns matching LogEntry rows for the project (newest first). May return
    a not_found/ambiguous resolution dict instead of activities when a user
    name cannot be resolved to a single project member.
    """
    user_id = args.get("user_id")
    user_name = args.get("user_name")
    resolved_user_name = None

    # Handle "current" user reference
    if user_id == "current" or user_id == context.current_user_id:
        user_id = context.current_user_id

    # Resolve user name to ID using the project-scoped resolver
    if user_name and not user_id:
        resolution = _resolve_user_in_project(db, context.project_id, user_name)

        if not resolution["found"]:
            # Return disambiguation or not-found info directly to the LLM
            return resolution

        user_id = resolution["user_id"]
        resolved_user_name = resolution["user_name"]

    # Normalize to naive datetimes: strip a 'Z' suffix or '+00:00' offset so
    # fromisoformat() accepts the string and comparisons against naive
    # created_at values don't fail.
    date_from_str = args["date_from"].replace('Z', '').replace('+00:00', '')
    date_to_str = args["date_to"].replace('Z', '').replace('+00:00', '')

    try:
        date_from = datetime.fromisoformat(date_from_str)
    except ValueError:
        # Fall back to parsing just the date portion
        date_from = datetime.strptime(date_from_str[:10], '%Y-%m-%d')

    try:
        date_to = datetime.fromisoformat(date_to_str)
    except ValueError:
        date_to = datetime.strptime(date_to_str[:10], '%Y-%m-%d')

    # BUGFIX: a date-only upper bound (e.g. "2024-01-05") parses to midnight,
    # which silently excluded all of that day's activity (the old
    # "+1 day" fallback never ran because fromisoformat() accepts date-only
    # strings). Extend date-only bounds to cover the whole day.
    if len(date_to_str) <= 10:
        date_to = date_to + timedelta(days=1)

    # Query LogEntry rows in the window, scoped to the project
    query = db.query(LogEntry).filter(
        LogEntry.project_id == context.project_id,
        LogEntry.created_at >= date_from,
        LogEntry.created_at <= date_to
    )

    if user_id:
        query = query.filter(LogEntry.user_id == user_id)

    entries = query.order_by(LogEntry.created_at.desc()).all()

    # Look up the display name only if it wasn't resolved above
    if not resolved_user_name and user_id:
        resolved_user = db.query(User).filter(User.id == user_id).first()
        resolved_user_name = (
            f"{resolved_user.first_name} {resolved_user.last_name}"
            if resolved_user else None
        )

    return {
        "user_id": user_id,
        "user_name": resolved_user_name,
        "date_range": {"from": args["date_from"], "to": args["date_to"]},
        "count": len(entries),
        "activities": [
            {
                "id": str(e.id),
                "task_id": str(e.task_id) if e.task_id else None,
                "what_was_done": e.raw_input,
                "summary": e.generated_doc[:200] if e.generated_doc else None,
                "tags": e.tags or [],
                "timestamp": e.created_at.isoformat()
            }
            for e in entries
        ]
    }
417
+
418
+
419
def _tool_get_task_status(db, project_id: str, args: dict) -> dict:
    """Look up one task by ID (exact, then partial) or by title substring."""
    task_id = args.get("task_id")
    task_title = args.get("task_title")

    scoped = db.query(Task).filter(Task.project_id == project_id)

    if task_id:
        # Exact ID first, then a partial match as a fallback.
        task = scoped.filter(Task.id == task_id).first()
        if task is None:
            task = scoped.filter(Task.id.like(f"%{task_id}%")).first()
    elif task_title:
        task = scoped.filter(Task.title.ilike(f"%{task_title}%")).first()
    else:
        return {"error": "Provide either task_id or task_title"}

    if task is None:
        return {"found": False, "message": "Task not found"}

    # Attach completion details when a log entry exists for this task.
    log_entry = db.query(LogEntry).filter(LogEntry.task_id == task.id).first()

    completion = None
    if log_entry is not None:
        completion = {
            "what_was_done": log_entry.raw_input,
            "completed_by": str(log_entry.user_id) if log_entry.user_id else None,
            "documentation": log_entry.generated_doc[:300] if log_entry.generated_doc else None
        }

    return {
        "found": True,
        "task": {
            "id": str(task.id),
            "title": task.title,
            "description": task.description,
            "status": task.status.value,
            "assigned_to": task.assigned_to,
            "created_at": task.created_at.isoformat(),
            "completed_at": task.completed_at.isoformat() if task.completed_at else None
        },
        "completion_details": completion
    }
459
+
460
+
461
def _tool_check_completion(db, project_id: str, args: dict) -> dict:
    """Check whether a task is done and, optionally, whether a named user logged it."""
    title = args["task_title"]
    who = args.get("user_name")

    # Locate the task by title substring, scoped to the project.
    task = db.query(Task).filter(
        Task.project_id == project_id,
        Task.title.ilike(f"%{title}%")
    ).first()

    if task is None:
        return {"found": False, "message": f"Task matching '{title}' not found"}

    # Resolve the named user within this project; bubble up not-found /
    # ambiguous resolutions so the LLM can ask for clarification.
    user_id = None
    if who:
        resolution = _resolve_user_in_project(db, project_id, who)
        if not resolution["found"]:
            return resolution
        user_id = resolution["user_id"]

    log_query = db.query(LogEntry).filter(LogEntry.task_id == task.id)
    if user_id:
        log_query = log_query.filter(LogEntry.user_id == user_id)
    log_entry = log_query.first()

    details = None
    if log_entry is not None:
        details = {
            "what_was_done": log_entry.raw_input,
            "timestamp": log_entry.created_at.isoformat()
        }

    return {
        "found": True,
        "task_title": task.title,
        "task_id": str(task.id),
        "status": task.status.value,
        "is_completed": task.status == TaskStatus.done,
        # Only meaningful when a user was specified; None otherwise.
        "completed_by_specified_user": log_entry is not None if who else None,
        "completion_details": details
    }
506
+
507
+
508
async def _tool_semantic_search(project_id: str, args: dict) -> dict:
    """Vector-similarity search over project memory, enriched from the DB."""
    query_text = args["search_query"]

    # Embed the query, then search this project's vector store.
    query_embedding = await get_embedding(query_text)
    hits = search(
        query_embedding=query_embedding,
        project_id=project_id,
        n_results=10
    )

    if not hits:
        return {"query": query_text, "count": 0, "results": []}

    db = SessionLocal()
    try:
        # Fetch the full log entries behind all hits in a single query.
        entry_map = {
            str(e.id): e
            for e in db.query(LogEntry).filter(
                LogEntry.id.in_([h["id"] for h in hits])
            ).all()
        }

        enriched = []
        for hit in hits:
            entry = entry_map.get(hit["id"])
            if entry is not None:
                what = entry.raw_input
                documentation = entry.generated_doc[:300] if entry.generated_doc else None
                tags = entry.tags
                timestamp = entry.created_at.isoformat()
            else:
                # Fall back to vector-store metadata when the DB row is missing.
                what = hit["metadata"].get("text", "")[:200]
                documentation = None
                tags = []
                timestamp = hit["metadata"].get("created_at")

            enriched.append({
                "id": hit["id"],
                # Convert distance into a similarity-style score.
                "relevance_score": round(1 - hit.get("distance", 0), 3),
                "what_was_done": what,
                "documentation": documentation,
                "tags": tags,
                "timestamp": timestamp
            })

        return {"query": query_text, "count": len(enriched), "results": enriched}
    finally:
        db.close()
547
+
548
+
549
def _tool_list_users(db, project_id: str) -> dict:
    """List all project members.

    Uses one joined query instead of a per-membership User lookup (the old
    N+1 pattern), matching the join style used by _resolve_user_in_project().
    """
    rows = db.query(User, ProjectMembership).join(
        ProjectMembership, User.id == ProjectMembership.user_id
    ).filter(
        ProjectMembership.project_id == project_id
    ).all()

    users = [
        {
            "id": str(user.id),
            "name": user.name,
            "first_name": user.first_name,
            "last_name": user.last_name,
            "role": membership.role
        }
        for user, membership in rows
    ]

    return {"project_id": project_id, "count": len(users), "users": users}
568
+
569
+
570
def _tool_list_tasks(db, project_id: str, args: dict) -> dict:
    """List tasks in the project, optionally filtered by status and/or assignee.

    Returns the filtered total, a (limited) page of tasks ordered newest
    first, and a project-wide per-status summary.
    """
    from sqlalchemy import desc, func

    status_filter = args.get("status", "all")
    assigned_to = args.get("assigned_to")
    limit = args.get("limit", 20)

    # Base query - always scoped to the project
    base_query = db.query(Task).filter(Task.project_id == project_id)

    # Apply status filter; reject values outside the enum
    if status_filter and status_filter != "all":
        try:
            status_enum = TaskStatus(status_filter)
        except ValueError:
            return {"error": f"Invalid status: {status_filter}. Use: todo, in_progress, done, or all"}
        base_query = base_query.filter(Task.status == status_enum)

    # Apply assignee filter
    if assigned_to:
        base_query = base_query.filter(Task.assigned_to == assigned_to)

    # Total matching the filter, BEFORE the limit is applied
    total_count = base_query.count()

    # Page of results, newest first
    tasks = base_query.order_by(desc(Task.created_at)).limit(limit).all()

    # Status counts for ALL tasks in the project (not filtered).
    # BUGFIX: the old per-row loop raised KeyError for any status value
    # outside {todo, in_progress, done}; it also loaded every task row just
    # to count. Aggregate in SQL and tolerate unknown statuses instead.
    status_counts = {"todo": 0, "in_progress": 0, "done": 0}
    count_rows = db.query(Task.status, func.count(Task.id)).filter(
        Task.project_id == project_id
    ).group_by(Task.status).all()
    for status, count in count_rows:
        status_counts[status.value] = count

    return {
        "project_id": project_id,
        "filter": {
            "status": status_filter,
            "assigned_to": assigned_to
        },
        "total_count": total_count,  # Actual total matching the filter
        "returned_count": len(tasks),  # How many in this response (may be limited)
        "status_summary": status_counts,  # Counts for ALL tasks in project
        "tasks": [
            {
                "id": str(task.id),
                "title": task.title,
                "description": task.description[:100] if task.description else None,
                "status": task.status.value,
                "assigned_to": task.assigned_to,
                "created_at": task.created_at.isoformat() if task.created_at else None,
                "completed_at": task.completed_at.isoformat() if task.completed_at else None
            }
            for task in tasks
        ]
    }
627
+
628
+
629
def extract_sources(tool_results: list) -> list:
    """Extract de-duplicated source citations from tool results.

    Scans each tool result for activity, semantic-search, single-task, and
    task-list payloads and turns them into citation dicts (at most 5 per
    payload). A given id is cited at most once across ALL result kinds.
    """
    sources = []
    seen_ids = set()

    for tr in tool_results:
        result = tr.get("result", {})

        # From activity queries
        if "activities" in result:
            for a in result["activities"][:5]:
                if a.get("id") and a["id"] not in seen_ids:
                    sources.append({
                        "id": a["id"],
                        "type": "activity",
                        "summary": a.get("what_was_done", "")[:100],
                        "date": a.get("timestamp")
                    })
                    seen_ids.add(a["id"])

        # From semantic search
        if "results" in result:
            for r in result["results"][:5]:
                if r.get("id") and r["id"] not in seen_ids:
                    sources.append({
                        "id": r["id"],
                        "type": "memory",
                        "summary": r.get("what_was_done", "")[:100],
                        "date": r.get("timestamp"),
                        "relevance": r.get("relevance_score")
                    })
                    seen_ids.add(r["id"])

        # From task queries (single task).
        # BUGFIX: this branch previously neither checked nor updated seen_ids,
        # so the same task could be cited twice (e.g. get_task_status followed
        # by list_tasks in the same query).
        if "task" in result and result.get("found"):
            task = result["task"]
            if task.get("id") and task["id"] not in seen_ids:
                sources.append({
                    "id": task["id"],
                    "type": "task",
                    "summary": task["title"],
                    "status": task["status"]
                })
                seen_ids.add(task["id"])

        # From list_tasks queries (multiple tasks)
        if "tasks" in result and isinstance(result["tasks"], list):
            for task in result["tasks"][:5]:
                if task.get("id") and task["id"] not in seen_ids:
                    sources.append({
                        "id": task["id"],
                        "type": "task",
                        "summary": task["title"],
                        "status": task.get("status")
                    })
                    seen_ids.add(task["id"])

    return sources
backend/app/tools/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # MCP Tools
backend/app/tools/memory.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Memory tools for Project Memory - complete_task and memory_search.
3
+
4
+ Dependencies (from Dev A):
5
+ - app.models: Task, LogEntry, TaskStatus, ActorType, ActionType
6
+ - app.database: get_db
7
+
8
+ These imports will work once Dev A completes models.py and database.py.
9
+ """
10
+
11
+ from datetime import datetime
12
+ from typing import Optional
13
+ from sqlalchemy.orm import Session
14
+
15
+ # Dev A's imports (will work when their files are ready)
16
+ from app.models import Task, LogEntry, TaskStatus, ActorType, ActionType
17
+ from app.database import get_db
18
+
19
+ # Dev B's imports
20
+ from app.llm import generate_documentation, synthesize_answer, get_embedding
21
+ from app.vectorstore import add_embedding, search
22
+
23
# Tool definitions for MCP server.
# Each entry advertises a tool (JSON-Schema input contract) whose handler is
# registered in the HANDLERS map at the bottom of this module.
TOOLS = [
    {
        "name": "complete_task",
        "description": "Mark a task as complete with AI-generated documentation",
        "inputSchema": {
            "type": "object",
            "properties": {
                "task_id": {"type": "string", "description": "ID of the task to complete"},
                "project_id": {"type": "string", "description": "Project ID"},
                "user_id": {"type": "string", "description": "User completing the task"},
                "what_i_did": {"type": "string", "description": "Description of work done"},
                "code_snippet": {"type": "string", "description": "Optional code snippet"}
            },
            "required": ["task_id", "project_id", "user_id", "what_i_did"]
        }
    },
    {
        "name": "memory_search",
        "description": "Search project memory with natural language and get AI-synthesized answers",
        "inputSchema": {
            "type": "object",
            "properties": {
                "project_id": {"type": "string", "description": "Project to search in"},
                "query": {"type": "string", "description": "Natural language search query"}
            },
            "required": ["project_id", "query"]
        }
    }
]
53
+
54
+
55
async def complete_task(
    task_id: str,
    project_id: str,
    user_id: str,
    what_i_did: str,
    code_snippet: Optional[str] = None,
    db: Optional[Session] = None
) -> dict:
    """
    Complete a task and create searchable memory.

    Pipeline:
    1. Update task status to done
    2. Generate documentation via LLM
    3. Create LogEntry record
    4. Create embedding and store in vector DB

    Returns:
        {"success": True, "log_entry_id", "summary", "tags"} on success,
        {"success": False, "error": ...} on failure.
    """
    # BUGFIX: a session obtained here via next(get_db()) was never closed -
    # abandoning the generator leaks the session until GC. Track ownership and
    # close only sessions we created; caller-supplied sessions stay open.
    owns_session = db is None
    if owns_session:
        db = next(get_db())

    try:
        # 1. Get and update task
        task = db.query(Task).filter(Task.id == task_id).first()
        if not task:
            return {"success": False, "error": "Task not found"}

        task.status = TaskStatus.done
        task.completed_at = datetime.now()

        # 2. Generate documentation via LLM
        doc = await generate_documentation(
            task_title=task.title,
            what_i_did=what_i_did,
            code_snippet=code_snippet
        )

        # 3. Create LogEntry
        log_entry = LogEntry(
            project_id=project_id,
            task_id=task_id,
            user_id=user_id,
            actor_type=ActorType.human,
            action_type=ActionType.task_completed,
            raw_input=what_i_did,
            code_snippet=code_snippet,
            generated_doc=doc["details"],
            tags=doc.get("tags", [])
        )
        db.add(log_entry)
        db.commit()
        db.refresh(log_entry)

        # 4. Create embedding and store in the vector DB
        text_to_embed = f"""
        Task: {task.title}
        Summary: {doc['summary']}
        Details: {doc['details']}
        Code: {code_snippet or ''}
        """

        embedding = await get_embedding(text_to_embed)

        add_embedding(
            log_entry_id=str(log_entry.id),
            text=text_to_embed,
            embedding=embedding,
            metadata={
                "project_id": project_id,
                "user_id": user_id,
                "task_id": task_id,
                "created_at": log_entry.created_at.isoformat()
            }
        )

        return {
            "success": True,
            "log_entry_id": str(log_entry.id),
            "summary": doc["summary"],
            "tags": doc.get("tags", [])
        }

    except Exception as e:
        # Roll back the task-status change / uncommitted log entry
        db.rollback()
        return {"success": False, "error": str(e)}
    finally:
        if owns_session:
            db.close()
139
+
140
+
141
async def memory_search(
    project_id: str,
    query: str,
    filters: Optional[dict] = None,
    db: Optional[Session] = None
) -> dict:
    """
    Search project memory and synthesize answer.

    Pipeline:
    1. Get query embedding
    2. Vector similarity search (with optional filters)
    3. Fetch full log entries
    4. LLM synthesis of answer with citations

    Args:
        project_id: Project to search in
        query: Natural language search query
        filters: Optional filters dict with keys: userId, dateFrom, dateTo, tags
        db: Database session (optional; created and closed here when omitted)

    Returns:
        {"answer": str, "sources": list[dict]}
    """
    # BUGFIX: a session obtained here via next(get_db()) was never closed,
    # leaking it until GC. Close only sessions we own.
    owns_session = db is None
    if owns_session:
        db = next(get_db())

    try:
        # 1. Get query embedding
        query_embedding = await get_embedding(query)

        # 2. Convert camelCase filter keys to snake_case for vectorstore
        search_filters = None
        if filters:
            search_filters = {}
            if filters.get("userId"):
                search_filters["user_id"] = filters["userId"]
            if filters.get("dateFrom"):
                search_filters["date_from"] = filters["dateFrom"]
            if filters.get("dateTo"):
                search_filters["date_to"] = filters["dateTo"]
            # Note: tags filtering not yet implemented in vectorstore

        # 3. Vector search with filters
        results = search(
            query_embedding=query_embedding,
            project_id=project_id,
            n_results=10,
            filters=search_filters
        )

        if not results:
            return {
                "answer": "No relevant information found in project memory.",
                "sources": []
            }

        # 4. Get full log entries behind the hits
        log_entry_ids = [r["id"] for r in results]
        log_entries = db.query(LogEntry).filter(LogEntry.id.in_(log_entry_ids)).all()

        if not log_entries:
            # Fallback to vector store text if log entries not found
            context = "\n---\n".join([
                f"Entry: {r['metadata'].get('text', '')}"
                for r in results
            ])
        else:
            # Build context from the full log entries
            context = "\n---\n".join([
                f"Date: {e.created_at}\nTask: {e.raw_input}\nDoc: {e.generated_doc}"
                for e in log_entries
            ])

        # 5. Synthesize answer with citations
        answer = await synthesize_answer(context, query)

        return {
            "answer": answer,
            "sources": [
                {
                    "id": str(e.id),
                    "summary": e.raw_input,
                    "date": e.created_at.isoformat()
                }
                for e in log_entries
            ] if log_entries else [
                {
                    "id": r["id"],
                    "summary": r["metadata"].get("text", "")[:100],
                    "date": r["metadata"].get("created_at", "")
                }
                for r in results
            ]
        }

    except Exception as e:
        return {"answer": f"Error searching memory: {str(e)}", "sources": []}
    finally:
        if owns_session:
            db.close()
236
+
237
+
238
# Handler map for MCP server: tool name -> async handler coroutine.
# Keys must match the "name" fields declared in TOOLS above.
HANDLERS = {
    "complete_task": complete_task,
    "memory_search": memory_search,
}
backend/app/tools/projects.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Project-related MCP tools."""
2
+
3
+ from sqlalchemy.orm import Session
4
+ from app.database import SessionLocal
5
+ from app.models import Project, ProjectMembership, User
6
+
7
+
8
def check_project_id_available(project_id: str) -> dict:
    """
    Report whether a project ID is still free.

    Args:
        project_id: The candidate project ID.

    Returns:
        Dict with an 'available' boolean and the echoed 'project_id'.
    """
    session: Session = SessionLocal()
    try:
        taken = session.query(Project).filter(Project.id == project_id).first() is not None
        return {"available": not taken, "project_id": project_id}
    finally:
        session.close()
24
+
25
+
26
def create_project(name: str, description: str, user_id: str) -> dict:
    """
    Create a new project and add the creator as its owner.

    The project name is used verbatim as the project ID.

    Args:
        name: Project name (also used as the project ID)
        description: Project description
        user_id: ID of the user creating the project

    Returns:
        Created project data, or an {"error": ...} dict when the user does
        not exist or the project ID is already taken.
    """
    # Local import: keeps the module-level import block unchanged.
    from sqlalchemy.exc import IntegrityError

    db: Session = SessionLocal()
    try:
        # Verify user exists
        if db.query(User).filter(User.id == user_id).first() is None:
            return {"error": f"User {user_id} not found"}

        # Use name as project ID
        project_id = name

        # Fast-path duplicate check. A concurrent request can still create
        # the same ID between this check and the commit below, so the commit
        # is guarded for the same condition (TOCTOU race).
        if db.query(Project).filter(Project.id == project_id).first() is not None:
            return {"error": f"Project ID '{project_id}' already exists. Please choose a different name."}

        # Create project with name as ID
        project = Project(
            id=project_id,
            name=name,
            description=description,
            created_by=user_id
        )
        db.add(project)
        db.flush()  # push the project row so the membership FK is satisfiable

        # Add creator as owner
        membership = ProjectMembership(
            project_id=project.id,
            user_id=user_id,
            role="owner"
        )
        db.add(membership)

        try:
            db.commit()
        except IntegrityError:
            # Lost the race: another request inserted the same project ID
            # after our existence check. Roll back and report the same error
            # the fast-path check would have produced.
            db.rollback()
            return {"error": f"Project ID '{project_id}' already exists. Please choose a different name."}

        return {
            "id": project.id,
            "name": project.name,
            "description": project.description,
            "created_at": project.created_at.isoformat(),
            "created_by": project.created_by
        }
    finally:
        db.close()
82
+
83
+
84
def list_projects(user_id: str) -> dict:
    """
    List every project the given user is a member of.

    Args:
        user_id: ID of the user

    Returns:
        {"projects": [...]} — one entry per membership, each carrying the
        user's role within that project.
    """
    session: Session = SessionLocal()
    try:
        memberships = (
            session.query(ProjectMembership)
            .filter(ProjectMembership.user_id == user_id)
            .all()
        )
        # Walk the membership -> project relationship to build the payload.
        return {
            "projects": [
                {
                    "id": m.project.id,
                    "name": m.project.name,
                    "description": m.project.description,
                    "created_at": m.project.created_at.isoformat(),
                    "role": m.role,
                }
                for m in memberships
            ]
        }
    finally:
        session.close()
117
+
118
+
119
def join_project(project_id: str, user_id: str) -> dict:
    """
    Add a user to an existing project as a regular member.

    Args:
        project_id: ID of the project to join
        user_id: ID of the joining user

    Returns:
        Membership confirmation, or an {"error": ...} dict when the project
        or user does not exist. Joining a project twice is a no-op that
        reports the existing role.
    """
    session: Session = SessionLocal()
    try:
        # Both sides of the membership must exist.
        target_project = session.query(Project).filter(Project.id == project_id).first()
        if target_project is None:
            return {"error": f"Project {project_id} not found"}

        if session.query(User).filter(User.id == user_id).first() is None:
            return {"error": f"User {user_id} not found"}

        # Idempotent join: surface the existing membership instead of
        # inserting a duplicate.
        current = (
            session.query(ProjectMembership)
            .filter(
                ProjectMembership.project_id == project_id,
                ProjectMembership.user_id == user_id,
            )
            .first()
        )
        if current is not None:
            return {
                "message": "Already a member",
                "project_id": project_id,
                "user_id": user_id,
                "role": current.role
            }

        session.add(
            ProjectMembership(project_id=project_id, user_id=user_id, role="member")
        )
        session.commit()

        return {
            "message": "Joined project successfully",
            "project_id": project_id,
            "project_name": target_project.name,
            "user_id": user_id,
            "role": "member"
        }
    finally:
        session.close()
backend/app/tools/tasks.py ADDED
@@ -0,0 +1,139 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Task-related MCP tools."""
2
+
3
+ from sqlalchemy.orm import Session
4
+ from sqlalchemy import desc
5
+ from app.database import SessionLocal
6
+ from app.models import Task, TaskStatus, Project, LogEntry
7
+
8
+
9
def create_task(project_id: str, title: str, description: str = None, assigned_to: str = None) -> dict:
    """
    Create a new task (initial status: todo) inside a project.

    Args:
        project_id: ID of the project
        title: Task title
        description: Optional task description
        assigned_to: Optional user ID the task is assigned to

    Returns:
        Created task data, or an {"error": ...} dict if the project is unknown.
    """
    session: Session = SessionLocal()
    try:
        # Reject tasks for non-existent projects.
        if session.query(Project).filter(Project.id == project_id).first() is None:
            return {"error": f"Project {project_id} not found"}

        new_task = Task(
            project_id=project_id,
            title=title,
            description=description,
            assigned_to=assigned_to,
            status=TaskStatus.todo
        )
        session.add(new_task)
        session.commit()

        return {
            "id": new_task.id,
            "project_id": new_task.project_id,
            "title": new_task.title,
            "description": new_task.description,
            "status": new_task.status.value,
            "assigned_to": new_task.assigned_to,
            "created_at": new_task.created_at.isoformat()
        }
    finally:
        session.close()
51
+
52
+
53
def list_tasks(project_id: str, status: str = None) -> dict:
    """
    List a project's tasks, newest first, optionally filtered by status.

    Args:
        project_id: ID of the project
        status: Optional status filter ("todo", "in_progress", "done")

    Returns:
        {"tasks": [...]} or {"error": ...} for an unrecognized status value.
    """
    session: Session = SessionLocal()
    try:
        task_query = session.query(Task).filter(Task.project_id == project_id)

        # Validate and apply the optional status filter before executing.
        if status:
            try:
                task_query = task_query.filter(Task.status == TaskStatus(status))
            except ValueError:
                return {"error": f"Invalid status: {status}. Must be one of: todo, in_progress, done"}

        rows = task_query.order_by(desc(Task.created_at)).all()

        def _as_dict(t: Task) -> dict:
            # Serialize one Task row for the API payload.
            return {
                "id": t.id,
                "project_id": t.project_id,
                "title": t.title,
                "description": t.description,
                "status": t.status.value,
                "assigned_to": t.assigned_to,
                "created_at": t.created_at.isoformat(),
                "completed_at": t.completed_at.isoformat() if t.completed_at else None
            }

        return {"tasks": [_as_dict(t) for t in rows]}
    finally:
        session.close()
97
+
98
+
99
def list_activity(project_id: str, limit: int = 20) -> dict:
    """
    Return the most recent log entries for a project, newest first.

    Args:
        project_id: ID of the project
        limit: Maximum number of entries returned (default 20)

    Returns:
        {"activity": [...]} serialized log entries.
    """
    session: Session = SessionLocal()
    try:
        recent = (
            session.query(LogEntry)
            .filter(LogEntry.project_id == project_id)
            .order_by(desc(LogEntry.created_at))
            .limit(limit)
            .all()
        )
        return {
            "activity": [
                {
                    "id": entry.id,
                    "project_id": entry.project_id,
                    "task_id": entry.task_id,
                    "user_id": entry.user_id,
                    "actor_type": entry.actor_type.value,
                    "action_type": entry.action_type.value,
                    "raw_input": entry.raw_input,
                    "generated_doc": entry.generated_doc,
                    "tags": entry.tags,
                    "created_at": entry.created_at.isoformat()
                }
                for entry in recent
            ]
        }
    finally:
        session.close()
backend/app/vectorstore.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sqlite3
2
+ import sqlite_vec
3
+ import struct
4
+ from typing import Optional
5
+ import os
6
+
7
# Database path - same as main SQLite database.
# Resolved relative to this module's directory (backend/app/..), so the
# vector tables live alongside the SQLAlchemy tables in one SQLite file.
DB_PATH = os.path.join(os.path.dirname(__file__), "..", "project_memory.db")
9
+
10
+
11
+ def _serialize_vector(vec: list[float]) -> bytes:
12
+ """Convert list of floats to bytes for sqlite-vec."""
13
+ return struct.pack(f'{len(vec)}f', *vec)
14
+
15
+
16
def _get_connection():
    """Open a SQLite connection with the sqlite-vec extension loaded.

    Extension loading is enabled only for the duration of the load call,
    then disabled again so arbitrary SQL cannot load further extensions.
    """
    connection = sqlite3.connect(DB_PATH)
    connection.enable_load_extension(True)
    sqlite_vec.load(connection)
    connection.enable_load_extension(False)
    return connection
23
+
24
+
25
def init_vectorstore():
    """Initialize the vector tables. Call once at startup (idempotent).

    Creates the metadata table, a per-project index, and the sqlite-vec
    vec0 virtual table (768-dim, matching Gemini embeddings).
    """
    conn = _get_connection()
    try:
        # Metadata table for embeddings
        conn.execute("""
            CREATE TABLE IF NOT EXISTS embeddings (
                id TEXT PRIMARY KEY,
                project_id TEXT NOT NULL,
                user_id TEXT,
                task_id TEXT,
                text TEXT,
                created_at TEXT
            )
        """)

        # Create index for faster project filtering
        conn.execute("""
            CREATE INDEX IF NOT EXISTS idx_embeddings_project
            ON embeddings(project_id)
        """)

        # Create virtual table for vector search (768 dims for Gemini)
        conn.execute("""
            CREATE VIRTUAL TABLE IF NOT EXISTS vec_embeddings USING vec0(
                id TEXT PRIMARY KEY,
                embedding FLOAT[768]
            )
        """)

        conn.commit()
    finally:
        # Previously the connection leaked if any DDL statement raised.
        conn.close()
57
+
58
+
59
def add_embedding(
    log_entry_id: str,
    text: str,
    embedding: list[float],
    metadata: dict
) -> None:
    """Store one embedding plus its metadata (upsert keyed by log_entry_id).

    Args:
        log_entry_id: Primary key shared by both tables.
        embedding: 768-dim vector to index.
        metadata: Expects keys project_id / user_id / task_id / created_at
            (missing keys are stored as NULL).
    """
    conn = _get_connection()
    try:
        # Store metadata
        conn.execute("""
            INSERT OR REPLACE INTO embeddings (id, project_id, user_id, task_id, text, created_at)
            VALUES (?, ?, ?, ?, ?, ?)
        """, (
            log_entry_id,
            metadata.get("project_id"),
            metadata.get("user_id"),
            metadata.get("task_id"),
            text[:2000],  # Truncate long text
            metadata.get("created_at")
        ))

        # Store vector
        conn.execute("""
            INSERT OR REPLACE INTO vec_embeddings (id, embedding)
            VALUES (?, ?)
        """, (log_entry_id, _serialize_vector(embedding)))

        conn.commit()
    finally:
        # Close even when an insert raises (e.g. wrong vector length),
        # fixing the connection leak in the original implementation.
        conn.close()
89
+
90
+
91
def search(
    query_embedding: list[float],
    project_id: str,
    n_results: int = 10,
    filters: Optional[dict] = None
) -> list[dict]:
    """Search for similar documents within a project.

    Args:
        query_embedding: 768-dim query vector.
        project_id: Restrict results to this project.
        n_results: Maximum number of rows returned.
        filters: Optional dict; supported keys are "user_id", "date_from",
            "date_to" (date values may be str or date/datetime objects).

    Returns:
        List of {"id", "metadata", "distance"} dicts ordered by distance.
    """
    conn = _get_connection()
    try:
        # sqlite-vec KNN: the k constraint in the MATCH clause bounds the
        # candidate set before the metadata predicates apply, so over-fetch
        # (k = 2 * n_results) to leave headroom for rows the filters remove.
        query = """
            SELECT
                e.id,
                e.project_id,
                e.user_id,
                e.task_id,
                e.text,
                e.created_at,
                v.distance
            FROM vec_embeddings v
            JOIN embeddings e ON v.id = e.id
            WHERE v.embedding MATCH ?
              AND k = ?
              AND e.project_id = ?
        """
        params = [_serialize_vector(query_embedding), n_results * 2, project_id]

        if filters:
            if filters.get("user_id"):
                query += " AND e.user_id = ?"
                params.append(filters["user_id"])

            # Date filters for time-based queries; created_at is stored as an
            # ISO-8601 string, so lexicographic comparison matches time order.
            if filters.get("date_from"):
                date_from = filters["date_from"]
                if hasattr(date_from, 'isoformat'):
                    date_from = date_from.isoformat()
                query += " AND e.created_at >= ?"
                params.append(date_from)

            if filters.get("date_to"):
                date_to = filters["date_to"]
                if hasattr(date_to, 'isoformat'):
                    date_to = date_to.isoformat()
                query += " AND e.created_at < ?"
                params.append(date_to)

        query += " ORDER BY v.distance LIMIT ?"
        params.append(n_results)

        rows = conn.execute(query, params).fetchall()
    finally:
        # Close even when the query raises (fixes a connection leak).
        conn.close()

    return [
        {
            "id": row[0],
            "metadata": {
                "project_id": row[1],
                "user_id": row[2],
                "task_id": row[3],
                "text": row[4],
                "created_at": row[5]
            },
            "distance": row[6]
        }
        for row in rows
    ]
159
+
160
+
161
def delete_by_project(project_id: str) -> None:
    """Delete all vectors and metadata rows belonging to a project.

    The vec0 virtual table is cleaned row-by-row by primary key, then the
    metadata rows are removed in one statement; both happen in a single
    committed transaction.
    """
    conn = _get_connection()
    try:
        # Collect the IDs whose vector rows must be removed.
        ids = conn.execute(
            "SELECT id FROM embeddings WHERE project_id = ?",
            (project_id,)
        ).fetchall()

        for (id_,) in ids:
            conn.execute("DELETE FROM vec_embeddings WHERE id = ?", (id_,))

        conn.execute("DELETE FROM embeddings WHERE project_id = ?", (project_id,))
        conn.commit()
    finally:
        # Close even on failure mid-delete (fixes a connection leak).
        conn.close()
177
+
178
+
179
def count_embeddings(project_id: Optional[str] = None) -> int:
    """Count stored embeddings, optionally scoped to a single project.

    Args:
        project_id: When given, count only that project's rows.

    Returns:
        Number of rows in the embeddings metadata table.
    """
    conn = _get_connection()
    try:
        if project_id:
            row = conn.execute(
                "SELECT COUNT(*) FROM embeddings WHERE project_id = ?",
                (project_id,)
            ).fetchone()
        else:
            row = conn.execute("SELECT COUNT(*) FROM embeddings").fetchone()
        return row[0]
    finally:
        # Close even when the query raises (fixes a connection leak).
        conn.close()
backend/pytest.ini ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ [pytest]
2
+ pythonpath = .
3
+ testpaths = tests
4
+ python_files = test_*.py
5
+ asyncio_mode = auto
6
+ asyncio_default_fixture_loop_scope = function
7
+ markers =
8
+ timeout: mark test to timeout after X seconds
9
+ asyncio: mark test as async
backend/requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ fastapi==0.115.0
2
+ uvicorn[standard]==0.32.0
3
+ sqlalchemy==2.0.36
4
+ python-dotenv==1.0.1
5
+ google-generativeai
6
+ sqlite-vec
7
+ mcp
8
+ numpy
9
+ pydantic==2.9.2
10
+ httpx==0.27.2
11
+ python-multipart==0.0.12
12
+ pytest==8.3.3
13
+ pytest-asyncio==0.24.0
backend/tests/conftest.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import asyncio
3
+ import pytest
4
+
5
+ from app.database import init_db, SessionLocal
6
+ from app.models import LogEntry, Task, ProjectMembership, Project, User, generate_user_id
7
+
8
@pytest.fixture(autouse=True)
def _ensure_gemini_env(monkeypatch):
    """Guarantee GEMINI_API_KEY is set so importing app.llm never fails."""
    monkeypatch.setenv("GEMINI_API_KEY", os.getenv("GEMINI_API_KEY", "test-key"))


@pytest.fixture(autouse=True, scope="session")
def setup_db():
    """Drop and recreate the full schema once per test session."""
    from app.database import Base, engine
    # Start from a clean slate, then rebuild with the current schema.
    Base.metadata.drop_all(bind=engine)
    init_db()
    yield


@pytest.fixture(autouse=True)
def reset_db(setup_db):
    """Wipe every table before each test, in FK-safe dependency order."""
    session = SessionLocal()
    try:
        for model in (LogEntry, Task, ProjectMembership, Project, User):
            session.query(model).delete()
        session.commit()
        yield
    finally:
        session.close()


@pytest.fixture()
def db_session():
    """Yield a raw SQLAlchemy session, closed when the test finishes."""
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()


@pytest.fixture()
def create_user(db_session):
    """Factory fixture: persist and return a User with a generated ID."""
    def _create(first_name: str, last_name: str):
        new_user = User(
            id=generate_user_id(first_name),
            first_name=first_name,
            last_name=last_name,
        )
        db_session.add(new_user)
        db_session.commit()
        db_session.refresh(new_user)
        return new_user
    return _create
58
+
backend/tests/test_complete_api.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Complete API test with user creation flow."""
2
+
3
+ import requests
4
+ import json
5
+ import sys
6
+
7
+ BASE_URL = "http://localhost:8000"
8
+
9
+ print("="*60)
10
+ print("PROJECT MEMORY - COMPLETE API TEST")
11
+ print("="*60)
12
+
13
+ def test_api():
14
+ # Check if server is running
15
+ try:
16
+ response = requests.get(f"{BASE_URL}/")
17
+ print(f"✓ Server is running: {response.json()['message']}")
18
+ except:
19
+ print("❌ Server is not running!")
20
+ print("Start it with: uvicorn app.main:app --reload")
21
+ return
22
+
23
+ print("\n" + "="*60)
24
+ print("1. CREATE USER")
25
+ print("="*60)
26
+
27
+ # Create a new user
28
+ user_data = {
29
+ "firstName": "John",
30
+ "lastName": "Doe",
31
+ "avatar_url": "https://example.com/avatar.jpg"
32
+ }
33
+
34
+ print(f"POST /api/users")
35
+ print(f"Body: {json.dumps(user_data, indent=2)}")
36
+
37
+ response = requests.post(f"{BASE_URL}/api/users", json=user_data)
38
+ print(f"Status: {response.status_code}")
39
+
40
+ if response.status_code != 200:
41
+ print(f"Error: {response.text}")
42
+ return
43
+
44
+ user = response.json()
45
+ user_id = user["id"]
46
+ print(f"✓ Created user: {user_id}")
47
+ print(f"Response: {json.dumps(user, indent=2)}")
48
+
49
+ print("\n" + "="*60)
50
+ print("2. GET USER")
51
+ print("="*60)
52
+
53
+ print(f"GET /api/users/{user_id}")
54
+ response = requests.get(f"{BASE_URL}/api/users/{user_id}")
55
+ print(f"Status: {response.status_code}")
56
+ print(f"Response: {json.dumps(response.json(), indent=2)}")
57
+
58
+ print("\n" + "="*60)
59
+ print("3. LIST ALL USERS")
60
+ print("="*60)
61
+
62
+ print(f"GET /api/users")
63
+ response = requests.get(f"{BASE_URL}/api/users")
64
+ print(f"Status: {response.status_code}")
65
+ users = response.json()
66
+ print(f"Total users: {len(users)}")
67
+ for u in users[:3]: # Show first 3 users
68
+ print(f" - {u['firstName']} {u['lastName']} ({u['id']})")
69
+
70
+ print("\n" + "="*60)
71
+ print("4. CREATE PROJECT")
72
+ print("="*60)
73
+
74
+ project_data = {
75
+ "name": "Test Project",
76
+ "description": "API Testing Project",
77
+ "userId": user_id
78
+ }
79
+
80
+ print(f"POST /api/projects")
81
+ print(f"Body: {json.dumps(project_data, indent=2)}")
82
+
83
+ response = requests.post(f"{BASE_URL}/api/projects", json=project_data)
84
+ print(f"Status: {response.status_code}")
85
+
86
+ if response.status_code != 200:
87
+ print(f"Error: {response.text}")
88
+ return
89
+
90
+ project = response.json()
91
+ project_id = project["id"]
92
+ print(f"✓ Created project: {project_id}")
93
+
94
+ print("\n" + "="*60)
95
+ print("5. LIST USER'S PROJECTS")
96
+ print("="*60)
97
+
98
+ print(f"GET /api/projects?userId={user_id}")
99
+ response = requests.get(f"{BASE_URL}/api/projects?userId={user_id}")
100
+ print(f"Status: {response.status_code}")
101
+ projects = response.json()
102
+ print(f"User has {len(projects)} project(s)")
103
+
104
+ print("\n" + "="*60)
105
+ print("6. CREATE TASK")
106
+ print("="*60)
107
+
108
+ task_data = {
109
+ "title": "Implement user authentication",
110
+ "description": "Add login and registration features",
111
+ "assignedTo": user_id
112
+ }
113
+
114
+ print(f"POST /api/projects/{project_id}/tasks")
115
+ print(f"Body: {json.dumps(task_data, indent=2)}")
116
+
117
+ response = requests.post(f"{BASE_URL}/api/projects/{project_id}/tasks", json=task_data)
118
+ print(f"Status: {response.status_code}")
119
+
120
+ if response.status_code != 200:
121
+ print(f"Error: {response.text}")
122
+ return
123
+
124
+ task = response.json()
125
+ task_id = task["id"]
126
+ print(f"✓ Created task: {task_id}")
127
+
128
+ print("\n" + "="*60)
129
+ print("7. LIST TASKS")
130
+ print("="*60)
131
+
132
+ print(f"GET /api/projects/{project_id}/tasks")
133
+ response = requests.get(f"{BASE_URL}/api/projects/{project_id}/tasks")
134
+ print(f"Status: {response.status_code}")
135
+ tasks = response.json()
136
+ print(f"Project has {len(tasks)} task(s)")
137
+
138
+ print("\n" + "="*60)
139
+ print("8. COMPLETE TASK (with AI documentation)")
140
+ print("="*60)
141
+
142
+ complete_data = {
143
+ "userId": user_id,
144
+ "whatIDid": "Implemented OAuth2 authentication with JWT tokens",
145
+ "codeSnippet": "def authenticate(token): return jwt.decode(token, SECRET_KEY)"
146
+ }
147
+
148
+ print(f"POST /api/tasks/{task_id}/complete")
149
+ print(f"Body: {json.dumps(complete_data, indent=2)}")
150
+ print("⏳ This will call Gemini AI to generate documentation...")
151
+
152
+ response = requests.post(f"{BASE_URL}/api/tasks/{task_id}/complete", json=complete_data)
153
+ print(f"Status: {response.status_code}")
154
+
155
+ if response.status_code == 200:
156
+ print(f"✓ Task completed with AI documentation")
157
+ print(f"Response: {json.dumps(response.json(), indent=2)}")
158
+ else:
159
+ print(f"Note: Task completion requires GEMINI_API_KEY in .env")
160
+ print(f"Response: {response.text}")
161
+
162
+ print("\n" + "="*60)
163
+ print("9. GET ACTIVITY FEED")
164
+ print("="*60)
165
+
166
+ print(f"GET /api/projects/{project_id}/activity")
167
+ response = requests.get(f"{BASE_URL}/api/projects/{project_id}/activity")
168
+ print(f"Status: {response.status_code}")
169
+ if response.status_code == 200:
170
+ activity = response.json()
171
+ print(f"Activity entries: {len(activity)}")
172
+
173
+ print("\n" + "="*60)
174
+ print("TEST COMPLETE - ALL ENDPOINTS WORKING!")
175
+ print("="*60)
176
+
177
+ print(f"""
178
+ SUMMARY OF CREATED DATA:
179
+ - User ID: {user_id}
180
+ - Project ID: {project_id}
181
+ - Task ID: {task_id}
182
+
183
+ You can now use these IDs to test other endpoints!
184
+ """)
185
+
186
+ print("="*60)
187
+ print("CURL EXAMPLES WITH YOUR DATA")
188
+ print("="*60)
189
+
190
+ print(f"""
191
+ # Create another user
192
+ curl -X POST {BASE_URL}/api/users \\
193
+ -H "Content-Type: application/json" \\
194
+ -d '{{"firstName": "Jane", "lastName": "Smith"}}'
195
+
196
+ # Get your user
197
+ curl {BASE_URL}/api/users/{user_id}
198
+
199
+ # List all users
200
+ curl {BASE_URL}/api/users
201
+
202
+ # Create another project
203
+ curl -X POST {BASE_URL}/api/projects \\
204
+ -H "Content-Type: application/json" \\
205
+ -d '{{"name": "Another Project", "description": "Description", "userId": "{user_id}"}}'
206
+
207
+ # Join a project (create a second user first)
208
+ curl -X POST {BASE_URL}/api/projects/{project_id}/join \\
209
+ -H "Content-Type: application/json" \\
210
+ -d '{{"userId": "ANOTHER_USER_ID"}}'
211
+
212
+ # Search project memory (after completing tasks)
213
+ curl -X POST {BASE_URL}/api/projects/{project_id}/search \\
214
+ -H "Content-Type: application/json" \\
215
+ -d '{{"query": "authentication"}}'
216
+ """)
217
+
218
+ if __name__ == "__main__":
219
+ test_api()
backend/tests/test_database.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Unit tests for database module."""
2
+
3
+ import pytest
4
+ from sqlalchemy.orm import Session
5
+
6
+ from app.database import get_db, init_db, SessionLocal, Base, engine
7
+
8
+
9
class TestDatabase:
    """Exercise database initialization and session lifecycle."""

    def test_init_db_creates_tables(self):
        """init_db must register every model table in the metadata."""
        init_db()

        # Importing the models guarantees their tables are declared.
        from app.models import User, Project, Task, LogEntry, ProjectMembership

        expected = {
            "users",
            "projects",
            "tasks",
            "log_entries",
            "project_memberships",
        }
        for table_name in expected:
            assert table_name in Base.metadata.tables

    def test_session_local_returns_session(self):
        """SessionLocal must hand back a usable SQLAlchemy Session."""
        new_session = SessionLocal()
        assert isinstance(new_session, Session)
        new_session.close()

    def test_get_db_yields_session(self):
        """get_db yields a Session and cleans up when exhausted."""
        gen = get_db()
        yielded = next(gen)
        assert isinstance(yielded, Session)

        # Drain the generator so its finally-block runs.
        try:
            next(gen)
        except StopIteration:
            pass

    def test_get_db_closes_on_exception(self):
        """get_db must still close the session when the caller raises."""
        gen = get_db()
        next(gen)

        # Injecting an exception into the generator triggers cleanup.
        try:
            gen.throw(Exception("test error"))
        except Exception:
            pass

        # No error above means the session cleanup path executed.
+
58
+
59
class TestDatabaseEngine:
    """Sanity-check the engine's configuration."""

    def test_engine_is_sqlite(self):
        """The connection URL must point at a SQLite database."""
        assert "sqlite" in str(engine.url)

    def test_engine_allows_multithread(self):
        """The engine must expose a pool (check_same_thread=False is set
        in database.py, allowing multi-threaded access)."""
        assert engine.pool is not None
backend/tests/test_devb.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Dev B Test Suite - Run with: .venv/Scripts/python test_devb.py
3
+ """
4
+
5
+ import asyncio
6
+ import sys
7
+ sys.path.insert(0, '.')
8
+
9
+ from app.llm import generate_documentation, synthesize_answer, get_embedding
10
+ from app.vectorstore import init_vectorstore, add_embedding, search, count_embeddings, delete_by_project
11
+
12
+ # Test counters
13
+ passed = 0
14
+ failed = 0
15
+
16
+
17
+ def test(name, condition, details=""):
18
+ global passed, failed
19
+ if condition:
20
+ print(f" [PASS] {name}")
21
+ passed += 1
22
+ else:
23
+ print(f" [FAIL] {name} - {details}")
24
+ failed += 1
25
+
26
+
27
+ async def test_llm():
28
+ print("\n[1/4] Testing LLM Client (llm.py)")
29
+ print("-" * 40)
30
+
31
+ # Test 1: get_embedding
32
+ try:
33
+ emb = await get_embedding("test embedding")
34
+ test("get_embedding returns list", isinstance(emb, list))
35
+ test("get_embedding returns 768 dims", len(emb) == 768, f"got {len(emb)}")
36
+ test("get_embedding returns floats", isinstance(emb[0], float))
37
+ except Exception as e:
38
+ test("get_embedding", False, str(e))
39
+
40
+ # Test 2: generate_documentation
41
+ try:
42
+ doc = await generate_documentation(
43
+ task_title="Setup API",
44
+ what_i_did="Created REST endpoints",
45
+ code_snippet="@app.get('/api')"
46
+ )
47
+ test("generate_documentation returns dict", isinstance(doc, dict))
48
+ test("generate_documentation has summary", "summary" in doc)
49
+ test("generate_documentation has details", "details" in doc)
50
+ test("generate_documentation has tags", "tags" in doc and isinstance(doc["tags"], list))
51
+ except Exception as e:
52
+ test("generate_documentation", False, str(e))
53
+
54
+ # Test 3: synthesize_answer
55
+ try:
56
+ answer = await synthesize_answer(
57
+ context="Task completed: Built login system with JWT authentication",
58
+ query="What authentication was implemented?"
59
+ )
60
+ test("synthesize_answer returns string", isinstance(answer, str))
61
+ test("synthesize_answer not empty", len(answer) > 0)
62
+ except Exception as e:
63
+ test("synthesize_answer", False, str(e))
64
+
65
+
66
+ async def test_vectorstore():
67
+ print("\n[2/4] Testing Vector Store (vectorstore.py)")
68
+ print("-" * 40)
69
+
70
+ # Test 1: init_vectorstore
71
+ try:
72
+ init_vectorstore()
73
+ test("init_vectorstore succeeds", True)
74
+ except Exception as e:
75
+ test("init_vectorstore", False, str(e))
76
+ return # Can't continue without init
77
+
78
+ # Clean up test data first
79
+ try:
80
+ delete_by_project("test-project-xyz")
81
+ except:
82
+ pass
83
+
84
+ # Test 2: add_embedding
85
+ try:
86
+ emb = await get_embedding("Test document about Python programming")
87
+ add_embedding(
88
+ log_entry_id="test-entry-1",
89
+ text="Test document about Python programming",
90
+ embedding=emb,
91
+ metadata={
92
+ "project_id": "test-project-xyz",
93
+ "user_id": "test-user",
94
+ "task_id": "test-task",
95
+ "created_at": "2024-01-01T00:00:00"
96
+ }
97
+ )
98
+ test("add_embedding succeeds", True)
99
+ except Exception as e:
100
+ test("add_embedding", False, str(e))
101
+
102
+ # Test 3: count_embeddings
103
+ try:
104
+ count = count_embeddings("test-project-xyz")
105
+ test("count_embeddings returns int", isinstance(count, int))
106
+ test("count_embeddings >= 1", count >= 1, f"got {count}")
107
+ except Exception as e:
108
+ test("count_embeddings", False, str(e))
109
+
110
+ # Test 4: search
111
+ try:
112
+ query_emb = await get_embedding("Python")
113
+ results = search(query_emb, "test-project-xyz", n_results=5)
114
+ test("search returns list", isinstance(results, list))
115
+ test("search finds results", len(results) > 0, "no results found")
116
+ if results:
117
+ test("search result has id", "id" in results[0])
118
+ test("search result has metadata", "metadata" in results[0])
119
+ test("search result has distance", "distance" in results[0])
120
+ except Exception as e:
121
+ test("search", False, str(e))
122
+
123
+ # Test 5: delete_by_project
124
+ try:
125
+ delete_by_project("test-project-xyz")
126
+ count_after = count_embeddings("test-project-xyz")
127
+ test("delete_by_project removes data", count_after == 0, f"still has {count_after}")
128
+ except Exception as e:
129
+ test("delete_by_project", False, str(e))
130
+
131
+
132
+ async def test_full_pipeline():
133
+ print("\n[3/4] Testing Full Pipeline")
134
+ print("-" * 40)
135
+
136
+ project_id = "pipeline-test-proj"
137
+
138
+ # Clean up first
139
+ try:
140
+ delete_by_project(project_id)
141
+ except:
142
+ pass
143
+
144
+ try:
145
+ # Step 1: Generate documentation
146
+ doc = await generate_documentation(
147
+ task_title="Implement user registration",
148
+ what_i_did="Added signup endpoint with email validation and password hashing",
149
+ code_snippet="def register(email, password): ..."
150
+ )
151
+ test("Pipeline: doc generation", "summary" in doc and "details" in doc)
152
+
153
+ # Step 2: Create embedding from doc
154
+ text_to_embed = f"{doc['summary']} {doc['details']}"
155
+ embedding = await get_embedding(text_to_embed)
156
+ test("Pipeline: embedding created", len(embedding) == 768)
157
+
158
+ # Step 3: Store in vectorstore
159
+ add_embedding(
160
+ log_entry_id="pipeline-log-1",
161
+ text=text_to_embed,
162
+ embedding=embedding,
163
+ metadata={"project_id": project_id, "user_id": "dev1"}
164
+ )
165
+ test("Pipeline: stored in vectorstore", count_embeddings(project_id) == 1)
166
+
167
+ # Step 4: Search for it
168
+ query_emb = await get_embedding("user registration signup")
169
+ results = search(query_emb, project_id)
170
+ test("Pipeline: search finds it", len(results) > 0)
171
+
172
+ # Step 5: Synthesize answer
173
+ if results:
174
+ context = results[0]["metadata"]["text"]
175
+ answer = await synthesize_answer(context, "What was done for user registration?")
176
+ test("Pipeline: answer synthesized", len(answer) > 20)
177
+
178
+ # Cleanup
179
+ delete_by_project(project_id)
180
+
181
+ except Exception as e:
182
+ test("Pipeline", False, str(e))
183
+
184
+
185
+ async def test_edge_cases():
186
+ print("\n[4/4] Testing Edge Cases")
187
+ print("-" * 40)
188
+
189
+ # Test empty search
190
+ try:
191
+ init_vectorstore()
192
+ emb = await get_embedding("random query")
193
+ results = search(emb, "nonexistent-project-12345")
194
+ test("Empty search returns empty list", results == [])
195
+ except Exception as e:
196
+ test("Empty search", False, str(e))
197
+
198
+ # Test long text embedding
199
+ try:
200
+ long_text = "word " * 1000 # ~5000 chars
201
+ emb = await get_embedding(long_text)
202
+ test("Long text embedding works", len(emb) == 768)
203
+ except Exception as e:
204
+ test("Long text embedding", False, str(e))
205
+
206
+ # Test special characters
207
+ try:
208
+ special_text = "Code: `const x = 'hello';` // comment <script>alert('xss')</script>"
209
+ emb = await get_embedding(special_text)
210
+ test("Special chars embedding works", len(emb) == 768)
211
+ except Exception as e:
212
+ test("Special chars embedding", False, str(e))
213
+
214
+
215
+ async def main():
216
+ print("=" * 50)
217
+ print(" DEV B TEST SUITE - Intelligence Layer")
218
+ print("=" * 50)
219
+
220
+ await test_llm()
221
+ await test_vectorstore()
222
+ await test_full_pipeline()
223
+ await test_edge_cases()
224
+
225
+ print("\n" + "=" * 50)
226
+ print(f" RESULTS: {passed} passed, {failed} failed")
227
+ print("=" * 50)
228
+
229
+ if failed > 0:
230
+ sys.exit(1)
231
+
232
+
233
+ if __name__ == "__main__":
234
+ asyncio.run(main())
backend/tests/test_model_router.py ADDED
@@ -0,0 +1,408 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Test cases for Model Router - multi-model rotation with rate limiting and caching."""
2
+
3
+ import asyncio
4
+ import time
5
+ from unittest.mock import patch, MagicMock, AsyncMock
6
+ from datetime import datetime, timedelta
7
+ import sys
8
+ import os
9
+
10
+ # Add parent to path for imports
11
+ sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
12
+
13
+ from dotenv import load_dotenv
14
+ load_dotenv()
15
+
16
+ # Test configuration
17
+ TESTS_PASSED = 0
18
+ TESTS_FAILED = 0
19
+
20
+
21
def test(name):
    """Decorator that runs a sync or async test function and records pass/fail.

    Prints a [PASS]/[FAIL]/[ERROR] line tagged with `name` and updates the
    module-level TESTS_PASSED / TESTS_FAILED counters. The returned wrapper is
    always async so every test can be awaited uniformly by run_tests().
    """
    def decorator(func):
        # Local import keeps this block self-contained.
        from functools import wraps

        @wraps(func)  # preserve __name__/__doc__ of the wrapped test for debugging
        async def wrapper():
            global TESTS_PASSED, TESTS_FAILED
            try:
                # Support both plain and coroutine test functions.
                if asyncio.iscoroutinefunction(func):
                    await func()
                else:
                    func()
                print(f"[PASS] {name}")
                TESTS_PASSED += 1
            except AssertionError as e:
                # Assertion failures are expected test failures.
                print(f"[FAIL] {name}: {e}")
                TESTS_FAILED += 1
            except Exception as e:
                # Anything else is an unexpected error, counted as a failure too.
                print(f"[ERROR] {name}: {e}")
                TESTS_FAILED += 1
        return wrapper
    return decorator
41
+
42
+
43
# ========== Model Selection Tests ==========
# Each test builds a fresh ModelRouter and checks which model the task-priority
# table picks. (The unused TASK_PRIORITIES import was removed.)

@test("Model selection returns best model for chat task")
def test_model_selection_chat():
    from app.model_router import ModelRouter
    router = ModelRouter()
    model = router.get_model_for_task("chat")
    assert model == "gemini-2.0-flash", f"Expected gemini-2.0-flash, got {model}"


@test("Model selection returns best model for documentation task")
def test_model_selection_documentation():
    from app.model_router import ModelRouter
    router = ModelRouter()
    model = router.get_model_for_task("documentation")
    assert model == "gemini-2.0-flash-lite", f"Expected gemini-2.0-flash-lite, got {model}"


@test("Model selection returns best model for synthesis task")
def test_model_selection_synthesis():
    from app.model_router import ModelRouter
    router = ModelRouter()
    model = router.get_model_for_task("synthesis")
    assert model == "gemma-3-27b-it", f"Expected gemma-3-27b-it, got {model}"


@test("Model selection falls back to default for unknown task")
def test_model_selection_unknown():
    from app.model_router import ModelRouter
    router = ModelRouter()
    model = router.get_model_for_task("unknown_task_type")
    assert model == "gemini-2.0-flash", f"Expected gemini-2.0-flash (default), got {model}"
75
+
76
+
77
# ========== Rate Limiting Tests ==========

@test("Rate limit tracking works correctly")
def test_rate_limit_tracking():
    """A key has capacity until its per-minute budget is fully consumed."""
    from app.model_router import ModelRouter, MODEL_CONFIGS
    router = ModelRouter()

    # Fresh router: key 0 must report capacity.
    assert router._check_rate_limit("gemini-2.0-flash", 0) == True

    # Burn through the whole per-minute budget for key 0.
    budget = MODEL_CONFIGS["gemini-2.0-flash"]["rpm"]
    for _ in range(budget):
        router._record_usage("gemini-2.0-flash", 0)

    # Key 0 must now refuse further requests for this model.
    assert router._check_rate_limit("gemini-2.0-flash", 0) == False


@test("Model falls back when primary is rate limited")
def test_model_fallback():
    """When the top-priority model is saturated on every key, the next one is used."""
    from app.model_router import ModelRouter, MODEL_CONFIGS
    router = ModelRouter()

    # Saturate gemini-2.0-flash on every configured API key.
    budget = MODEL_CONFIGS["gemini-2.0-flash"]["rpm"]
    for key in range(len(router.api_keys)):
        for _ in range(budget):
            router._record_usage("gemini-2.0-flash", key)

    # The chat priority list should now hand out the next model down.
    model = router.get_model_for_task("chat")
    assert model == "gemini-2.0-flash-lite", f"Expected fallback to gemini-2.0-flash-lite, got {model}"


@test("Returns None when all models exhausted on all keys")
def test_all_models_exhausted():
    """With no capacity anywhere, the router must signal exhaustion with None."""
    from app.model_router import ModelRouter, MODEL_CONFIGS
    router = ModelRouter()

    # Saturate every model on every key.
    for key in range(len(router.api_keys)):
        for name, cfg in MODEL_CONFIGS.items():
            for _ in range(cfg["rpm"]):
                router._record_usage(name, key)

    model = router.get_model_for_task("chat")
    assert model is None, f"Expected None when all exhausted, got {model}"
126
+
127
+
128
# ========== Cache Tests ==========

@test("Cache stores and retrieves responses")
def test_cache_store_retrieve():
    """A stored response is retrievable under the same cache key."""
    from app.model_router import ModelRouter
    router = ModelRouter()

    key = router._get_cache_key("chat", "user1", "test prompt")

    # Nothing cached yet for this key.
    assert router._check_cache(key) is None

    # Write, then read back.
    router._store_cache(key, "cached response", "gemini-2.0-flash")
    cached = router._check_cache(key)
    assert cached == "cached response", f"Expected 'cached response', got {cached}"


@test("Cache key includes user_id")
def test_cache_key_user_differentiation():
    """Identical prompts from different users must not collide in the cache."""
    from app.model_router import ModelRouter
    router = ModelRouter()

    first = router._get_cache_key("chat", "user1", "same prompt")
    second = router._get_cache_key("chat", "user2", "same prompt")
    assert first != second, "Cache keys should differ for different users"


@test("Cache key includes task_type")
def test_cache_key_task_differentiation():
    """Identical prompts for different task types must not collide in the cache."""
    from app.model_router import ModelRouter
    router = ModelRouter()

    first = router._get_cache_key("chat", "user1", "same prompt")
    second = router._get_cache_key("documentation", "user1", "same prompt")
    assert first != second, "Cache keys should differ for different task types"


@test("Cache expires after TTL")
def test_cache_expiry():
    """An entry older than CACHE_TTL is treated as missing."""
    from app.model_router import ModelRouter, CACHE_TTL
    router = ModelRouter()

    key = router._get_cache_key("chat", "user1", "test prompt")
    router._store_cache(key, "cached response", "gemini-2.0-flash")

    # Back-date the entry past the TTL instead of sleeping.
    router.cache[key]["timestamp"] = datetime.now() - timedelta(seconds=CACHE_TTL + 1)

    assert router._check_cache(key) is None, "Expired cache entry should return None"


@test("Cache cleaning removes expired entries")
def test_cache_cleaning():
    """_clean_cache drops stale entries and keeps fresh ones."""
    from app.model_router import ModelRouter, CACHE_TTL
    router = ModelRouter()

    stale_stamp = datetime.now() - timedelta(seconds=CACHE_TTL + 1)

    # Seed five stale entries...
    for i in range(5):
        router.cache[f"expired_{i}"] = {
            "response": f"response_{i}",
            "timestamp": stale_stamp,
            "model": "test"
        }

    # ...and one fresh entry that must survive.
    router.cache["valid"] = {
        "response": "valid_response",
        "timestamp": datetime.now(),
        "model": "test"
    }

    router._clean_cache()

    assert len(router.cache) == 1, f"Expected 1 entry after cleaning, got {len(router.cache)}"
    assert "valid" in router.cache, "Valid entry should remain after cleaning"
213
+
214
+
215
# ========== Stats Tests ==========

@test("Stats returns correct usage info")
def test_stats():
    """get_stats reports per-model usage and a limit scaled by key count."""
    from app.model_router import ModelRouter, MODEL_CONFIGS
    router = ModelRouter()

    # Two calls against flash and one against gemma, all on key 0.
    for name in ("gemini-2.0-flash", "gemini-2.0-flash", "gemma-3-27b-it"):
        router._record_usage(name, 0)

    stats = router.get_stats()

    assert stats["models"]["gemini-2.0-flash"]["used"] == 2, "Should show 2 uses for gemini-2.0-flash"
    assert stats["models"]["gemma-3-27b-it"]["used"] == 1, "Should show 1 use for gemma-3-27b-it"

    # The reported limit is the per-key RPM multiplied by the number of keys.
    expected_limit = MODEL_CONFIGS["gemini-2.0-flash"]["rpm"] * len(router.api_keys)
    assert stats["models"]["gemini-2.0-flash"]["limit"] == expected_limit
234
+
235
+
236
# ========== Multi-Key Tests ==========

@test("Multiple keys are loaded from environment")
def test_multi_key_loading():
    """The router must come up with at least one API key configured."""
    from app.model_router import ModelRouter
    router = ModelRouter()
    assert len(router.api_keys) >= 1, "Should have at least one API key"


@test("Key health tracking works")
def test_key_health_tracking():
    """Keys start healthy and can be explicitly marked unhealthy with an error."""
    from app.model_router import ModelRouter, KEY_COOLDOWN_RATE_LIMIT
    router = ModelRouter()

    # Every key begins in the healthy state.
    for idx in range(len(router.api_keys)):
        assert router._is_key_healthy(idx) == True, f"Key {idx} should be healthy initially"

    # Flag key 0 with a simulated failure.
    router._mark_key_unhealthy(0, Exception("Test error"), KEY_COOLDOWN_RATE_LIMIT)

    assert router._is_key_healthy(0) == False, "Key 0 should be unhealthy after marking"
    assert router.key_health[0]["last_error"] == "Test error"


@test("Key rotation skips unhealthy keys")
def test_key_rotation_skips_unhealthy():
    """Rotation must avoid a key that is sitting out a cooldown."""
    from app.model_router import ModelRouter
    router = ModelRouter()

    # With a single key there is nothing to rotate to.
    if len(router.api_keys) < 2:
        return  # Skip if only one key

    # Sideline key 0 for a minute.
    router._mark_key_unhealthy(0, Exception("Test"), 60)

    chosen, _ = router._get_next_key()
    assert chosen != 0 or len(router.api_keys) == 1, "Should skip unhealthy key 0"


@test("Key auto-recovers after cooldown")
def test_key_auto_recovery():
    """A key whose retry window has already passed heals on the next check."""
    from app.model_router import ModelRouter
    from datetime import datetime, timedelta
    router = ModelRouter()

    # Plant an unhealthy record whose cooldown expired one second ago.
    router.key_health[0] = {
        "healthy": False,
        "last_error": "Test",
        "retry_after": datetime.now() - timedelta(seconds=1)  # Already expired
    }

    # The health check itself should flip the key back to healthy.
    assert router._is_key_healthy(0) == True, "Key should auto-recover after cooldown"
    assert router.key_health[0]["healthy"] == True
    assert router.key_health[0]["last_error"] is None


@test("Stats includes key information")
def test_stats_includes_keys():
    """get_stats exposes the key pool: totals, healthy count, and per-key detail."""
    from app.model_router import ModelRouter
    router = ModelRouter()

    stats = router.get_stats()

    assert "keys" in stats, "Stats should include keys info"
    assert stats["keys"]["total"] >= 1, "Should have at least one key"
    assert stats["keys"]["healthy"] >= 1, "Should have at least one healthy key"
    assert "details" in stats["keys"], "Stats should include key details"
307
+
308
+
309
# ========== Integration Tests (requires API key) ==========

@test("Generate returns response and model info")
async def test_generate_integration():
    """A live call yields non-empty text plus the name of the serving model."""
    from app.model_router import generate_with_info

    reply, served_by = await generate_with_info(
        "Say 'test' in one word.",
        task_type="default",
        use_cache=False
    )

    assert reply is not None, "Response should not be None"
    assert len(reply) > 0, "Response should not be empty"
    # The answer must come from a known model (or the cache sentinel).
    assert served_by in ["gemini-2.0-flash", "gemini-2.0-flash-lite", "gemma-3-27b-it",
                         "gemma-3-12b-it", "gemma-3-4b-it", "gemma-3-1b-it", "cache"]


@test("Generate uses cache on repeated calls")
async def test_generate_uses_cache():
    """The second identical request is served from cache with the same text."""
    from app.model_router import generate_with_info, router

    # Start from an empty cache so the first call must hit a model.
    router.cache.clear()

    prompt = "Say 'cached test' in two words."

    first_reply, first_source = await generate_with_info(prompt, task_type="default", use_cache=True)
    assert first_source != "cache", f"First call should not be from cache, got {first_source}"

    second_reply, second_source = await generate_with_info(prompt, task_type="default", use_cache=True)
    assert second_source == "cache", f"Second call should be from cache, got {second_source}"
    assert first_reply == second_reply, "Cached response should match original"
344
+
345
+
346
# ========== Run Tests ==========

async def run_tests():
    """Run all tests in order and return True when every test passed.

    Integration tests are skipped when no Gemini API key is present in the
    environment, so the unit portion of the suite stays runnable offline.
    """
    print("=" * 60)
    print("Model Router Tests")
    print("=" * 60)
    print()

    # Unit tests (no API needed)
    print("--- Model Selection Tests ---")
    await test_model_selection_chat()
    await test_model_selection_documentation()
    await test_model_selection_synthesis()
    await test_model_selection_unknown()

    print()
    print("--- Rate Limiting Tests ---")
    await test_rate_limit_tracking()
    await test_model_fallback()
    await test_all_models_exhausted()

    print()
    print("--- Cache Tests ---")
    await test_cache_store_retrieve()
    await test_cache_key_user_differentiation()
    await test_cache_key_task_differentiation()
    await test_cache_expiry()
    await test_cache_cleaning()

    print()
    print("--- Stats Tests ---")
    await test_stats()

    print()
    print("--- Multi-Key Tests ---")
    await test_multi_key_loading()
    await test_key_health_tracking()
    await test_key_rotation_skips_unhealthy()
    await test_key_auto_recovery()
    await test_stats_includes_keys()

    print()
    print("--- Integration Tests (requires API key) ---")

    # Only talk to the live API when a key is configured.
    if not os.getenv("GEMINI_API_KEY") and not os.getenv("GEMINI_API_KEYS"):
        print("[SKIP] Integration tests skipped - no API keys")
    else:
        await test_generate_integration()
        await test_generate_uses_cache()

    print()
    print("=" * 60)
    print(f"Results: {TESTS_PASSED} passed, {TESTS_FAILED} failed")
    print("=" * 60)

    return TESTS_FAILED == 0


if __name__ == "__main__":
    success = asyncio.run(run_tests())
    # sys.exit instead of the site-module exit() builtin, which is absent
    # under `python -S` and discouraged in non-interactive code.
    sys.exit(0 if success else 1)
backend/tests/test_models.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Test which Gemini/Gemma models are available for your API key."""
2
+
3
+ import google.generativeai as genai
4
+ import os
5
+ from dotenv import load_dotenv
6
+
7
+ load_dotenv()
8
+
9
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
10
+ if not GEMINI_API_KEY:
11
+ print("ERROR: GEMINI_API_KEY not found in .env")
12
+ exit(1)
13
+
14
+ genai.configure(api_key=GEMINI_API_KEY)
15
+
16
+ # Models to test
17
+ MODELS = [
18
+ "gemini-2.0-flash",
19
+ "gemini-2.0-flash-lite",
20
+ "gemma-3-1b-it",
21
+ "gemma-3-4b-it",
22
+ "gemma-3-12b-it",
23
+ "gemma-3-27b-it",
24
+ ]
25
+
26
+ TEST_PROMPT = "Say 'Hello' in one word."
27
+
28
+ print("=" * 60)
29
+ print("Testing Gemini/Gemma Models Availability")
30
+ print("=" * 60)
31
+
32
+ available = []
33
+ unavailable = []
34
+
35
+ for model_name in MODELS:
36
+ try:
37
+ model = genai.GenerativeModel(model_name)
38
+ response = model.generate_content(TEST_PROMPT)
39
+ text = response.text.strip()[:50]
40
+ print(f"[OK] {model_name:25} -> {text}")
41
+ available.append(model_name)
42
+ except Exception as e:
43
+ error_msg = str(e)[:60]
44
+ print(f"[ERR] {model_name:25} -> {error_msg}")
45
+ unavailable.append(model_name)
46
+
47
+ print("\n" + "=" * 60)
48
+ print("SUMMARY")
49
+ print("=" * 60)
50
+ print(f"\nAvailable ({len(available)}):")
51
+ for m in available:
52
+ print(f" - {m}")
53
+
54
+ print(f"\nUnavailable ({len(unavailable)}):")
55
+ for m in unavailable:
56
+ print(f" - {m}")
57
+
58
+ # Calculate combined RPM
59
+ rpm_map = {
60
+ "gemini-2.0-flash": 15,
61
+ "gemini-2.0-flash-lite": 30,
62
+ "gemma-3-1b-it": 30,
63
+ "gemma-3-4b-it": 30,
64
+ "gemma-3-12b-it": 30,
65
+ "gemma-3-27b-it": 30,
66
+ }
67
+
68
+ total_rpm = sum(rpm_map.get(m, 0) for m in available)
69
+ print(f"\nCombined RPM capacity: {total_rpm} RPM")
70
+ print(f"With 50% cache hit rate: ~{total_rpm * 2} effective RPM")
backend/tests/test_projects.py ADDED
@@ -0,0 +1,171 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Unit tests for project tools."""
2
+
3
+ import pytest
4
+ from app.tools.projects import (
5
+ create_project, list_projects, join_project, check_project_id_available
6
+ )
7
+ from app.database import SessionLocal
8
+ from app.models import Project, ProjectMembership
9
+
10
+
11
class TestCheckProjectIdAvailable:
    """Availability checks for candidate project IDs."""

    def test_available_id(self, create_user):
        """An ID no project uses is reported as available."""
        outcome = check_project_id_available("nonexistent-project")

        assert outcome["available"] == True
        assert outcome["project_id"] == "nonexistent-project"

    def test_unavailable_id(self, create_user):
        """An ID already claimed by a project is reported as taken."""
        creator = create_user("Test", "User")

        # Claim the ID by creating a project with it.
        create_project("takenid", "Description", creator.id)

        # The same ID must now be unavailable.
        outcome = check_project_id_available("takenid")

        assert outcome["available"] == False
        assert outcome["project_id"] == "takenid"
33
+
34
+
35
class TestCreateProject:
    """Creation semantics: the project name doubles as its ID."""

    def test_create_project_uses_name_as_id(self, create_user):
        """The returned record uses the supplied name as both id and name."""
        creator = create_user("Creator", "Test")

        made = create_project("fastgate", "A fast gateway", creator.id)

        assert made["id"] == "fastgate"
        assert made["name"] == "fastgate"
        assert made["description"] == "A fast gateway"
        assert made["created_by"] == creator.id

    def test_create_project_adds_owner_membership(self, create_user):
        """The creator receives an 'owner' membership row automatically."""
        creator = create_user("Owner", "Test")

        made = create_project("ownerproj", "Test", creator.id)

        session = SessionLocal()
        try:
            row = session.query(ProjectMembership).filter(
                ProjectMembership.project_id == made["id"],
                ProjectMembership.user_id == creator.id
            ).first()

            assert row is not None
            assert row.role == "owner"
        finally:
            session.close()

    def test_create_project_duplicate_id_fails(self, create_user):
        """Reusing an existing project name/ID is rejected with an error."""
        creator = create_user("Test", "User")

        create_project("duplicate", "First project", creator.id)

        # Second creation under the same name must fail.
        outcome = create_project("duplicate", "Second project", creator.id)

        assert "error" in outcome
        assert "already exists" in outcome["error"]

    def test_create_project_nonexistent_user(self):
        """Creation on behalf of an unknown user is rejected."""
        outcome = create_project("orphan", "No owner", "nonexistent-user-id")

        assert "error" in outcome
        assert "not found" in outcome["error"]
86
+
87
+
88
class TestListProjects:
    """Listing returns every project the user belongs to, with their role."""

    def test_list_projects_empty(self, create_user):
        """A user with no memberships gets an empty projects list."""
        newcomer = create_user("New", "User")

        outcome = list_projects(newcomer.id)

        assert "projects" in outcome
        assert len(outcome["projects"]) == 0

    def test_list_projects_with_owned_project(self, create_user):
        """Projects the user created appear with role 'owner'."""
        creator = create_user("Owner", "Test")
        create_project("ownedproj", "My project", creator.id)

        outcome = list_projects(creator.id)

        assert len(outcome["projects"]) == 1
        first = outcome["projects"][0]
        assert first["name"] == "ownedproj"
        assert first["role"] == "owner"

    def test_list_projects_with_member_project(self, create_user):
        """Projects the user joined appear with role 'member'."""
        creator = create_user("Owner", "Test")
        joiner = create_user("Member", "Test")

        create_project("sharedproj", "Shared project", creator.id)
        join_project("sharedproj", joiner.id)

        outcome = list_projects(joiner.id)

        assert len(outcome["projects"]) == 1
        first = outcome["projects"][0]
        assert first["name"] == "sharedproj"
        assert first["role"] == "member"
124
+
125
+
126
class TestJoinProject:
    """Join flow: success, idempotency, and missing project/user handling."""

    def test_join_project_success(self, create_user):
        """Joining an existing project grants the 'member' role."""
        creator = create_user("Owner", "Test")
        joiner = create_user("Member", "Test")

        create_project("jointest", "Test project", creator.id)
        outcome = join_project("jointest", joiner.id)

        assert "message" in outcome
        assert outcome["project_id"] == "jointest"
        assert outcome["role"] == "member"

    def test_join_project_already_member(self, create_user):
        """A repeat join is acknowledged, not duplicated."""
        creator = create_user("Owner", "Test")
        joiner = create_user("Member", "Test")

        create_project("alreadyin", "Test project", creator.id)
        join_project("alreadyin", joiner.id)

        # Second attempt by the same user.
        outcome = join_project("alreadyin", joiner.id)

        assert "Already a member" in outcome.get("message", "")

    def test_join_nonexistent_project(self, create_user):
        """Joining an unknown project ID yields a not-found error."""
        someone = create_user("User", "Test")

        outcome = join_project("doesnotexist", someone.id)

        assert "error" in outcome
        assert "not found" in outcome["error"]

    def test_join_project_nonexistent_user(self, create_user):
        """Joining with an unknown user ID yields a not-found error."""
        creator = create_user("Owner", "Test")
        create_project("validproj", "Test", creator.id)

        outcome = join_project("validproj", "nonexistent-user")

        assert "error" in outcome
        assert "not found" in outcome["error"]
backend/tests/test_scenarios.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import asyncio
2
+ from concurrent.futures import ThreadPoolExecutor, as_completed
3
+ import pytest
4
+ from app.tools.projects import create_project, join_project
5
+ from app.tools.tasks import create_task
6
+ from app.tools import memory as memory_tools
7
+ from app.database import SessionLocal
8
+ from app.models import ProjectMembership, Task as TaskModel, LogEntry, TaskStatus
9
+
10
@pytest.mark.timeout(30)
def test_1_project_multiple_members(create_user):
    """One project is created and two extra members join it; the DB must hold
    three membership rows (the owner plus both joiners)."""
    # Users: one creator, two joiners.
    creator = create_user("Owner", "One")
    second = create_user("Member", "Two")
    third = create_user("Member", "Three")

    # The creator makes the project.
    made = create_project("Proj A", "Test project", creator.id)
    pid = made["id"]

    # Both other users join it.
    join_project(pid, second.id)
    join_project(pid, third.id)

    # Inspect the membership table directly.
    session = SessionLocal()
    try:
        rows = session.query(ProjectMembership).filter(ProjectMembership.project_id == pid).all()
        assert len(rows) == 3  # Owner + 2 members
    finally:
        session.close()
35
+
36
+
37
@pytest.mark.timeout(60)
def test_multiple_projects_multiple_members(create_user):
    """Three projects are created concurrently and five users join every one of
    them in parallel; each project must end up with exactly five members."""
    # Five users; the first three each create one project.
    users = [create_user(f"User{i}", f"Last{i}") for i in range(5)]
    creator_ids = [u.id for u in users[:3]]

    # Create the three projects concurrently.
    with ThreadPoolExecutor(max_workers=6) as pool:
        pending = [
            pool.submit(create_project, f"P{i}", "desc", uid)
            for i, uid in enumerate(creator_ids)
        ]
        created = [p.result() for p in pending]

    pids = [proj["id"] for proj in created]

    # Fire every join at once: 3 projects * 5 users = 15 parallel joins.
    with ThreadPoolExecutor(max_workers=15) as pool:
        pending = [
            pool.submit(join_project, pid, u.id)
            for pid in pids
            for u in users
        ]
        # Block until every join has finished (surfacing any exception).
        for p in as_completed(pending):
            p.result()

    # Each project must now have exactly the five distinct users as members.
    session = SessionLocal()
    try:
        for pid in pids:
            rows = session.query(ProjectMembership).filter(ProjectMembership.project_id == pid).all()
            distinct = set(row.user_id for row in rows)
            assert len(distinct) == 5
    finally:
        session.close()
80
+
81
+
82
@pytest.mark.asyncio
@pytest.mark.timeout(60)
async def test_concurrent_task_completion(create_user, monkeypatch):
    """Four members each complete their own task at the same time; every
    completion must report success. LLM and vector-store calls are stubbed so
    the test runs fast and without API keys."""
    creator = create_user("Owner", "Test")
    others = [create_user(f"Member{i}", f"Test{i}") for i in range(3)]
    made = create_project("Concurrency", "Simul tasks", creator.id)
    pid = made["id"]

    # Stubs that stand in for the real LLM / embedding pipeline.
    async def fake_doc(*args, **kwargs):
        return {"summary": "Done", "details": "Details...", "tags": ["test"]}

    async def fake_embed(*args, **kwargs):
        return [0.0] * 768  # Fake vector

    # Swap the real functions for the stubs for the duration of the test.
    monkeypatch.setattr(memory_tools, "generate_documentation", fake_doc)
    monkeypatch.setattr(memory_tools, "get_embedding", fake_embed)
    monkeypatch.setattr(memory_tools, "add_embedding", lambda **kw: None)

    # One task per participant (owner included).
    everyone = [creator] + others
    created = [create_task(pid, f"Task {i}", "desc", assigned_to=u.id) for i, u in enumerate(everyone)]

    # Complete all tasks concurrently.
    pending = [
        memory_tools.complete_task(task["id"], pid, person.id, "Finished")
        for task, person in zip(created, everyone)
    ]
    results = await asyncio.gather(*pending)

    assert all(r.get("success") for r in results)
117
+
118
+
119
@pytest.mark.timeout(30)
@pytest.mark.xfail(reason="Known Issue: No DB constraint prevents duplicate joins under race conditions")
def test_same_user_joining_twice_simultaneously(create_user):
    """The same user fires two simultaneous joins for one project; only a
    single membership row should ever exist (currently xfail: no DB-level
    uniqueness constraint guards against this race)."""
    creator = create_user("Owner", "Two")
    joiner = create_user("New", "Member")
    made = create_project("Proj Race", "Race condition join", creator.id)
    pid = made["id"]

    # Two identical joins for the SAME user, racing each other.
    with ThreadPoolExecutor(max_workers=2) as pool:
        pending = [pool.submit(join_project, pid, joiner.id) for _ in range(2)]
        _ = [p.result() for p in pending]

    # The membership table should contain exactly one row for this pair.
    session = SessionLocal()
    try:
        rows = session.query(ProjectMembership).filter(
            ProjectMembership.project_id == pid,
            ProjectMembership.user_id == joiner.id,
        ).all()
        assert len(rows) == 1
    finally:
        session.close()
backend/tests/test_schemas.py ADDED
@@ -0,0 +1,198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Unit tests for Pydantic schemas."""
2
+
3
+ import pytest
4
+ from datetime import datetime
5
+ from pydantic import ValidationError
6
+
7
+ from app.schemas import (
8
+ UserBase, UserCreate, User,
9
+ ProjectBase, ProjectCreate, ProjectJoin, Project,
10
+ TaskBase, TaskCreate, Task,
11
+ TaskCompleteRequest, TaskCompleteResponse,
12
+ SearchRequest, SearchFilters, SearchResponse,
13
+ SmartQueryRequest, SmartQueryResponse
14
+ )
15
+
16
+
17
+ class TestUserSchemas:
18
+ """Test User-related schemas."""
19
+
20
+ def test_user_base_requires_names(self):
21
+ """Test UserBase requires firstName and lastName."""
22
+ user = UserBase(firstName="John", lastName="Doe")
23
+ assert user.firstName == "John"
24
+ assert user.lastName == "Doe"
25
+
26
+ def test_user_base_no_email_field(self):
27
+ """Test UserBase does not have email field."""
28
+ user = UserBase(firstName="Test", lastName="User")
29
+ assert not hasattr(user, "email")
30
+
31
+ def test_user_base_optional_avatar(self):
32
+ """Test avatar_url is optional."""
33
+ user = UserBase(firstName="Test", lastName="User")
34
+ assert user.avatar_url is None
35
+
36
+ user_with_avatar = UserBase(
37
+ firstName="Test",
38
+ lastName="User",
39
+ avatar_url="https://example.com/avatar.png"
40
+ )
41
+ assert user_with_avatar.avatar_url == "https://example.com/avatar.png"
42
+
43
+ def test_user_create_inherits_user_base(self):
44
+ """Test UserCreate has same fields as UserBase."""
45
+ user = UserCreate(firstName="New", lastName="User")
46
+ assert user.firstName == "New"
47
+ assert user.lastName == "User"
48
+
49
+ def test_user_validation_missing_first_name(self):
50
+ """Test validation fails without firstName."""
51
+ with pytest.raises(ValidationError):
52
+ UserBase(lastName="Doe")
53
+
54
+ def test_user_validation_missing_last_name(self):
55
+ """Test validation fails without lastName."""
56
+ with pytest.raises(ValidationError):
57
+ UserBase(firstName="John")
58
+
59
+ def test_user_schema_has_id_and_created_at(self):
60
+ """Test User schema includes id and created_at."""
61
+ user = User(
62
+ id="tes1234",
63
+ firstName="Test",
64
+ lastName="User",
65
+ created_at=datetime.now()
66
+ )
67
+ assert user.id == "tes1234"
68
+ assert user.created_at is not None
69
+
70
+
71
class TestProjectSchemas:
    """Validation behavior of the Project Pydantic schemas."""

    def test_project_base(self):
        """Name and description populate ProjectBase."""
        schema = ProjectBase(name="myproject", description="A test project")
        assert schema.name == "myproject"
        assert schema.description == "A test project"

    def test_project_create_requires_user_id(self):
        """ProjectCreate additionally carries the creating userId."""
        schema = ProjectCreate(
            name="testproj",
            description="Test",
            userId="usr1234"
        )
        assert schema.userId == "usr1234"

    def test_project_join_schema(self):
        """ProjectJoin only needs the joining userId."""
        schema = ProjectJoin(userId="usr1234")
        assert schema.userId == "usr1234"

    def test_project_schema(self):
        """The full Project schema uses the project name as its id."""
        schema = Project(
            id="myproject",
            name="myproject",
            description="Test",
            created_by="usr1234",
            created_at=datetime.now()
        )
        assert schema.id == schema.name  # ID is the name
104
+
105
+
106
class TestTaskSchemas:
    """Validation checks for the Task-related Pydantic schemas."""

    def test_task_base(self):
        """TaskBase needs only a title; description defaults to None."""
        base = TaskBase(title="Test Task")
        assert base.title == "Test Task"
        assert base.description is None

    def test_task_create_optional_assigned_to(self):
        """TaskCreate leaves assignedTo unset unless provided."""
        unassigned = TaskCreate(title="Unassigned Task")
        assert unassigned.assignedTo is None

        assigned = TaskCreate(title="Assigned", assignedTo="usr1234")
        assert assigned.assignedTo == "usr1234"

    def test_task_complete_request(self):
        """TaskCompleteRequest carries user, summary, and optional snippet."""
        req = TaskCompleteRequest(
            userId="usr1234",
            whatIDid="Fixed the bug",
            codeSnippet="def fix(): pass",
        )
        assert req.userId == "usr1234"
        assert req.whatIDid == "Fixed the bug"
        assert req.codeSnippet == "def fix(): pass"

    def test_task_complete_request_optional_code(self):
        """codeSnippet may be omitted and defaults to None."""
        req = TaskCompleteRequest(
            userId="usr1234",
            whatIDid="Fixed the bug",
        )
        assert req.codeSnippet is None
141
+
142
+
143
class TestSearchSchemas:
    """Validation checks for the Search-related Pydantic schemas."""

    def test_search_filters_all_optional(self):
        """Every SearchFilters field is optional and defaults to None."""
        empty = SearchFilters()
        assert empty.userId is None
        assert empty.dateFrom is None
        assert empty.dateTo is None
        assert empty.tags is None

    def test_search_request(self):
        """SearchRequest needs only a query string."""
        req = SearchRequest(query="authentication")
        assert req.query == "authentication"
        assert req.filters is None

    def test_search_request_with_filters(self):
        """SearchRequest nests a populated SearchFilters payload."""
        chosen = SearchFilters(userId="usr1234", tags=["auth", "security"])
        req = SearchRequest(query="login", filters=chosen)
        assert req.filters.userId == "usr1234"
        assert req.filters.tags == ["auth", "security"]
166
+
167
+
168
class TestSmartQuerySchemas:
    """Validation checks for the SmartQuery request/response schemas."""

    def test_smart_query_request(self):
        """SmartQueryRequest needs query + user; datetime is optional."""
        req = SmartQueryRequest(
            query="What did I do yesterday?",
            currentUserId="usr1234",
        )
        assert req.query == "What did I do yesterday?"
        assert req.currentUserId == "usr1234"
        assert req.currentDatetime is None

    def test_smart_query_request_with_datetime(self):
        """An explicit ISO datetime string is stored verbatim."""
        req = SmartQueryRequest(
            query="What tasks are done?",
            currentUserId="usr1234",
            currentDatetime="2025-11-30T12:00:00Z",
        )
        assert req.currentDatetime == "2025-11-30T12:00:00Z"

    def test_smart_query_response(self):
        """SmartQueryResponse exposes answer, tools_used, and sources."""
        resp = SmartQueryResponse(
            answer="You completed the auth task yesterday.",
            tools_used=["get_user_activity"],
            sources=[],
        )
        assert resp.answer == "You completed the auth task yesterday."
        assert "get_user_activity" in resp.tools_used
backend/tests/test_smart_query.py ADDED
@@ -0,0 +1,656 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Smart Query Test Suite - Run with: python test_smart_query.py
3
+
4
+ Tests the smart query tool functions and integration.
5
+ Requires GEMINI_API_KEY for full integration tests.
6
+ """
7
+
8
+ import asyncio
9
+ import sys
10
+ from datetime import datetime, timedelta
11
+
12
+ sys.path.insert(0, '.')
13
+
14
+ from app.database import init_db, SessionLocal
15
+ from app.models import User, Task, LogEntry, TaskStatus, ActorType, ActionType, ProjectMembership
16
+ from app.tools.projects import create_project
17
+ from app.tools.tasks import create_task
18
+ from app.vectorstore import init_vectorstore, delete_by_project
19
+
20
+ # Test counters
21
+ passed = 0
22
+ failed = 0
23
+
24
+
25
+ def test(name, condition, details=""):
26
+ global passed, failed
27
+ if condition:
28
+ print(f" [PASS] {name}")
29
+ passed += 1
30
+ else:
31
+ print(f" [FAIL] {name} - {details}")
32
+ failed += 1
33
+
34
+
35
def create_test_user(db, first_name: str, last_name: str) -> User:
    """Insert a User row with a generated id and return the refreshed object."""
    from app.models import generate_user_id
    new_user = User(
        id=generate_user_id(first_name),
        first_name=first_name,
        last_name=last_name,
    )
    db.add(new_user)
    db.commit()
    db.refresh(new_user)
    return new_user
44
+
45
+
46
def create_test_log_entry(db, project_id: str, task_id: str, user_id: str, raw_input: str, hours_ago: int = 0) -> LogEntry:
    """Insert a completed-task LogEntry back-dated by ``hours_ago`` hours."""
    new_entry = LogEntry(
        project_id=project_id,
        task_id=task_id,
        user_id=user_id,
        actor_type=ActorType.human,
        action_type=ActionType.task_completed,
        raw_input=raw_input,
        generated_doc=f"Documentation for: {raw_input}",
        tags=["test"],
        # Back-date so date-range queries can distinguish entries.
        created_at=datetime.now() - timedelta(hours=hours_ago),
    )
    db.add(new_entry)
    db.commit()
    db.refresh(new_entry)
    return new_entry
63
+
64
+
65
def setup_test_data():
    """Create two users, one project, two tasks, and three log entries.

    Returns a dict of all generated IDs/names for use by the test functions.
    """
    db = SessionLocal()
    try:
        # Two users who will share one project.
        alice = create_test_user(db, "Alice", "Developer")
        bob = create_test_user(db, "Bob", "Developer")

        # Uniquely-named project owned by Alice.
        project = create_project(
            name=f"Smart Query Test Project {datetime.now().timestamp()}",
            description="Testing smart query functionality",
            user_id=alice.id,
        )
        project_id = project["id"]

        # Bob joins as a regular member.
        db.add(ProjectMembership(project_id=project_id, user_id=bob.id, role="member"))
        db.commit()

        # One task per user.
        task1_id = create_task(
            project_id=project_id,
            title="Implement Authentication API",
            description="Build JWT auth system",
            assigned_to=alice.id,
        )["id"]
        task2_id = create_task(
            project_id=project_id,
            title="Write Unit Tests",
            description="Create test coverage",
            assigned_to=bob.id,
        )["id"]

        # Alice's task is already finished (two hours ago).
        finished = db.query(Task).filter(Task.id == task1_id).first()
        finished.status = TaskStatus.done
        finished.completed_at = datetime.now() - timedelta(hours=2)
        db.commit()

        # Three log entries: two for Alice (one from yesterday), one for Bob.
        log1 = create_test_log_entry(
            db, project_id, task1_id, alice.id,
            "Implemented JWT authentication with refresh tokens",
            hours_ago=2,
        )
        log2 = create_test_log_entry(
            db, project_id, task1_id, alice.id,
            "Added password hashing with bcrypt",
            hours_ago=25,  # Yesterday
        )
        log3 = create_test_log_entry(
            db, project_id, task2_id, bob.id,
            "Created unit tests for auth module",
            hours_ago=1,
        )

        # NOTE(review): assumes app.models.User exposes a ``name`` attribute
        # (presumably derived from first/last name) — confirm against the model.
        return {
            "project_id": project_id,
            "user1_id": alice.id,
            "user1_name": alice.name,
            "user2_id": bob.id,
            "user2_name": bob.name,
            "task1_id": task1_id,
            "task2_id": task2_id,
            "log1_id": log1.id,
            "log2_id": log2.id,
            "log3_id": log3.id,
        }
    finally:
        db.close()
142
+
143
+
144
def test_tool_functions(test_data):
    """Exercise each smart-query tool helper directly against the test DB."""
    print("\n[1/4] Testing Smart Query Tool Functions")
    print("-" * 50)

    from app.smart_query import (
        _tool_get_user_activity,
        _tool_get_task_status,
        _tool_check_completion,
        _tool_list_users,
        QueryContext,
    )

    db = SessionLocal()
    ctx = QueryContext(
        current_user_id=test_data["user1_id"],
        current_datetime=datetime.now(),
        project_id=test_data["project_id"],
    )

    try:
        # --- Activity lookup by explicit user_id over yesterday..tomorrow ---
        print("\n Testing _tool_get_user_activity...")
        today = datetime.now().date()
        yesterday = (datetime.now() - timedelta(days=1)).date()

        by_id = _tool_get_user_activity(db, ctx, {
            "user_id": test_data["user1_id"],
            "date_from": yesterday.isoformat(),
            "date_to": (today + timedelta(days=1)).isoformat(),
        })
        test("get_user_activity returns dict", isinstance(by_id, dict))
        test("get_user_activity has count", "count" in by_id)
        test("get_user_activity has activities", "activities" in by_id)
        test("get_user_activity finds entries", by_id["count"] >= 1, f"got {by_id['count']}")

        # Same lookup, but by display name instead of id.
        by_name = _tool_get_user_activity(db, ctx, {
            "user_name": "Alice",
            "date_from": yesterday.isoformat(),
            "date_to": (today + timedelta(days=1)).isoformat(),
        })
        test("get_user_activity resolves name", by_name["user_id"] == test_data["user1_id"])

        # --- Task status lookup by id, by title, and for a missing task ---
        print("\n Testing _tool_get_task_status...")
        status_by_id = _tool_get_task_status(db, test_data["project_id"], {
            "task_id": test_data["task1_id"],
        })
        test("get_task_status returns dict", isinstance(status_by_id, dict))
        test("get_task_status found=True", status_by_id.get("found") == True)
        test("get_task_status has task", "task" in status_by_id)
        test("get_task_status correct title", "Authentication" in status_by_id["task"]["title"])
        test("get_task_status status is done", status_by_id["task"]["status"] == "done")

        status_by_title = _tool_get_task_status(db, test_data["project_id"], {
            "task_title": "Unit Tests",
        })
        test("get_task_status by title works", status_by_title.get("found") == True)
        test("get_task_status by title correct", "Unit Tests" in status_by_title["task"]["title"])

        status_missing = _tool_get_task_status(db, test_data["project_id"], {
            "task_title": "Nonexistent Task XYZ",
        })
        test("get_task_status not found", status_missing.get("found") == False)

        # --- Completion check, with and without a user filter ---
        print("\n Testing _tool_check_completion...")
        completion = _tool_check_completion(db, test_data["project_id"], {
            "task_title": "Authentication",
        })
        test("check_completion returns dict", isinstance(completion, dict))
        test("check_completion found", completion.get("found") == True)
        test("check_completion is_completed", completion.get("is_completed") == True)
        test("check_completion has details", completion.get("completion_details") is not None)

        by_alice = _tool_check_completion(db, test_data["project_id"], {
            "task_title": "Authentication",
            "user_name": "Alice",
        })
        test("check_completion by user works", by_alice.get("completed_by_specified_user") == True)

        by_bob = _tool_check_completion(db, test_data["project_id"], {
            "task_title": "Authentication",
            "user_name": "Bob",
        })
        test("check_completion wrong user", by_bob.get("completed_by_specified_user") == False)

        # --- Membership listing should show both seeded users ---
        print("\n Testing _tool_list_users...")
        roster = _tool_list_users(db, test_data["project_id"])
        test("list_users returns dict", isinstance(roster, dict))
        test("list_users has users", "users" in roster)
        test("list_users has count", "count" in roster)
        test("list_users finds 2 users", roster["count"] == 2, f"got {roster['count']}")

        member_names = [u["name"] for u in roster["users"]]
        test("list_users includes Alice", any("Alice" in n for n in member_names))
        test("list_users includes Bob", any("Bob" in n for n in member_names))

    finally:
        db.close()
249
+
250
+
251
def test_extract_sources():
    """Check source extraction for each tool result shape plus deduplication."""
    print("\n[2/4] Testing extract_sources Helper")
    print("-" * 50)

    from app.smart_query import extract_sources

    # Activity results become sources of type "activity".
    activity_payload = [
        {
            "tool": "get_user_activity",
            "result": {
                "activities": [
                    {"id": "log-1", "what_was_done": "Built auth", "timestamp": "2024-01-01T10:00:00"},
                    {"id": "log-2", "what_was_done": "Fixed bug", "timestamp": "2024-01-01T11:00:00"},
                ]
            },
        }
    ]
    activity_sources = extract_sources(activity_payload)
    test("extract_sources returns list", isinstance(activity_sources, list))
    test("extract_sources extracts activities", len(activity_sources) == 2)
    test("extract_sources has type=activity", activity_sources[0]["type"] == "activity")

    # Semantic search results become sources of type "memory".
    search_payload = [
        {
            "tool": "semantic_search",
            "result": {
                "results": [
                    {"id": "mem-1", "what_was_done": "Implemented login", "relevance_score": 0.95},
                ]
            },
        }
    ]
    search_sources = extract_sources(search_payload)
    test("extract_sources handles search results", len(search_sources) == 1)
    test("extract_sources has type=memory", search_sources[0]["type"] == "memory")

    # Task lookups become sources of type "task".
    task_payload = [
        {
            "tool": "get_task_status",
            "result": {
                "found": True,
                "task": {"id": "task-1", "title": "Auth API", "status": "done"},
            },
        }
    ]
    task_sources = extract_sources(task_payload)
    test("extract_sources handles task results", len(task_sources) == 1)
    test("extract_sources has type=task", task_sources[0]["type"] == "task")

    # The same id appearing twice must be emitted only once.
    duplicate_payload = [
        {"tool": "get_user_activity", "result": {"activities": [{"id": "dup-1", "what_was_done": "Test"}]}},
        {"tool": "get_user_activity", "result": {"activities": [{"id": "dup-1", "what_was_done": "Test"}]}},
    ]
    deduped = extract_sources(duplicate_payload)
    test("extract_sources deduplicates", len(deduped) == 1)
311
+
312
+
313
def test_query_context():
    """Check that QueryContext stores its three fields verbatim."""
    print("\n[3/4] Testing QueryContext")
    print("-" * 50)

    from app.smart_query import QueryContext

    sample = QueryContext(
        current_user_id="user-123",
        current_datetime=datetime(2024, 1, 15, 10, 30, 0),
        project_id="proj-456",
    )

    test("QueryContext has current_user_id", sample.current_user_id == "user-123")
    test("QueryContext has current_datetime", sample.current_datetime.year == 2024)
    test("QueryContext has project_id", sample.project_id == "proj-456")
329
+
330
+
331
async def test_semantic_search_tool(test_data):
    """Index one embedding and query it back (requires GEMINI_API_KEY)."""
    print("\n[4/4] Testing Semantic Search Tool (requires API)")
    print("-" * 50)

    try:
        from app.smart_query import _tool_semantic_search
        from app.llm import get_embedding
        from app.vectorstore import add_embedding

        # Seed the vector store with a single known document.
        text = "Implemented JWT authentication with refresh tokens"
        vector = await get_embedding(text)
        add_embedding(
            log_entry_id=test_data["log1_id"],
            text=text,
            embedding=vector,
            metadata={
                "project_id": test_data["project_id"],
                "user_id": test_data["user1_id"],
                "task_id": test_data["task1_id"],
                "created_at": datetime.now().isoformat(),
            },
        )

        # Query with a closely related phrase; the seeded doc should match.
        found = await _tool_semantic_search(test_data["project_id"], {
            "search_query": "authentication JWT",
        })

        test("semantic_search returns dict", isinstance(found, dict))
        test("semantic_search has results", "results" in found)
        test("semantic_search has count", "count" in found)
        test("semantic_search finds results", found["count"] > 0, f"got {found['count']}")

        if found["results"]:
            test("semantic_search result has id", "id" in found["results"][0])
            test("semantic_search result has relevance", "relevance_score" in found["results"][0])

    except Exception as e:
        # Without an API key the embedding call fails; record as a soft failure.
        test("semantic_search (API required)", False, str(e))
372
+
373
+
374
async def test_full_smart_query(test_data):
    """End-to-end smart_query call (requires GEMINI_API_KEY)."""
    print("\n[BONUS] Testing Full smart_query Integration")
    print("-" * 50)

    try:
        from app.smart_query import smart_query

        # One simple natural-language question against the seeded project.
        outcome = await smart_query(
            project_id=test_data["project_id"],
            query="What tasks are done?",
            current_user_id=test_data["user1_id"],
            current_datetime=datetime.now().isoformat(),
        )

        test("smart_query returns dict", isinstance(outcome, dict))
        test("smart_query has answer", "answer" in outcome)
        test("smart_query has tools_used", "tools_used" in outcome)
        test("smart_query has sources", "sources" in outcome)
        test("smart_query answer not empty", len(outcome.get("answer", "")) > 0)

        print(f"\n Query: 'What tasks are done?'")
        print(f" Answer: {outcome.get('answer', '')[:200]}...")
        print(f" Tools used: {outcome.get('tools_used', [])}")

    except Exception as e:
        # Without an API key the LLM call fails; record as a soft failure.
        test("smart_query integration (API required)", False, str(e))
402
+
403
+
404
def test_user_resolution(test_data):
    """Test duplicate-name handling in user resolution.

    Covers: recent-work hints, single-match and not-found resolution,
    ambiguous (duplicate first name) resolution, tool-level disambiguation
    responses, and the email/fallback lookup path.
    """
    print("\n[NEW] Testing User Resolution & Duplicate Name Handling")
    print("-" * 50)

    from app.smart_query import (
        _resolve_user_in_project,
        _get_recent_work_hint,
        _tool_get_user_activity,
        _tool_check_completion,
        QueryContext,
    )
    # BUG FIX: the User model used throughout this suite (see create_test_user
    # and test_user_models.py) has id/first_name/last_name columns — it has no
    # ``name`` or ``email`` constructor kwargs, so the duplicate user below must
    # be built the same way as every other test user.
    from app.models import generate_user_id

    db = SessionLocal()

    try:
        # Test _get_recent_work_hint
        print("\n Testing _get_recent_work_hint...")
        hint = _get_recent_work_hint(db, str(test_data["user1_id"]), test_data["project_id"])
        test("get_recent_work_hint returns string", isinstance(hint, str))
        test("get_recent_work_hint has content", len(hint) > 0)
        test("get_recent_work_hint format correct", "worked on:" in hint or hint == "no recent activity")

        # Test _resolve_user_in_project - single match
        print("\n Testing _resolve_user_in_project (single match)...")
        single = _resolve_user_in_project(db, test_data["project_id"], "Alice")
        test("resolve single user found=True", single.get("found") == True)
        test("resolve single user has user_id", "user_id" in single)
        test("resolve single user correct id", single.get("user_id") == str(test_data["user1_id"]))

        # Test _resolve_user_in_project - not found
        print("\n Testing _resolve_user_in_project (not found)...")
        missing = _resolve_user_in_project(db, test_data["project_id"], "NonExistentUser")
        test("resolve not found returns found=False", missing.get("found") == False)
        test("resolve not found reason=not_found", missing.get("reason") == "not_found")
        test("resolve not found has message", "message" in missing)

        # Create duplicate name scenario: a second project member whose first
        # name also matches "Alice".
        print("\n Testing duplicate name handling...")
        duplicate_user = User(
            id=generate_user_id("Alice"),
            first_name="Alice",
            last_name="Smith",
        )
        db.add(duplicate_user)
        db.commit()
        db.refresh(duplicate_user)

        # Add to project
        db.add(ProjectMembership(project_id=test_data["project_id"], user_id=duplicate_user.id, role="member"))
        db.commit()

        # Now search for "Alice" - should find 2 users
        ambiguous = _resolve_user_in_project(db, test_data["project_id"], "Alice")
        test("resolve ambiguous found=False", ambiguous.get("found") == False)
        test("resolve ambiguous reason=ambiguous", ambiguous.get("reason") == "ambiguous")
        test("resolve ambiguous has options", "options" in ambiguous)
        test("resolve ambiguous options is list", isinstance(ambiguous.get("options"), list))
        test("resolve ambiguous has 2 options", len(ambiguous.get("options", [])) == 2, f"got {len(ambiguous.get('options', []))}")

        if ambiguous.get("options"):
            opt = ambiguous["options"][0]
            test("option has user_id", "user_id" in opt)
            test("option has name", "name" in opt)
            test("option has email", "email" in opt)
            test("option has role", "role" in opt)
            test("option has recent_work", "recent_work" in opt)

        # The activity tool should surface the same disambiguation payload.
        print("\n Testing tool returns disambiguation...")
        ctx = QueryContext(
            current_user_id=str(test_data["user1_id"]),
            current_datetime=datetime.now(),
            project_id=test_data["project_id"],
        )
        today = datetime.now().date()
        yesterday = (datetime.now() - timedelta(days=1)).date()

        activity_result = _tool_get_user_activity(db, ctx, {
            "user_name": "Alice",
            "date_from": yesterday.isoformat(),
            "date_to": (today + timedelta(days=1)).isoformat(),
        })
        test("tool returns ambiguous response", activity_result.get("found") == False)
        test("tool has options", "options" in activity_result)

        # The completion tool should as well.
        completion_result = _tool_check_completion(db, test_data["project_id"], {
            "task_title": "Authentication",
            "user_name": "Alice",
        })
        test("check_completion returns ambiguous", completion_result.get("found") == False)
        test("check_completion has options", "options" in completion_result)

        # Remove the duplicate so later tests see a single Alice again.
        db.query(ProjectMembership).filter(ProjectMembership.user_id == duplicate_user.id).delete()
        db.query(User).filter(User.id == duplicate_user.id).delete()
        db.commit()

        # Test email fallback
        print("\n Testing email fallback...")
        fallback = _resolve_user_in_project(db, test_data["project_id"], "alice")
        # This might match by name or email depending on data
        test("email fallback works", fallback.get("found") == True or fallback.get("reason") == "ambiguous")

    finally:
        db.close()
509
+
510
+
511
async def test_memory_search_filters(test_data):
    """Check memory_search filter wiring: none, user, date, combined (needs API)."""
    print("\n[FILTERS] Testing memory_search Filters")
    print("-" * 50)

    try:
        from app.tools.memory import memory_search
        from app.llm import get_embedding
        from app.vectorstore import add_embedding

        # Seed two embeddings attributed to different users on different days.
        text1 = "User1 implemented login feature yesterday"
        text2 = "User2 added payment processing today"

        emb1 = await get_embedding(text1)
        emb2 = await get_embedding(text2)

        yesterday = (datetime.now() - timedelta(days=1)).isoformat()
        today = datetime.now().isoformat()

        add_embedding(
            log_entry_id=str(test_data["log1_id"]) + "-filter1",
            text=text1,
            embedding=emb1,
            metadata={
                "project_id": test_data["project_id"],
                "user_id": str(test_data["user1_id"]),
                "task_id": str(test_data["task1_id"]),
                "created_at": yesterday,
            },
        )
        add_embedding(
            log_entry_id=str(test_data["log2_id"]) + "-filter2",
            text=text2,
            embedding=emb2,
            metadata={
                "project_id": test_data["project_id"],
                "user_id": str(test_data["user2_id"]),
                "task_id": str(test_data["task2_id"]),
                "created_at": today,
            },
        )

        # 1. No filters at all.
        print("\n Testing search without filters...")
        unfiltered = await memory_search(
            project_id=test_data["project_id"],
            query="feature implementation",
        )
        test("search without filters returns answer", "answer" in unfiltered)

        # 2. Restrict to a single user.
        print("\n Testing search with userId filter...")
        user_filtered = await memory_search(
            project_id=test_data["project_id"],
            query="feature implementation",
            filters={"userId": str(test_data["user1_id"])},
        )
        test("search with userId filter returns answer", "answer" in user_filtered)

        # 3. Restrict to a date window spanning both entries.
        print("\n Testing search with date filters...")
        tomorrow = (datetime.now() + timedelta(days=1)).strftime("%Y-%m-%d")
        date_filtered = await memory_search(
            project_id=test_data["project_id"],
            query="feature implementation",
            filters={
                "dateFrom": (datetime.now() - timedelta(days=2)).strftime("%Y-%m-%d"),
                "dateTo": tomorrow,
            },
        )
        test("search with date filter returns answer", "answer" in date_filtered)

        # 4. User and date filters together.
        print("\n Testing search with combined filters...")
        combined = await memory_search(
            project_id=test_data["project_id"],
            query="feature",
            filters={
                "userId": str(test_data["user1_id"]),
                "dateFrom": (datetime.now() - timedelta(days=2)).strftime("%Y-%m-%d"),
            },
        )
        test("search with combined filters returns answer", "answer" in combined)

        print("\n Filter wiring tests passed!")

    except Exception as e:
        # Without an API key the embedding calls fail; record as a soft failure.
        test("memory_search filters (API required)", False, str(e))
601
+
602
+
603
def cleanup_test_data(test_data):
    """Best-effort removal of the project's vector-store entries.

    Failures are deliberately ignored so cleanup never masks a test result.
    """
    try:
        delete_by_project(test_data["project_id"])
    except Exception:
        # FIX: was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort intent without that.
        pass
609
+
610
+
611
async def main():
    """Run the whole suite: init stores, seed data, run tests, clean up, report."""
    print("=" * 60)
    print(" SMART QUERY TEST SUITE")
    print("=" * 60)

    # Bring up the relational DB and the vector store.
    print("\nInitializing database and vectorstore...")
    init_db()
    init_vectorstore()

    # Seed the shared fixture data.
    print("Setting up test data...")
    test_data = setup_test_data()
    print(f" Project ID: {test_data['project_id']}")
    print(f" User 1: {test_data['user1_name']} ({test_data['user1_id']})")
    print(f" User 2: {test_data['user2_name']} ({test_data['user2_id']})")

    try:
        # DB-only tests first.
        test_tool_functions(test_data)
        test_extract_sources()
        test_query_context()
        test_user_resolution(test_data)  # New: duplicate name handling tests

        # API-dependent tests (will fail gracefully without API key)
        await test_semantic_search_tool(test_data)
        await test_memory_search_filters(test_data)  # New: filter wiring tests
        await test_full_smart_query(test_data)

    finally:
        # Always clean up the seeded vector data.
        print("\nCleaning up test data...")
        cleanup_test_data(test_data)

    # Summary
    print("\n" + "=" * 60)
    print(f" RESULTS: {passed} passed, {failed} failed")
    print("=" * 60)

    if failed > 0:
        print("\nNote: Some tests require GEMINI_API_KEY to be set.")
        sys.exit(1)


if __name__ == "__main__":
    asyncio.run(main())
backend/tests/test_tools.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Test script to verify MCP tools work correctly."""
2
+
3
+ import sys
4
+ sys.path.insert(0, '.')
5
+
6
+ from app.database import init_db, SessionLocal
7
+ from app.models import User, generate_user_id
8
+ from app.tools.projects import create_project, list_projects, join_project
9
+ from app.tools.tasks import create_task, list_tasks, list_activity
10
+
11
+
12
def create_test_user(first_name: str, last_name: str) -> str:
    """Insert a User row with a generated id and return that id."""
    db = SessionLocal()
    try:
        new_user = User(
            id=generate_user_id(first_name),
            first_name=first_name,
            last_name=last_name,
        )
        db.add(new_user)
        db.commit()
        return new_user.id
    finally:
        db.close()
23
+
24
+
25
def main():
    """Smoke-test the foundation-layer MCP tools end to end."""
    # Initialize database
    print("Initializing database...")
    init_db()

    # Two users: one creator, one joiner.
    print("\n1. Creating test users...")
    alice_id = create_test_user("Alice", "Developer")
    bob_id = create_test_user("Bob", "Engineer")
    print(f" Created User 1: {alice_id}")
    print(f" Created User 2: {bob_id}")

    # Project creation.
    print("\n2. Testing create_project...")
    project = create_project(
        name="AI Memory System",
        description="Building a shared AI memory for dev teams",
        user_id=alice_id,
    )
    print(f" Created project: {project}")
    project_id = project["id"]

    # Creator should see the project.
    print("\n3. Testing list_projects...")
    alice_projects = list_projects(user_id=alice_id)
    print(f" User 1's projects: {alice_projects}")

    # Second user joins.
    print("\n4. Testing join_project...")
    join_result = join_project(project_id=project_id, user_id=bob_id)
    print(f" Join result: {join_result}")

    # Joiner should now see the project too.
    bob_projects = list_projects(user_id=bob_id)
    print(f" User 2's projects: {bob_projects}")

    # Task creation: one assigned, one unassigned.
    print("\n5. Testing create_task...")
    task1 = create_task(
        project_id=project_id,
        title="Set up database schema",
        description="Create SQLAlchemy models for all entities",
        assigned_to=alice_id,
    )
    print(f" Created task 1: {task1}")

    task2 = create_task(
        project_id=project_id,
        title="Implement MCP server",
        description="Set up MCP server with tool definitions",
    )
    print(f" Created task 2: {task2}")

    # Listing, unfiltered then filtered by status.
    print("\n6. Testing list_tasks...")
    all_tasks = list_tasks(project_id=project_id)
    print(f" All tasks: {all_tasks}")

    print("\n7. Testing list_tasks with status filter...")
    todo_tasks = list_tasks(project_id=project_id, status="todo")
    print(f" Todo tasks: {todo_tasks}")

    # Activity log should still be empty at this point.
    print("\n8. Testing list_activity...")
    activity = list_activity(project_id=project_id)
    print(f" Activity (empty expected): {activity}")

    print("\n✅ All Foundation Layer tests passed!")
    print("\nReady for Dev B to implement:")
    print(" - complete_task (with LLM documentation generation)")
    print(" - memory_search (with vector search)")


if __name__ == "__main__":
    main()
backend/tests/test_user_models.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Unit tests for models module."""
2
+
3
+ import pytest
4
+ from datetime import datetime
5
+
6
+ from app.database import SessionLocal, init_db
7
+ from app.models import (
8
+ User, Project, Task, LogEntry, ProjectMembership,
9
+ generate_uuid, generate_user_id,
10
+ TaskStatus, ActorType, ActionType
11
+ )
12
+
13
+
14
class TestGenerateUserId:
    """Tests for generate_user_id (lowercased name prefix + 4 random digits)."""

    def test_generate_user_id_format(self):
        """A normal name yields 3 lowercase letters followed by 4 digits."""
        uid = generate_user_id("Alice")
        prefix, digits = uid[:3], uid[3:]
        assert len(uid) == 7
        assert prefix == "ali"  # first three letters, lowercased
        assert digits.isdigit()  # numeric suffix

    def test_generate_user_id_lowercase(self):
        """Upper-case input is folded to lower case."""
        assert generate_user_id("JOHN")[:3] == "joh"

    def test_generate_user_id_short_name(self):
        """Names shorter than 3 characters use the whole name as the prefix."""
        uid = generate_user_id("Jo")
        assert uid[:2] == "jo"
        assert len(uid) == 6  # 2 letters + 4 digits

    def test_generate_user_id_uniqueness(self):
        """Repeated calls should mostly differ thanks to the random digits."""
        generated = {generate_user_id("Test") for _ in range(10)}
        # With 4 random digits, collisions are unlikely; require a majority
        # of distinct values rather than strict uniqueness.
        assert len(generated) > 5

    def test_generate_user_id_example(self):
        """Spec example: 'Amal' -> 'ama' followed by 4 digits."""
        uid = generate_user_id("Amal")
        assert uid.startswith("ama")
        assert len(uid) == 7
47
+
48
+
49
class TestGenerateUuid:
    """Tests for generate_uuid."""

    def test_generate_uuid_format(self):
        """Generated value looks like a canonical hyphenated UUID."""
        value = generate_uuid()
        # 36 characters total, 4 hyphen separators (8-4-4-4-12 layout).
        assert len(value) == 36
        assert value.count("-") == 4

    def test_generate_uuid_uniqueness(self):
        """100 generations produce 100 distinct values."""
        seen = {generate_uuid() for _ in range(100)}
        assert len(seen) == 100
62
+
63
+
64
class TestUserModel:
    """Test User model."""

    # NOTE(review): db_session is a pytest fixture, presumably defined in
    # tests/conftest.py — confirm.

    def test_user_creation(self, db_session):
        """Test creating a user with new fields."""
        user_id = generate_user_id("Test")
        user = User(
            id=user_id,
            first_name="Test",
            last_name="User"
        )
        db_session.add(user)
        db_session.commit()

        assert user.id == user_id
        assert user.first_name == "Test"
        assert user.last_name == "User"
        # created_at is expected to be populated automatically on insert.
        assert user.created_at is not None

    def test_user_name_property(self, db_session):
        """Test the name property returns full name."""
        user_id = generate_user_id("John")
        user = User(id=user_id, first_name="John", last_name="Doe")
        db_session.add(user)
        db_session.commit()

        # name is expected to be "<first_name> <last_name>".
        assert user.name == "John Doe"

    def test_user_optional_avatar(self, db_session):
        """Test avatar_url is optional."""
        user_id = generate_user_id("Jane")
        user = User(id=user_id, first_name="Jane", last_name="Smith")
        db_session.add(user)
        db_session.commit()

        # Omitting avatar_url must leave it NULL, not raise.
        assert user.avatar_url is None

    def test_user_with_avatar(self, db_session):
        """Test user with avatar URL."""
        user_id = generate_user_id("Bob")
        user = User(
            id=user_id,
            first_name="Bob",
            last_name="Builder",
            avatar_url="https://example.com/avatar.jpg"
        )
        db_session.add(user)
        db_session.commit()

        assert user.avatar_url == "https://example.com/avatar.jpg"
114
+
115
+
116
class TestProjectModel:
    """Test Project model."""

    # NOTE(review): db_session and create_user are pytest fixtures, presumably
    # defined in tests/conftest.py — confirm.

    def test_project_with_name_as_id(self, db_session, create_user):
        """Test creating project with name as ID."""
        user = create_user("Owner", "Test")

        project = Project(
            id="myproject",  # Name used as ID
            name="myproject",
            description="Test project",
            created_by=user.id
        )
        db_session.add(project)
        db_session.commit()

        assert project.id == "myproject"
        assert project.name == "myproject"

    def test_project_id_uniqueness(self, db_session, create_user):
        """Test that duplicate project IDs fail."""
        user = create_user("Owner", "Test")

        project1 = Project(id="unique", name="unique", description="First", created_by=user.id)
        db_session.add(project1)
        db_session.commit()

        # Second insert re-uses the same primary key; the commit must fail.
        project2 = Project(id="unique", name="unique", description="Second", created_by=user.id)
        db_session.add(project2)

        with pytest.raises(Exception):  # IntegrityError
            db_session.commit()
148
+
149
+
150
class TestTaskModel:
    """Test Task model."""

    def test_task_status_enum(self):
        """Test TaskStatus enum values."""
        assert TaskStatus.todo.value == "todo"
        assert TaskStatus.in_progress.value == "in_progress"
        assert TaskStatus.done.value == "done"

    def test_task_default_status(self, db_session, create_user):
        """Test task defaults to 'todo' status."""
        user = create_user("Dev", "Test")
        project = Project(id="taskproj", name="taskproj", description="Test", created_by=user.id)
        db_session.add(project)
        db_session.commit()

        # No explicit id or status: id presumably defaults via generate_uuid
        # and status via the column default — confirm in app/models.py.
        task = Task(
            project_id=project.id,
            title="Test Task",
            description="Test description"
        )
        db_session.add(task)
        db_session.commit()

        assert task.status == TaskStatus.todo
175
+
176
+
177
class TestEnums:
    """Spot-check serialized string values of the actor/action enums."""

    def test_actor_type_enum(self):
        """ActorType distinguishes humans from agents."""
        assert ActorType.human.value == "human"
        assert ActorType.agent.value == "agent"

    def test_action_type_enum(self):
        """ActionType string values match their member names."""
        expected = {
            ActionType.task_completed: "task_completed",
            ActionType.doc_generated: "doc_generated",
            ActionType.query_answered: "query_answered",
        }
        for member, value in expected.items():
            assert member.value == value
backend/tests/test_vector_query.py ADDED
@@ -0,0 +1,410 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Vector Query End-to-End Test - Run with: python test_vector_query.py
3
+
4
+ This test:
5
+ 1. Creates a project with users, tasks, and log entries
6
+ 2. Generates embeddings and stores them in the vector store
7
+ 3. Tests various smart queries against the populated data
8
+
9
+ Requires GEMINI_API_KEY environment variable.
10
+ """
11
+
12
+ import asyncio
13
+ import sys
14
+ from datetime import datetime, timedelta, timezone
15
+
16
+ sys.path.insert(0, '.')
17
+
18
+ from sqlalchemy import text
19
+ from app.database import init_db, SessionLocal
20
+ from app.models import User, Task, LogEntry, TaskStatus, ActorType, ActionType, ProjectMembership
21
+ from app.vectorstore import init_vectorstore, add_embedding, delete_by_project, count_embeddings
22
+ from app.llm import get_embedding
23
+
24
+ # Test project ID (fixed for easy cleanup)
25
+ TEST_PROJECT_ID = "vector-test-project-001"
26
+
27
+
28
async def setup_test_environment():
    """Create test users, project, tasks, and log entries with embeddings.

    Populates both the relational DB and the vector store for
    TEST_PROJECT_ID, then returns a dict with the project id and both
    user ids so the query phase can ask questions as either user.
    Requires a working embedding backend (GEMINI_API_KEY per module doc).
    """
    print("\n" + "=" * 60)
    print(" SETTING UP TEST ENVIRONMENT")
    print("=" * 60)

    init_db()
    init_vectorstore()

    # Clean up any previous test data
    print("\n[1] Cleaning up previous test data...")
    delete_by_project(TEST_PROJECT_ID)

    db = SessionLocal()
    try:
        # Delete existing test project and related data — children first to
        # respect FK constraints. Bound parameters (:pid) replace the previous
        # f-string interpolation so the SQL is injection-safe by construction.
        params = {"pid": TEST_PROJECT_ID}
        db.execute(text("DELETE FROM log_entries WHERE project_id = :pid"), params)
        db.execute(text("DELETE FROM tasks WHERE project_id = :pid"), params)
        db.execute(text("DELETE FROM project_memberships WHERE project_id = :pid"), params)
        db.execute(text("DELETE FROM projects WHERE id = :pid"), params)
        db.commit()
    except Exception:
        # Best-effort cleanup: tables may not exist yet on a first run.
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit,
        # so only Exception is caught here.
        db.rollback()
    finally:
        db.close()

    db = SessionLocal()
    try:
        # Create users
        print("\n[2] Creating test users...")
        user_alice = User(
            id="user-alice-001",
            first_name="Alice",
            last_name="Developer"
        )
        user_bob = User(
            id="user-bob-001",
            first_name="Bob",
            last_name="Engineer"
        )
        db.add(user_alice)
        db.add(user_bob)
        db.commit()
        print(f" Created: {user_alice.name} ({user_alice.id})")
        print(f" Created: {user_bob.name} ({user_bob.id})")

        # Create project
        print("\n[3] Creating test project...")
        from app.models import Project
        project = Project(
            id=TEST_PROJECT_ID,
            name="E-Commerce Platform",
            description="Building an online shopping platform",
            created_by=user_alice.id
        )
        db.add(project)
        db.commit()
        print(f" Created: {project.name} ({project.id})")

        # Add memberships
        print("\n[4] Adding project memberships...")
        membership1 = ProjectMembership(project_id=TEST_PROJECT_ID, user_id=user_alice.id, role="owner")
        membership2 = ProjectMembership(project_id=TEST_PROJECT_ID, user_id=user_bob.id, role="member")
        db.add(membership1)
        db.add(membership2)
        db.commit()
        print(f" Added Alice as owner")
        print(f" Added Bob as member")

        # Create tasks: three done (at staggered ages) plus one in progress,
        # so time-based and status-based queries have material to find.
        print("\n[5] Creating tasks...")
        tasks_data = [
            {
                "id": "task-auth-001",
                "title": "Implement User Authentication",
                "description": "Build JWT-based login and registration system",
                "assigned_to": user_alice.id,
                "status": TaskStatus.done,
                "completed_at": datetime.now(timezone.utc) - timedelta(days=2)
            },
            {
                "id": "task-cart-001",
                "title": "Build Shopping Cart",
                "description": "Create cart functionality with add/remove items",
                "assigned_to": user_bob.id,
                "status": TaskStatus.done,
                "completed_at": datetime.now(timezone.utc) - timedelta(days=1)
            },
            {
                "id": "task-checkout-001",
                "title": "Implement Checkout Flow",
                "description": "Payment integration and order processing",
                "assigned_to": user_alice.id,
                "status": TaskStatus.in_progress
            },
            {
                "id": "task-tests-001",
                "title": "Write Unit Tests",
                "description": "Create test coverage for auth and cart modules",
                "assigned_to": user_bob.id,
                "status": TaskStatus.done,
                "completed_at": datetime.now(timezone.utc) - timedelta(hours=5)
            }
        ]

        for t in tasks_data:
            task = Task(
                id=t["id"],
                project_id=TEST_PROJECT_ID,
                title=t["title"],
                description=t["description"],
                assigned_to=t["assigned_to"],
                status=t["status"],
                completed_at=t.get("completed_at")  # absent for in-progress tasks
            )
            db.add(task)
            print(f" Created: {task.title} ({task.status.value})")
        db.commit()

        # Create log entries with embeddings
        print("\n[6] Creating log entries and embeddings...")
        log_entries_data = [
            {
                "id": "log-auth-001",
                "task_id": "task-auth-001",
                "user_id": user_alice.id,
                "raw_input": "Implemented JWT authentication with access and refresh tokens. Added password hashing using bcrypt.",
                "generated_doc": "Authentication system implemented using JWT tokens. The system generates short-lived access tokens (15 min) and long-lived refresh tokens (7 days). Passwords are securely hashed using bcrypt with salt rounds of 12. Login endpoint validates credentials and returns both tokens. Refresh endpoint allows obtaining new access tokens.",
                "tags": ["auth", "jwt", "security", "bcrypt"],
                "hours_ago": 48
            },
            {
                "id": "log-cart-001",
                "task_id": "task-cart-001",
                "user_id": user_bob.id,
                "raw_input": "Built shopping cart with Redux state management. Added add/remove/update quantity functions.",
                "generated_doc": "Shopping cart functionality using Redux for state management. Cart persists to localStorage. Features include: adding products with quantities, removing items, updating quantities, calculating totals with tax, and applying discount codes. Cart state syncs across browser tabs.",
                "tags": ["cart", "redux", "frontend", "state"],
                "hours_ago": 24
            },
            {
                "id": "log-api-001",
                "task_id": "task-auth-001",
                "user_id": user_alice.id,
                "raw_input": "Created REST API endpoints for user profile management.",
                "generated_doc": "REST API endpoints for user profiles. GET /api/users/me returns current user. PUT /api/users/me updates profile fields (name, avatar, preferences). Password change requires current password verification. All endpoints require valid JWT in Authorization header.",
                "tags": ["api", "rest", "profile", "user"],
                "hours_ago": 36
            },
            {
                "id": "log-tests-001",
                "task_id": "task-tests-001",
                "user_id": user_bob.id,
                "raw_input": "Wrote unit tests for authentication and cart modules. Achieved 85% coverage.",
                "generated_doc": "Unit test suite for authentication and cart modules. Auth tests cover: login success/failure, token refresh, password validation, protected routes. Cart tests cover: add/remove items, quantity updates, price calculations, discount application. Using Jest with React Testing Library. Coverage at 85%.",
                "tags": ["tests", "jest", "coverage", "unit-tests"],
                "hours_ago": 5
            },
            {
                "id": "log-today-001",
                "task_id": "task-checkout-001",
                "user_id": user_alice.id,
                "raw_input": "Started working on Stripe payment integration for checkout.",
                "generated_doc": "Began Stripe payment integration. Set up Stripe SDK and test account. Created payment intent endpoint. Working on client-side card element component. Next: handle payment confirmation and webhooks for order updates.",
                "tags": ["checkout", "stripe", "payments", "integration"],
                "hours_ago": 2
            }
        ]

        for entry_data in log_entries_data:
            # Create log entry (created_at back-dated by hours_ago so
            # "yesterday"/"today" style queries have a real timeline)
            entry = LogEntry(
                id=entry_data["id"],
                project_id=TEST_PROJECT_ID,
                task_id=entry_data["task_id"],
                user_id=entry_data["user_id"],
                actor_type=ActorType.human,
                action_type=ActionType.task_completed,
                raw_input=entry_data["raw_input"],
                generated_doc=entry_data["generated_doc"],
                tags=entry_data["tags"],
                created_at=datetime.now(timezone.utc) - timedelta(hours=entry_data["hours_ago"])
            )
            db.add(entry)
            db.commit()

            # Embed raw input + generated doc + tags as one chunk so semantic
            # queries can match on any of the three.
            text_to_embed = f"""
Task: {entry_data['raw_input']}
Documentation: {entry_data['generated_doc']}
Tags: {', '.join(entry_data['tags'])}
"""
            embedding = await get_embedding(text_to_embed)
            add_embedding(
                log_entry_id=entry_data["id"],
                text=text_to_embed,
                embedding=embedding,
                metadata={
                    "project_id": TEST_PROJECT_ID,
                    "user_id": entry_data["user_id"],
                    "task_id": entry_data["task_id"],
                    "created_at": entry.created_at.isoformat()
                }
            )
            print(f" Created: {entry_data['id']} + embedding")

        db.commit()

        # Verify embeddings
        embed_count = count_embeddings(TEST_PROJECT_ID)
        print(f"\n Total embeddings stored: {embed_count}")

        return {
            "project_id": TEST_PROJECT_ID,
            "user_alice_id": user_alice.id,
            "user_bob_id": user_bob.id
        }

    finally:
        db.close()
248
+
249
+
250
async def run_test_queries(test_data):
    """Run various queries against the test data.

    test_data is the dict returned by setup_test_environment (project id and
    both user ids). Returns a list of per-query result dicts, each with a
    "success" flag and either the smart_query result or the error string.
    """
    print("\n" + "=" * 60)
    print(" RUNNING TEST QUERIES")
    print("=" * 60)

    # Imported here rather than at module top — presumably to defer loading
    # the LLM/query stack until after setup succeeds; confirm.
    from app.smart_query import smart_query

    # Each case: natural-language question, the asking user, and a
    # human-readable label for the summary output.
    queries = [
        {
            "query": "What did I do yesterday?",
            "user_id": test_data["user_alice_id"],
            "description": "User activity query (yesterday)"
        },
        {
            "query": "What did Bob work on?",
            "user_id": test_data["user_alice_id"],
            "description": "Other user's activity"
        },
        {
            "query": "How does authentication work?",
            "user_id": test_data["user_alice_id"],
            "description": "Semantic search for auth"
        },
        {
            "query": "Status of the checkout task?",
            "user_id": test_data["user_alice_id"],
            "description": "Task status query"
        },
        {
            "query": "Did Bob complete the shopping cart?",
            "user_id": test_data["user_alice_id"],
            "description": "Task completion check"
        },
        {
            "query": "What tests were written?",
            "user_id": test_data["user_bob_id"],
            "description": "Semantic search for tests"
        },
        {
            "query": "What payment system is being used?",
            "user_id": test_data["user_alice_id"],
            "description": "Semantic search for payments"
        },
        {
            "query": "Who are the team members?",
            "user_id": test_data["user_alice_id"],
            "description": "List users query"
        }
    ]

    results = []
    for i, q in enumerate(queries, 1):
        print(f"\n[Query {i}] {q['description']}")
        print(f" Question: \"{q['query']}\"")
        print(f" User: {q['user_id']}")

        try:
            result = await smart_query(
                project_id=test_data["project_id"],
                query=q["query"],
                current_user_id=q["user_id"],
                current_datetime=datetime.now(timezone.utc).isoformat()
            )

            print(f"\n Tools Used: {result.get('tools_used', [])}")
            print(f"\n Answer:")
            answer = result.get("answer", "No answer")
            # Print answer with word wrap
            for line in answer.split('\n'):
                print(f" {line}")

            # Show at most the first three supporting sources, truncated.
            if result.get("sources"):
                print(f"\n Sources ({len(result['sources'])}):")
                for src in result["sources"][:3]:
                    print(f" - [{src.get('type', 'unknown')}] {src.get('summary', 'No summary')[:60]}...")

            results.append({"query": q, "result": result, "success": True})

        except Exception as e:
            # Record the failure but keep running the remaining queries.
            print(f"\n ERROR: {str(e)}")
            results.append({"query": q, "error": str(e), "success": False})

        print("\n" + "-" * 60)

    return results
336
+
337
+
338
def print_summary(results):
    """Print a pass/fail summary of the executed queries.

    Expects the list produced by run_test_queries: dicts carrying a
    "success" flag, a "query" descriptor, and (on failure) an "error".
    """
    banner = "=" * 60
    print("\n" + banner)
    print(" TEST SUMMARY")
    print(banner)

    failures = [r for r in results if not r["success"]]
    successful = len(results) - len(failures)

    print(f"\n Total queries: {len(results)}")
    print(f" Successful: {successful}")
    print(f" Failed: {len(failures)}")

    # Detail each failed query so it can be investigated.
    if len(failures) > 0:
        print("\n Failed queries:")
        for r in failures:
            print(f" - {r['query']['description']}: {r.get('error', 'Unknown error')}")
356
+
357
+
358
def cleanup():
    """Clean up test data.

    Removes the vector-store entries and all DB rows belonging to
    TEST_PROJECT_ID. Safe to call repeatedly; errors are reported but not
    raised so the test runner can always finish.
    """
    print("\n[CLEANUP] Removing test data...")
    try:
        delete_by_project(TEST_PROJECT_ID)
        db = SessionLocal()
        try:
            # Children first to satisfy FK constraints. Bound parameters
            # (:pid) replace the previous f-string interpolation so the SQL
            # is injection-safe by construction.
            params = {"pid": TEST_PROJECT_ID}
            db.execute(text("DELETE FROM log_entries WHERE project_id = :pid"), params)
            db.execute(text("DELETE FROM tasks WHERE project_id = :pid"), params)
            db.execute(text("DELETE FROM project_memberships WHERE project_id = :pid"), params)
            db.execute(text("DELETE FROM projects WHERE id = :pid"), params)
            db.commit()
            print(" Cleanup complete.")
        finally:
            db.close()
    except Exception as e:
        print(f" Cleanup error: {e}")
375
+
376
+
377
async def main():
    """End-to-end driver: set up fixtures, run the queries, summarize.

    Always offers interactive cleanup at the end — even after a fatal
    error — so test rows are not silently left behind.
    """
    print("=" * 60)
    print(" VECTOR QUERY END-TO-END TEST")
    print(" Tests smart_query against populated vector store")
    print("=" * 60)

    try:
        # Setup: create users/project/tasks/logs + embeddings.
        test_data = await setup_test_environment()

        # Run queries against the populated store.
        results = await run_test_queries(test_data)

        # Summary of pass/fail counts.
        print_summary(results)

    except Exception as e:
        # Print the traceback but fall through to the cleanup prompt.
        print(f"\nFATAL ERROR: {e}")
        import traceback
        traceback.print_exc()

    finally:
        # Ask about cleanup — retaining data is useful for manual inspection.
        print("\n" + "=" * 60)
        response = input("Clean up test data? (y/n): ").strip().lower()
        if response == 'y':
            cleanup()
        else:
            print(f"Test data retained. Project ID: {TEST_PROJECT_ID}")
            print("Run cleanup manually or delete project from database.")
+
408
+
409
+ if __name__ == "__main__":
410
+ asyncio.run(main())
docker-compose.yml ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Two-service stack: FastAPI backend on :8000 and an nginx-served frontend
# on :80 (nginx proxies /api to the backend — see frontend/nginx.conf).
services:
  backend:
    build: ./backend
    container_name: pm-backend
    ports:
      - "8000:8000"
    # Runtime secrets/config come from backend/.env (not committed).
    env_file: ./backend/.env
    volumes:
      # Named volume keeps /app/data (presumably the SQLite DB and vector
      # store — confirm) across container rebuilds.
      - backend-data:/app/data
    networks:
      - pm-network
    restart: unless-stopped

  frontend:
    build: ./frontend
    container_name: pm-frontend
    ports:
      - "80:80"
    # Start order only — nginx resolves the "backend" hostname at request time.
    depends_on:
      - backend
    networks:
      - pm-network
    restart: unless-stopped

networks:
  pm-network:
    driver: bridge

volumes:
  backend-data: {}
frontend/.gitignore ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Logs
2
+ logs
3
+ *.log
4
+ npm-debug.log*
5
+ yarn-debug.log*
6
+ yarn-error.log*
7
+ pnpm-debug.log*
8
+ lerna-debug.log*
9
+
10
+ node_modules
11
+ dist
12
+ dist-ssr
13
+ *.local
14
+
15
+ # Editor directories and files
16
+ .vscode/*
17
+ !.vscode/extensions.json
18
+ .idea
19
+ .DS_Store
20
+ *.suo
21
+ *.ntvs*
22
+ *.njsproj
23
+ *.sln
24
+ *.sw?
frontend/Dockerfile ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Frontend Dockerfile
# Multi-stage build: stage 1 compiles the Vite bundle with Node, stage 2
# serves the static output with nginx.
FROM node:20-alpine AS builder

# Set working directory
WORKDIR /app

# Copy package files first so the install layer is cached until
# package*.json changes
COPY package*.json ./

# Install dependencies (npm ci: clean, lockfile-exact install)
RUN npm ci

# Copy source code
COPY . .

# Build the application (static output lands in /app/dist)
RUN npm run build

# Production stage
FROM nginx:alpine

# Copy built files to nginx
COPY --from=builder /app/dist /usr/share/nginx/html

# Copy nginx configuration (SPA fallback + /api proxy)
COPY nginx.conf /etc/nginx/conf.d/default.conf

# Expose port
EXPOSE 80

# Start nginx in the foreground so it stays PID 1 for Docker
CMD ["nginx", "-g", "daemon off;"]
frontend/README.md ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # React + TypeScript + Vite
2
+
3
+ This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
4
+
5
+ Currently, two official plugins are available:
6
+
7
+ - [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) (or [oxc](https://oxc.rs) when used in [rolldown-vite](https://vite.dev/guide/rolldown)) for Fast Refresh
8
+ - [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
9
+
10
+ ## React Compiler
11
+
12
+ The React Compiler is not enabled on this template because of its impact on dev & build performances. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
13
+
14
+ ## Expanding the ESLint configuration
15
+
16
+ If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
17
+
18
+ ```js
19
+ export default defineConfig([
20
+ globalIgnores(['dist']),
21
+ {
22
+ files: ['**/*.{ts,tsx}'],
23
+ extends: [
24
+ // Other configs...
25
+
26
+ // Remove tseslint.configs.recommended and replace with this
27
+ tseslint.configs.recommendedTypeChecked,
28
+ // Alternatively, use this for stricter rules
29
+ tseslint.configs.strictTypeChecked,
30
+ // Optionally, add this for stylistic rules
31
+ tseslint.configs.stylisticTypeChecked,
32
+
33
+ // Other configs...
34
+ ],
35
+ languageOptions: {
36
+ parserOptions: {
37
+ project: ['./tsconfig.node.json', './tsconfig.app.json'],
38
+ tsconfigRootDir: import.meta.dirname,
39
+ },
40
+ // other options...
41
+ },
42
+ },
43
+ ])
44
+ ```
45
+
46
+ You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules:
47
+
48
+ ```js
49
+ // eslint.config.js
50
+ import reactX from 'eslint-plugin-react-x'
51
+ import reactDom from 'eslint-plugin-react-dom'
52
+
53
+ export default defineConfig([
54
+ globalIgnores(['dist']),
55
+ {
56
+ files: ['**/*.{ts,tsx}'],
57
+ extends: [
58
+ // Other configs...
59
+ // Enable lint rules for React
60
+ reactX.configs['recommended-typescript'],
61
+ // Enable lint rules for React DOM
62
+ reactDom.configs.recommended,
63
+ ],
64
+ languageOptions: {
65
+ parserOptions: {
66
+ project: ['./tsconfig.node.json', './tsconfig.app.json'],
67
+ tsconfigRootDir: import.meta.dirname,
68
+ },
69
+ // other options...
70
+ },
71
+ },
72
+ ])
73
+ ```
frontend/eslint.config.js ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// ESLint flat config for the Vite + React + TypeScript frontend.
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import tseslint from 'typescript-eslint'
import { defineConfig, globalIgnores } from 'eslint/config'

export default defineConfig([
  // Build output is generated — never lint it.
  globalIgnores(['dist']),
  {
    files: ['**/*.{ts,tsx}'],
    extends: [
      js.configs.recommended,
      tseslint.configs.recommended,
      reactHooks.configs.flat.recommended,
      reactRefresh.configs.vite,
    ],
    languageOptions: {
      ecmaVersion: 2020,
      globals: globals.browser,
    },
  },
])
frontend/index.html ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!doctype html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta charset="UTF-8" />
5
+ <link rel="icon" type="image/svg+xml" href="/vite.svg" />
6
+ <meta name="viewport" content="width=device-width, initial-scale=1.0" />
7
+ <title>frontend</title>
8
+ </head>
9
+ <body>
10
+ <div id="root"></div>
11
+ <script type="module" src="/src/main.tsx"></script>
12
+ </body>
13
+ </html>
frontend/nginx.conf ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
server {
    listen 80;
    server_name localhost;

    root /usr/share/nginx/html;
    index index.html;

    # Frontend routes — SPA fallback: unknown paths serve index.html so
    # client-side navigation survives refreshes and deep links.
    location / {
        try_files $uri $uri/ /index.html;
    }

    # API proxy — "backend" is the docker-compose service name on the
    # shared network; resolved by Docker's internal DNS.
    location /api {
        proxy_pass http://backend:8000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
        proxy_cache_bypass $http_upgrade;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
    }

    # WebSocket support (if needed) — Upgrade/Connection headers required
    # for the HTTP→WS handshake to pass through the proxy.
    location /ws {
        proxy_pass http://backend:8000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";
    }
}
frontend/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
frontend/package.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "frontend",
3
+ "private": true,
4
+ "version": "0.0.0",
5
+ "type": "module",
6
+ "scripts": {
7
+ "dev": "vite",
8
+ "build": "tsc -b && vite build",
9
+ "lint": "eslint .",
10
+ "preview": "vite preview"
11
+ },
12
+ "dependencies": {
13
+ "react": "^19.2.0",
14
+ "react-dom": "^19.2.0",
15
+ "react-markdown": "^10.1.0"
16
+ },
17
+ "devDependencies": {
18
+ "@eslint/js": "^9.39.1",
19
+ "@tailwindcss/vite": "^4.1.17",
20
+ "@types/node": "^24.10.1",
21
+ "@types/react": "^19.2.5",
22
+ "@types/react-dom": "^19.2.3",
23
+ "@vitejs/plugin-react": "^5.1.1",
24
+ "eslint": "^9.39.1",
25
+ "eslint-plugin-react-hooks": "^7.0.1",
26
+ "eslint-plugin-react-refresh": "^0.4.24",
27
+ "globals": "^16.5.0",
28
+ "tailwindcss": "^4.1.17",
29
+ "typescript": "~5.9.3",
30
+ "typescript-eslint": "^8.46.4",
31
+ "vite": "^7.2.4"
32
+ }
33
+ }
frontend/public/vite.svg ADDED
frontend/src/App.tsx ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState, useEffect } from 'react';
2
+ import { UserProvider, useUser } from './context/UserContext';
3
+ import { ProjectProvider, useProject } from './context/ProjectContext';
4
+ import { LoginPage } from './pages/LoginPage';
5
+ import { ProjectSelectionPage } from './pages/ProjectSelectionPage';
6
+ import { TaskSetupPage } from './pages/TaskSetupPage';
7
+ import { ActivityPage } from './pages/ActivityPage';
8
+ import { TaskSolverPage } from './pages/TaskSolverPage';
9
+ import { api } from './api/client';
10
+ import type { Task } from './types';
11
+
12
+ function AppContent() {
13
+ const { user, isLoading: userLoading } = useUser();
14
+ const { currentProject } = useProject();
15
+ const [taskSetupComplete, setTaskSetupComplete] = useState<boolean | null>(null);
16
+ const [checkingTasks, setCheckingTasks] = useState(false);
17
+ const [activeTask, setActiveTask] = useState<Task | null>(null);
18
+
19
+ // Check if project has tasks (for owner flow)
20
+ useEffect(() => {
21
+ if (!currentProject || !user) {
22
+ setTaskSetupComplete(null);
23
+ return;
24
+ }
25
+
26
+ // Members skip task setup
27
+ if (currentProject.role !== 'owner') {
28
+ setTaskSetupComplete(true);
29
+ return;
30
+ }
31
+
32
+ // Owners: check if tasks exist
33
+ const checkTasks = async () => {
34
+ setCheckingTasks(true);
35
+ try {
36
+ const result = await api.listTasks(currentProject.id);
37
+ setTaskSetupComplete((result.tasks?.length || 0) > 0);
38
+ } catch {
39
+ // If error, assume setup not complete
40
+ setTaskSetupComplete(false);
41
+ } finally {
42
+ setCheckingTasks(false);
43
+ }
44
+ };
45
+
46
+ checkTasks();
47
+ }, [currentProject, user]);
48
+
49
+ // Show loading state while checking for stored user
50
+ if (userLoading) {
51
+ return (
52
+ <div className="min-h-screen bg-gradient-to-br from-slate-900 via-purple-900 to-slate-900 flex items-center justify-center">
53
+ <div className="text-white text-xl">Loading...</div>
54
+ </div>
55
+ );
56
+ }
57
+
58
+ // No user → Login
59
+ if (!user) {
60
+ return <LoginPage />;
61
+ }
62
+
63
+ // User but no project → Project Selection
64
+ if (!currentProject) {
65
+ return <ProjectSelectionPage />;
66
+ }
67
+
68
+ // Checking tasks status
69
+ if (checkingTasks || taskSetupComplete === null) {
70
+ return (
71
+ <div className="min-h-screen bg-gradient-to-br from-slate-900 via-purple-900 to-slate-900 flex items-center justify-center">
72
+ <div className="text-white text-xl">Loading project...</div>
73
+ </div>
74
+ );
75
+ }
76
+
77
+ // Owner + no tasks → Task Setup
78
+ if (currentProject.role === 'owner' && !taskSetupComplete) {
79
+ return <TaskSetupPage onComplete={() => setTaskSetupComplete(true)} />;
80
+ }
81
+
82
+ // Active task → Task Solver Page
83
+ if (activeTask) {
84
+ return (
85
+ <TaskSolverPage
86
+ task={activeTask}
87
+ onBack={() => setActiveTask(null)}
88
+ onTaskCompleted={() => setActiveTask(null)}
89
+ />
90
+ );
91
+ }
92
+
93
+ // User + Project + Tasks → Activity Page
94
+ return <ActivityPage onStartTask={setActiveTask} />;
95
+ }
96
+
97
/** Root component: mounts user and project context providers around the app. */
function App() {
  return (
    <UserProvider>
      <ProjectProvider>
        <AppContent />
      </ProjectProvider>
    </UserProvider>
  );
}
106
+
107
+ export default App;
frontend/src/api/client.ts ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import type { User, UserCreate, Project, ProjectCreate, ProjectsResponse, ProjectAvailability, Task, TaskCreate, TasksResponse, MembersResponse, ActivityResponse } from '../types';
2
+
3
+ const API_BASE = 'http://localhost:8000/api';
4
+
5
+ class ApiClient {
6
+ private async request<T>(endpoint: string, options?: RequestInit): Promise<T> {
7
+ const response = await fetch(`${API_BASE}${endpoint}`, {
8
+ headers: {
9
+ 'Content-Type': 'application/json',
10
+ ...options?.headers,
11
+ },
12
+ ...options,
13
+ });
14
+
15
+ if (!response.ok) {
16
+ const error = await response.json().catch(() => ({ error: 'Request failed' }));
17
+ throw new Error(error.error || error.detail || 'Request failed');
18
+ }
19
+
20
+ return response.json();
21
+ }
22
+
23
+ // User endpoints
24
+ async createUser(data: UserCreate): Promise<User> {
25
+ return this.request<User>('/users', {
26
+ method: 'POST',
27
+ body: JSON.stringify(data),
28
+ });
29
+ }
30
+
31
+ async getUser(userId: string): Promise<User> {
32
+ return this.request<User>(`/users/${userId}`);
33
+ }
34
+
35
+ async listUsers(): Promise<User[]> {
36
+ return this.request<User[]>('/users');
37
+ }
38
+
39
+ // Project endpoints
40
+ async checkProjectAvailability(projectId: string): Promise<ProjectAvailability> {
41
+ return this.request<ProjectAvailability>(`/projects/check/${projectId}`);
42
+ }
43
+
44
+ async listProjects(userId: string): Promise<ProjectsResponse> {
45
+ return this.request<ProjectsResponse>(`/projects?userId=${userId}`);
46
+ }
47
+
48
+ async createProject(data: ProjectCreate): Promise<Project> {
49
+ return this.request<Project>('/projects', {
50
+ method: 'POST',
51
+ body: JSON.stringify(data),
52
+ });
53
+ }
54
+
55
+ async joinProject(projectId: string, userId: string): Promise<{ message: string; project_id: string; role: string }> {
56
+ return this.request(`/projects/${projectId}/join`, {
57
+ method: 'POST',
58
+ body: JSON.stringify({ userId }),
59
+ });
60
+ }
61
+
62
+ // Task endpoints
63
+ async listTasks(projectId: string, status?: string): Promise<TasksResponse> {
64
+ const query = status ? `?status=${status}` : '';
65
+ return this.request<TasksResponse>(`/projects/${projectId}/tasks${query}`);
66
+ }
67
+
68
+ async createTask(projectId: string, data: TaskCreate): Promise<Task> {
69
+ return this.request<Task>(`/projects/${projectId}/tasks`, {
70
+ method: 'POST',
71
+ body: JSON.stringify(data),
72
+ });
73
+ }
74
+
75
+ async generateTasks(projectId: string, count = 50): Promise<{ tasks: { title: string; description: string }[] }> {
76
+ return this.request(`/projects/${projectId}/tasks/generate`, {
77
+ method: 'POST',
78
+ body: JSON.stringify({ count: Math.min(count, 50) }),
79
+ });
80
+ }
81
+
82
+ async completeTask(taskId: string, data: { userId: string; whatIDid: string; codeSnippet?: string }) {
83
+ return this.request(`/tasks/${taskId}/complete`, {
84
+ method: 'POST',
85
+ body: JSON.stringify(data),
86
+ });
87
+ }
88
+
89
+ async updateTaskStatus(taskId: string, status: 'todo' | 'in_progress' | 'done'): Promise<Task> {
90
+ return this.request<Task>(`/tasks/${taskId}/status`, {
91
+ method: 'PATCH',
92
+ body: JSON.stringify({ status }),
93
+ });
94
+ }
95
+
96
+ // Members
97
+ async getMembers(projectId: string): Promise<MembersResponse> {
98
+ return this.request<MembersResponse>(`/projects/${projectId}/members`);
99
+ }
100
+
101
+ // Search & Activity
102
+ async searchProject(projectId: string, query: string, filters?: { userId?: string; dateFrom?: string; dateTo?: string }) {
103
+ return this.request(`/projects/${projectId}/search`, {
104
+ method: 'POST',
105
+ body: JSON.stringify({ query, filters }),
106
+ });
107
+ }
108
+
109
+ async getActivity(projectId: string, limit = 20): Promise<ActivityResponse> {
110
+ return this.request<ActivityResponse>(`/projects/${projectId}/activity?limit=${limit}`);
111
+ }
112
+
113
+ // Smart Query
114
+ async smartQuery(projectId: string, query: string, currentUserId: string) {
115
+ return this.request(`/projects/${projectId}/smart-query`, {
116
+ method: 'POST',
117
+ body: JSON.stringify({
118
+ query,
119
+ currentUserId,
120
+ currentDatetime: new Date().toISOString(),
121
+ }),
122
+ });
123
+ }
124
+
125
+ // Task Chat (Agent)
126
+ async taskChat(
127
+ projectId: string,
128
+ taskId: string,
129
+ userId: string,
130
+ message: string,
131
+ history: { role: string; content: string }[]
132
+ ): Promise<{ message: string; taskCompleted?: boolean; taskStatus?: string }> {
133
+ return this.request(`/tasks/${taskId}/chat`, {
134
+ method: 'POST',
135
+ body: JSON.stringify({
136
+ projectId,
137
+ userId,
138
+ message,
139
+ history,
140
+ currentDatetime: new Date().toISOString(),
141
+ }),
142
+ });
143
+ }
144
+ }
145
+
146
+ export const api = new ApiClient();
frontend/src/assets/react.svg ADDED
frontend/src/components/TaskCompleteModal.tsx ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { useState } from 'react';
2
+ import type { Task } from '../types';
3
+
4
+ interface TaskCompleteModalProps {
5
+ task: Task;
6
+ onClose: () => void;
7
+ onComplete: (taskId: string, whatIDid: string, codeSnippet?: string) => Promise<void>;
8
+ }
9
+
10
+ export function TaskCompleteModal({ task, onClose, onComplete }: TaskCompleteModalProps) {
11
+ const [whatIDid, setWhatIDid] = useState('');
12
+ const [codeSnippet, setCodeSnippet] = useState('');
13
+ const [isSubmitting, setIsSubmitting] = useState(false);
14
+ const [error, setError] = useState('');
15
+
16
+ const handleSubmit = async (e: React.FormEvent) => {
17
+ e.preventDefault();
18
+ if (!whatIDid.trim()) {
19
+ setError('Please describe what you did');
20
+ return;
21
+ }
22
+
23
+ setIsSubmitting(true);
24
+ setError('');
25
+
26
+ try {
27
+ await onComplete(task.id, whatIDid.trim(), codeSnippet.trim() || undefined);
28
+ onClose();
29
+ } catch (err) {
30
+ setError(err instanceof Error ? err.message : 'Failed to complete task');
31
+ } finally {
32
+ setIsSubmitting(false);
33
+ }
34
+ };
35
+
36
+ return (
37
+ <div className="fixed inset-0 z-50 flex items-center justify-center">
38
+ {/* Backdrop */}
39
+ <div
40
+ className="absolute inset-0 bg-black/60 backdrop-blur-sm"
41
+ onClick={onClose}
42
+ />
43
+
44
+ {/* Modal */}
45
+ <div className="relative bg-slate-900 border border-white/20 rounded-xl w-full max-w-lg mx-4 shadow-2xl">
46
+ {/* Header */}
47
+ <div className="px-6 py-4 border-b border-white/10">
48
+ <h2 className="text-xl font-bold text-white">Complete Task</h2>
49
+ <p className="text-purple-300/70 text-sm mt-1 truncate">{task.title}</p>
50
+ </div>
51
+
52
+ {/* Form */}
53
+ <form onSubmit={handleSubmit} className="p-6 space-y-4">
54
+ {error && (
55
+ <div className="p-3 bg-red-500/20 border border-red-500/50 rounded-lg text-red-200 text-sm">
56
+ {error}
57
+ </div>
58
+ )}
59
+
60
+ <div>
61
+ <label className="block text-white text-sm font-medium mb-2">
62
+ What did you do? <span className="text-red-400">*</span>
63
+ </label>
64
+ <textarea
65
+ value={whatIDid}
66
+ onChange={(e) => setWhatIDid(e.target.value)}
67
+ placeholder="Describe what you accomplished..."
68
+ rows={3}
69
+ className="w-full px-4 py-3 bg-white/5 border border-white/10 rounded-lg text-white placeholder-white/40 focus:outline-none focus:ring-2 focus:ring-purple-500 resize-none"
70
+ autoFocus
71
+ />
72
+ </div>
73
+
74
+ <div>
75
+ <label className="block text-white text-sm font-medium mb-2">
76
+ Code snippet (optional)
77
+ </label>
78
+ <textarea
79
+ value={codeSnippet}
80
+ onChange={(e) => setCodeSnippet(e.target.value)}
81
+ placeholder="Paste any relevant code..."
82
+ rows={4}
83
+ className="w-full px-4 py-3 bg-white/5 border border-white/10 rounded-lg text-white placeholder-white/40 focus:outline-none focus:ring-2 focus:ring-purple-500 resize-none font-mono text-sm"
84
+ />
85
+ </div>
86
+
87
+ {/* Actions */}
88
+ <div className="flex gap-3 pt-2">
89
+ <button
90
+ type="button"
91
+ onClick={onClose}
92
+ disabled={isSubmitting}
93
+ className="flex-1 px-4 py-3 bg-white/10 hover:bg-white/20 text-white rounded-lg transition-all"
94
+ >
95
+ Cancel
96
+ </button>
97
+ <button
98
+ type="submit"
99
+ disabled={isSubmitting || !whatIDid.trim()}
100
+ className="flex-1 px-4 py-3 bg-green-600 hover:bg-green-700 disabled:bg-green-800 disabled:cursor-not-allowed text-white font-medium rounded-lg transition-all"
101
+ >
102
+ {isSubmitting ? 'Completing...' : 'Complete Task'}
103
+ </button>
104
+ </div>
105
+ </form>
106
+ </div>
107
+ </div>
108
+ );
109
+ }
frontend/src/context/ProjectContext.tsx ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { createContext, useContext, useState, useEffect, type ReactNode } from 'react';
2
+ import type { Project } from '../types';
3
+
4
+ interface ProjectContextType {
5
+ currentProject: Project | null;
6
+ setCurrentProject: (project: Project | null) => void;
7
+ clearProject: () => void;
8
+ }
9
+
10
+ const ProjectContext = createContext<ProjectContextType | null>(null);
11
+
12
+ const PROJECT_STORAGE_KEY = 'project_memory_current_project';
13
+
14
+ export function ProjectProvider({ children }: { children: ReactNode }) {
15
+ const [currentProject, setCurrentProjectState] = useState<Project | null>(null);
16
+
17
+ // Load from localStorage on mount
18
+ useEffect(() => {
19
+ const stored = localStorage.getItem(PROJECT_STORAGE_KEY);
20
+ if (stored) {
21
+ try {
22
+ setCurrentProjectState(JSON.parse(stored));
23
+ } catch {
24
+ localStorage.removeItem(PROJECT_STORAGE_KEY);
25
+ }
26
+ }
27
+ }, []);
28
+
29
+ const setCurrentProject = (project: Project | null) => {
30
+ setCurrentProjectState(project);
31
+ if (project) {
32
+ localStorage.setItem(PROJECT_STORAGE_KEY, JSON.stringify(project));
33
+ } else {
34
+ localStorage.removeItem(PROJECT_STORAGE_KEY);
35
+ }
36
+ };
37
+
38
+ const clearProject = () => {
39
+ setCurrentProjectState(null);
40
+ localStorage.removeItem(PROJECT_STORAGE_KEY);
41
+ };
42
+
43
+ return (
44
+ <ProjectContext.Provider value={{ currentProject, setCurrentProject, clearProject }}>
45
+ {children}
46
+ </ProjectContext.Provider>
47
+ );
48
+ }
49
+
50
+ export function useProject() {
51
+ const context = useContext(ProjectContext);
52
+ if (!context) {
53
+ throw new Error('useProject must be used within a ProjectProvider');
54
+ }
55
+ return context;
56
+ }