Spaces:
Runtime error
Runtime error
Sync from GitHub repo - 2025-09-20 22:01:48
Browse files- .env.example +27 -0
- .gitignore +39 -0
- LICENSE +21 -0
- README.md +165 -11
- ai_client.py +463 -0
- app.py +801 -0
- config.py +397 -0
- config.yaml +9 -0
- database.py +589 -0
- requirements.txt +23 -0
- templates/export_html.html +537 -0
- templates/export_markdown.md +67 -0
- utils.py +898 -0
.env.example
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Environment variables required for Naexya Docs AI to connect with supported
|
| 2 |
+
# AI service providers. Copy this file to `.env` and replace placeholder values
|
| 3 |
+
# with your actual API credentials before running the application.
|
| 4 |
+
|
| 5 |
+
# OpenAI powers GPT-based specification assistance, such as drafting or
|
| 6 |
+
# reviewing requirement documents.
|
| 7 |
+
OPENAI_API_KEY=your_openai_key_here
|
| 8 |
+
|
| 9 |
+
# Anthropic enables access to Claude models for alternative language model
|
| 10 |
+
# support and redundancy across AI providers.
|
| 11 |
+
ANTHROPIC_API_KEY=your_anthropic_key_here
|
| 12 |
+
|
| 13 |
+
# Google provides Gemini (and related) generative AI capabilities used for
|
| 14 |
+
# advanced specification analysis and summarization features.
|
| 15 |
+
GOOGLE_API_KEY=your_google_key_here
|
| 16 |
+
|
| 17 |
+
# xAI delivers access to models like Grok for experimentation and fallback
|
| 18 |
+
# options when other providers are unavailable.
|
| 19 |
+
XAI_API_KEY=your_xai_key_here
|
| 20 |
+
|
| 21 |
+
# Moonshot offers specialized AI models tailored for technical documentation
|
| 22 |
+
# and domain-specific reasoning tasks.
|
| 23 |
+
MOONSHOT_API_KEY=your_moonshot_key_here
|
| 24 |
+
|
| 25 |
+
# Qwen supplies open-source-friendly large language models that can be used for
|
| 26 |
+
# cost-effective or on-premise specification processing.
|
| 27 |
+
QWEN_API_KEY=your_qwen_key_here
|
.gitignore
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Environment variables and secrets
|
| 2 |
+
.env
|
| 3 |
+
*.key
|
| 4 |
+
secrets/
|
| 5 |
+
|
| 6 |
+
# Python build artefacts and bytecode
|
| 7 |
+
__pycache__/
|
| 8 |
+
*.py[cod]
|
| 9 |
+
*$py.class
|
| 10 |
+
*.so
|
| 11 |
+
.Python
|
| 12 |
+
|
| 13 |
+
# Local virtual environment folders
|
| 14 |
+
env/
|
| 15 |
+
venv/
|
| 16 |
+
ENV/
|
| 17 |
+
|
| 18 |
+
# SQLite and other local database files
|
| 19 |
+
*.db
|
| 20 |
+
*.sqlite
|
| 21 |
+
*.sqlite3
|
| 22 |
+
|
| 23 |
+
# IDE and editor specific files
|
| 24 |
+
.vscode/
|
| 25 |
+
.idea/
|
| 26 |
+
*.swp
|
| 27 |
+
*.swo
|
| 28 |
+
|
| 29 |
+
# Operating system generated files
|
| 30 |
+
.DS_Store
|
| 31 |
+
Thumbs.db
|
| 32 |
+
|
| 33 |
+
# Gradio generated assets
|
| 34 |
+
gradio_cached_examples/
|
| 35 |
+
flagged/
|
| 36 |
+
|
| 37 |
+
# Log files and directories
|
| 38 |
+
*.log
|
| 39 |
+
logs/
|
LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
MIT License
|
| 2 |
+
|
| 3 |
+
Copyright (c) 2025 Naexya
|
| 4 |
+
|
| 5 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 6 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 7 |
+
in the Software without restriction, including without limitation the rights
|
| 8 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 9 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 10 |
+
furnished to do so, subject to the following conditions:
|
| 11 |
+
|
| 12 |
+
The above copyright notice and this permission notice shall be included in all
|
| 13 |
+
copies or substantial portions of the Software.
|
| 14 |
+
|
| 15 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 16 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 17 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 18 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 19 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 20 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
| 21 |
+
SOFTWARE.
|
README.md
CHANGED
|
@@ -1,14 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
|
|
|
|
|
|
|
|
|
| 12 |
---
|
| 13 |
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Naexya Docs AI
|
| 2 |
+
|
| 3 |
+
Open-source AI-powered specification management tool that helps product and engineering teams collaborate with multiple large language models, extract structured requirements, and export professional documentation without leaving a browser tab.
|
| 4 |
+
|
| 5 |
+
---
|
| 6 |
+
|
| 7 |
+
## Features
|
| 8 |
+
|
| 9 |
+
- **Multi-provider AI integration** (OpenAI, Anthropic, Google, xAI, Moonshot, Qwen) with a unified client and per-provider rate limit awareness.
|
| 10 |
+
- **Dual AI personas** (Requirements Specialist + Technical Architect) designed to capture business context and technical design details in parallel.
|
| 11 |
+
- **Conversation-based specification extraction** that promotes iterative refinement and transparent traceability back to the originating chat history.
|
| 12 |
+
- **Validation workflow for quality control** so human reviewers can approve or reject generated specifications before they become canonical.
|
| 13 |
+
- **Professional export to HTML/Markdown** leveraging branded templates optimised for stakeholders and AI coding agents alike.
|
| 14 |
+
- **Local SQLite storage (no cloud dependencies)** providing self-hosted data retention with optional demo seed data for evaluation.
|
| 15 |
+
- **Bring-your-own-API-key model** ensuring you retain full control over model usage, quotas, and billing across all supported vendors.
|
| 16 |
+
|
| 17 |
+
---
|
| 18 |
+
|
| 19 |
+
## Quick Start
|
| 20 |
+
|
| 21 |
+
1. **Clone the repository**
|
| 22 |
+
```bash
|
| 23 |
+
git clone https://github.com/your-org/NaexyaDocsAI.git
|
| 24 |
+
cd NaexyaDocsAI
|
| 25 |
+
```
|
| 26 |
+
2. **Create an isolated environment (recommended)**
|
| 27 |
+
```bash
|
| 28 |
+
python3 -m venv .venv
|
| 29 |
+
   source .venv/bin/activate  # Windows: .venv\Scripts\activate
|
| 30 |
+
```
|
| 31 |
+
3. **Install dependencies**
|
| 32 |
+
```bash
|
| 33 |
+
pip install --upgrade pip
|
| 34 |
+
pip install -r requirements.txt
|
| 35 |
+
```
|
| 36 |
+
4. **Copy environment template and add your API keys**
|
| 37 |
+
```bash
|
| 38 |
+
cp .env.example .env
|
| 39 |
+
# Edit .env with your provider keys
|
| 40 |
+
```
|
| 41 |
+
5. **Launch the Gradio application**
|
| 42 |
+
```bash
|
| 43 |
+
python app.py
|
| 44 |
+
```
|
| 45 |
+
6. **Open the local URL** printed by Gradio (typically `http://127.0.0.1:7860/`) to start collaborating with the personas and managing specifications.
|
| 46 |
+
|
| 47 |
+
> 💡 **Tip:** If you do not yet have API keys, enable the built-in demo data from the landing page. This allows you to explore the interface, validation queue, and export flows without making external API calls.
|
| 48 |
+
|
| 49 |
+
---
|
| 50 |
+
|
| 51 |
+
## Configuration
|
| 52 |
+
|
| 53 |
+
The platform is fully configurable through `config.py` and environment variables loaded from `.env`. Each provider entry defines endpoints, default parameters, and header templates to help you stay within rate limits.
|
| 54 |
+
|
| 55 |
+
### Environment Variables
|
| 56 |
+
|
| 57 |
+
| Variable | Description |
|
| 58 |
+
| --- | --- |
|
| 59 |
+
| `OPENAI_API_KEY` | Secret key for OpenAI GPT-5 endpoints. |
|
| 60 |
+
| `ANTHROPIC_API_KEY` | Authentication token for Claude models. |
|
| 61 |
+
| `GOOGLE_API_KEY` | Google AI Studio API key for Gemini models. |
|
| 62 |
+
| `XAI_API_KEY` | API key for xAI Grok models. |
|
| 63 |
+
| `MOONSHOT_API_KEY` | Credential for Moonshot (Kimi) access. |
|
| 64 |
+
| `QWEN_API_KEY` | Access token for Alibaba Qwen models. |
|
| 65 |
+
|
| 66 |
+
> ⚠️ Keep the `.env` file out of version control. Only `.env.example` should be committed.
|
| 67 |
+
|
| 68 |
+
### Provider Setup Details
|
| 69 |
+
|
| 70 |
+
1. **OpenAI (GPT-5)**
|
| 71 |
+
- Create or reuse an OpenAI account and generate a key from the dashboard.
|
| 72 |
+
- Ensure the `https://api.openai.com/v1/chat/completions` endpoint is enabled for your organisation.
|
| 73 |
+
- Optional parameters such as `temperature` and `max_tokens` can be fine-tuned in `config.py`.
|
| 74 |
+
|
| 75 |
+
2. **Anthropic (Claude-4-Sonnet)**
|
| 76 |
+
- Request access to Claude-4 via the Anthropic console.
|
| 77 |
+
- Place the key in `.env` as `ANTHROPIC_API_KEY`.
|
| 78 |
+
- Respect the token per-minute limits published in the console; the defaults in `config.py` reflect conservative usage.
|
| 79 |
+
|
| 80 |
+
3. **Google (Gemini-2.5-Pro)**
|
| 81 |
+
- Enable the Generative Language API in Google Cloud and create credentials through Google AI Studio.
|
| 82 |
+
- Set `GOOGLE_API_KEY` and confirm the project has the `models.generateContent` permission.
|
| 83 |
+
|
| 84 |
+
4. **xAI (Grok-4-Fast)**
|
| 85 |
+
- Obtain access from the xAI developer portal and generate an API key.
|
| 86 |
+
- Update `.env` with `XAI_API_KEY`; the client automatically adds the `x-api-key` header required by Grok.
|
| 87 |
+
|
| 88 |
+
5. **Moonshot (Kimi-K2)**
|
| 89 |
+
- Sign in to Moonshot AI, subscribe to the Kimi API plan, and generate a token.
|
| 90 |
+
- Store the token in `MOONSHOT_API_KEY`; the client converts payloads to the Moonshot JSON schema for you.
|
| 91 |
+
|
| 92 |
+
6. **Qwen (Qwen3-Next)**
|
| 93 |
+
- Activate DashScope and retrieve a key with text-generation permissions.
|
| 94 |
+
- Save the key as `QWEN_API_KEY`; the integration handles the `Authorization: Bearer` header format.
|
| 95 |
+
|
| 96 |
+
After updating `.env`, restart the application so that Gradio reloads the configuration.
|
| 97 |
+
|
| 98 |
+
---
|
| 99 |
+
|
| 100 |
+
## Usage Guide
|
| 101 |
+
|
| 102 |
+
1. **Create or select a project** in the **Projects** tab. Each project stores conversations, specifications, and export history.
|
| 103 |
+
2. **Engage with the Requirements Specialist persona** in the **Requirements Chat** tab. Provide business objectives, user roles, and product scenarios. The assistant will log messages and surface candidate user stories.
|
| 104 |
+
3. **Switch to the Technical Architect persona** in the **Technical Chat** tab to capture APIs, data models, and system components with full technical depth.
|
| 105 |
+
4. **Review generated specifications** in the **Validation** tab. Approve high-quality outputs, request revisions, or reject items that need more context.
|
| 106 |
+
5. **Browse approved artefacts** in the **Specifications** tab. Filter by User Stories, Features, API Endpoints, Database Design, or System Architecture.
|
| 107 |
+
6. **Export documentation** from the **Export** tab. Download branded HTML or AI-friendly Markdown reports that include metadata, statistics, and links back to conversations.
|
| 108 |
+
7. **Manage provider settings** and rotate keys within the **Settings** tab. All changes are persisted locally so you can tailor the stack to your environment.
|
| 109 |
+
|
| 110 |
+
Throughout the workflow, the application captures timestamps and associations between conversations, personas, and specifications for full traceability.
|
| 111 |
+
|
| 112 |
+
---
|
| 113 |
+
|
| 114 |
+
## Deployment
|
| 115 |
+
|
| 116 |
+
### Local (Recommended for Development)
|
| 117 |
+
|
| 118 |
+
- Follow the Quick Start steps above.
|
| 119 |
+
- To run the app on a custom port, export `GRADIO_SERVER_PORT=XXXX` before launching `python app.py`.
|
| 120 |
+
- Use tools like `tmux` or `systemd` if you want to keep the application running in the background.
|
| 121 |
+
|
| 122 |
+
### Hugging Face Spaces
|
| 123 |
+
|
| 124 |
+
Hugging Face Spaces reads the [`config.yaml`](config.yaml) manifest and pinned
|
| 125 |
+
[`requirements.txt`](requirements.txt) to build and launch the application.
|
| 126 |
+
|
| 127 |
+
1. Create a new **Gradio** Space and connect it to your fork of the repository.
|
| 128 |
+
2. Review the metadata in `config.yaml`; update the title or colour palette if you fork the project.
|
| 129 |
+
3. Set the **Space hardware** to at least the default CPU (no GPU required).
|
| 130 |
+
4. In the Space **Variables** section, add any provider keys you plan to use
|
| 131 |
+
(`OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, `GOOGLE_API_KEY`, `XAI_API_KEY`,
|
| 132 |
+
`MOONSHOT_API_KEY`, `QWEN_API_KEY`). Spaces automatically exposes these as
|
| 133 |
+
environment variables.
|
| 134 |
+
5. Optional: provide `NAEXYA_DEFAULT_PROVIDER` to specify which vendor should
|
| 135 |
+
be called first when multiple keys are present.
|
| 136 |
+
6. Save the settings and rebuild the Space. Dependencies are installed from the
|
| 137 |
+
pinned versions in `requirements.txt`, and `app.py` is used as the entry
|
| 138 |
+
point.
|
| 139 |
+
7. Persistent storage is available under `/data`. The application automatically
|
| 140 |
+
stores the SQLite database there when running inside a Space.
|
| 141 |
+
|
| 142 |
+
> 💡 No API keys yet? Launch the Space anyway. The interface automatically
|
| 143 |
+
> enters **demo mode** so you can explore the workflow using the built-in mock
|
| 144 |
+
> responses, validation queue, and exports without leaving the browser.
|
| 145 |
+
|
| 146 |
+
> 📘 For other hosting targets (e.g., Docker, Railway), reuse the same
|
| 147 |
+
> environment variables and ensure port `7860` is exposed.
|
| 148 |
+
|
| 149 |
---
|
| 150 |
+
|
| 151 |
+
## Contributing
|
| 152 |
+
|
| 153 |
+
We welcome pull requests and ideas from the community. To contribute:
|
| 154 |
+
|
| 155 |
+
1. Fork the repository and create a feature branch (`git checkout -b feature/amazing-idea`).
|
| 156 |
+
2. Install dependencies and run the application locally to validate your changes.
|
| 157 |
+
3. Add or update documentation, including screenshots if you modify the UI.
|
| 158 |
+
4. Run `python -m compileall .` (or the relevant test suite once added) to ensure there are no syntax errors.
|
| 159 |
+
5. Submit a pull request describing the motivation, approach, and testing performed.
|
| 160 |
+
|
| 161 |
+
Please follow the existing coding style, docstring conventions, and commit message clarity when contributing.
|
| 162 |
+
|
| 163 |
---
|
| 164 |
|
| 165 |
+
## License
|
| 166 |
+
|
| 167 |
+
Naexya Docs AI is released under the terms of the [MIT License](LICENSE). You are free to self-host, extend, and integrate the project in accordance with the license.
|
| 168 |
+
|
ai_client.py
ADDED
|
@@ -0,0 +1,463 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Unified AI client abstraction for Naexya Docs AI.
|
| 2 |
+
|
| 3 |
+
This module centralises the integration logic for every supported AI provider,
|
| 4 |
+
so the rest of the application can request completions without knowing anything
|
| 5 |
+
about HTTP payload formats or authentication details. The implementation
|
| 6 |
+
favours readability and extensive inline documentation over brevity because it
|
| 7 |
+
serves as both reference material and an onboarding resource for new
|
| 8 |
+
contributors.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from __future__ import annotations
|
| 12 |
+
|
| 13 |
+
import json
|
| 14 |
+
import logging
|
| 15 |
+
import re
|
| 16 |
+
from dataclasses import dataclass
|
| 17 |
+
from typing import Dict, Generator, Iterable, List, Optional, Tuple
|
| 18 |
+
|
| 19 |
+
import requests
|
| 20 |
+
|
| 21 |
+
from config import AI_PROVIDERS, AppConfig
|
| 22 |
+
|
| 23 |
+
logger = logging.getLogger(__name__)
|
| 24 |
+
|
| 25 |
+
# ---------------------------------------------------------------------------
|
| 26 |
+
# Provider configuration metadata
|
| 27 |
+
# ---------------------------------------------------------------------------
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@dataclass(frozen=True)
class ProviderConfig:
    """Container describing the static details for a provider.

    Instances are immutable (``frozen=True``), so the ``PROVIDERS`` registry
    entries can be shared safely across the application without risk of
    accidental mutation.

    Attributes
    ----------
    name:
        Human friendly label used in logs and error messages.
    endpoint:
        HTTPS endpoint for the chat or text generation API.
    default_model:
        Suggested model identifier when a caller does not provide one.
    supports_streaming:
        Flag documenting whether the HTTP API provides a streaming interface.
    """

    # Human readable vendor/model label (logs + error text).
    name: str
    # Full HTTPS URL of the completion endpoint.
    endpoint: str
    # Model identifier used when the caller passes ``model=None``.
    default_model: str
    # Documentation-only flag; the non-streaming code path is used regardless.
    supports_streaming: bool = True
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# Registry of every supported vendor, keyed by the lowercase identifier that
# the helper functions below (`get_provider_headers`, `_build_payload`,
# `call_ai_provider`) expect.
PROVIDERS: Dict[str, ProviderConfig] = {
    # OpenAI's Chat Completions endpoint. Authentication is handled with a
    # Bearer token header, and the request payload is expressed as JSON.
    "openai": ProviderConfig(
        name="OpenAI GPT-5",
        endpoint="https://api.openai.com/v1/chat/completions",
        default_model="gpt-5",
        supports_streaming=True,
    ),
    # Anthropic's Messages API. This API expects a slightly different JSON
    # schema compared to OpenAI, including an explicit "messages" array with
    # role/content pairs. It uses x-api-key and anthropic-version headers.
    "anthropic": ProviderConfig(
        name="Anthropic Claude-4-Sonnet",
        endpoint="https://api.anthropic.com/v1/messages",
        default_model="claude-4-sonnet",
        supports_streaming=True,
    ),
    # Google Gemini's Generative Language API. It expects the content in a
    # "contents" array that contains "parts" objects. Authentication is
    # performed via a query parameter instead of HTTP headers.
    "google": ProviderConfig(
        name="Google Gemini-2.5-Pro",
        endpoint="https://generativelanguage.googleapis.com/v1/models/"
        "gemini-2.5-pro:generateContent",
        default_model="gemini-2.5-pro",
        supports_streaming=False,
    ),
    # xAI's Grok models mimic the OpenAI schema but use their own endpoint and
    # versioned Accept header.
    "xai": ProviderConfig(
        name="xAI Grok-4-Fast",
        endpoint="https://api.x.ai/v1/chat/completions",
        default_model="grok-4-fast",
        supports_streaming=True,
    ),
    # Moonshot's Kimi API is also compatible with the chat completions format.
    "moonshot": ProviderConfig(
        name="Moonshot Kimi-K2",
        endpoint="https://api.moonshot.ai/v1/chat/completions",
        default_model="kimi-k2",
        supports_streaming=True,
    ),
    # Alibaba's Qwen DashScope endpoint accepts JSON requests with a "model"
    # field and an "input" object wrapping the messages.
    "qwen": ProviderConfig(
        name="Qwen3-Next",
        endpoint="https://dashscope.aliyuncs.com/api/v1/services/"
        "aigc/text-generation/generation",
        default_model="qwen3-next",
        supports_streaming=True,
    ),
}
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
# ---------------------------------------------------------------------------
|
| 109 |
+
# Helper utilities
|
| 110 |
+
# ---------------------------------------------------------------------------
|
| 111 |
+
|
| 112 |
+
def get_provider_headers(provider: str, api_key: str) -> Dict[str, str]:
    """Return the HTTP headers required for a specific provider.

    Parameters
    ----------
    provider:
        Provider identifier (e.g. ``"openai"``). Case insensitive.
    api_key:
        Secret token used for authentication. The contents are not validated,
        but a :class:`ValueError` is raised when the key is missing.

    Raises
    ------
    ValueError
        If ``api_key`` is empty or ``provider`` is not a known vendor.

    Notes
    -----
    Header requirements differ per vendor, so the mapping lives in this one
    helper: adding a provider means adding a single entry below.
    """

    if not api_key:
        raise ValueError("API key is required for provider headers")

    bearer_token = f"Bearer {api_key}"
    headers_by_provider: Dict[str, Dict[str, str]] = {
        # Standard Bearer-token authentication.
        "openai": {"Authorization": bearer_token},
        # Anthropic uses a dedicated key header plus a pinned API version.
        "anthropic": {
            "x-api-key": api_key,
            "anthropic-version": "2023-06-01",
            "content-type": "application/json",
        },
        # Google authenticates via a query-string parameter; only the content
        # type is supplied here for completeness.
        "google": {"Content-Type": "application/json"},
        "xai": {
            "Authorization": bearer_token,
            "Accept": "application/json",
        },
        "moonshot": {
            "Authorization": bearer_token,
            "Content-Type": "application/json",
        },
        "qwen": {
            "Authorization": bearer_token,
            "Content-Type": "application/json",
        },
    }

    try:
        return headers_by_provider[provider.lower()]
    except KeyError:
        raise ValueError(f"Unsupported provider '{provider}'") from None
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def _build_payload(provider: str, model: Optional[str], messages: List[Dict[str, str]]) -> Dict[str, object]:
|
| 163 |
+
"""Construct the HTTP payload matching the provider's schema."""
|
| 164 |
+
|
| 165 |
+
provider_key = provider.lower()
|
| 166 |
+
if provider_key == "openai" or provider_key == "xai" or provider_key == "moonshot":
|
| 167 |
+
return {
|
| 168 |
+
"model": model,
|
| 169 |
+
"messages": messages,
|
| 170 |
+
"stream": False,
|
| 171 |
+
}
|
| 172 |
+
if provider_key == "anthropic":
|
| 173 |
+
return {
|
| 174 |
+
"model": model,
|
| 175 |
+
"messages": [
|
| 176 |
+
{
|
| 177 |
+
"role": message["role"],
|
| 178 |
+
"content": message["content"],
|
| 179 |
+
}
|
| 180 |
+
for message in messages
|
| 181 |
+
],
|
| 182 |
+
"max_tokens": 4096,
|
| 183 |
+
"stream": False,
|
| 184 |
+
}
|
| 185 |
+
if provider_key == "google":
|
| 186 |
+
# Gemini expects nested "contents" with parts containing text payloads.
|
| 187 |
+
return {
|
| 188 |
+
"model": model,
|
| 189 |
+
"contents": [
|
| 190 |
+
{
|
| 191 |
+
"role": message["role"],
|
| 192 |
+
"parts": [{"text": message["content"]}],
|
| 193 |
+
}
|
| 194 |
+
for message in messages
|
| 195 |
+
],
|
| 196 |
+
}
|
| 197 |
+
if provider_key == "qwen":
|
| 198 |
+
return {
|
| 199 |
+
"model": model,
|
| 200 |
+
"input": {
|
| 201 |
+
"messages": messages,
|
| 202 |
+
},
|
| 203 |
+
"parameters": {"enable_search": False},
|
| 204 |
+
}
|
| 205 |
+
|
| 206 |
+
raise ValueError(f"Unsupported provider '{provider}'")
|
| 207 |
+
|
| 208 |
+
|
| 209 |
+
# ---------------------------------------------------------------------------
|
| 210 |
+
# Core API interaction helpers
|
| 211 |
+
# ---------------------------------------------------------------------------
|
| 212 |
+
|
| 213 |
+
def call_ai_provider(
    provider: str,
    model: Optional[str],
    messages: List[Dict[str, str]],
    api_key: str,
    timeout: int = 60,
) -> Dict[str, object]:
    """Send a chat completion request to the specified provider.

    The helper translates a generic ``messages`` list into the JSON body
    expected by each API and returns the parsed JSON response so higher level
    code can extract relevant fields.

    Parameters
    ----------
    provider:
        Provider identifier; must be a key of :data:`PROVIDERS` (case
        insensitive).
    model:
        Model identifier, or ``None`` to use the provider's default.
    messages:
        Chat history as role/content dictionaries.
    api_key:
        Secret token for the chosen provider.
    timeout:
        Request timeout in seconds (connect + read).

    Raises
    ------
    ValueError
        If ``provider`` is unknown or ``api_key`` is empty.
    RuntimeError
        On network failure, non-2xx status, or an unparseable JSON body.

    Error handling is intentionally defensive: network errors, non-successful
    HTTP responses, and JSON parsing failures are all logged with context and
    re-raised as :class:`RuntimeError` to keep calling code consistent.
    """

    provider_key = provider.lower()
    if provider_key not in PROVIDERS:
        raise ValueError(f"Unsupported provider '{provider}'")

    config = PROVIDERS[provider_key]
    resolved_model = model or config.default_model
    headers = get_provider_headers(provider_key, api_key)
    payload = _build_payload(provider_key, resolved_model, messages)

    # Google requires the API key as a query parameter rather than a header.
    query_params = {"key": api_key} if provider_key == "google" else None

    try:
        # ``json=`` serialises the payload AND always sets a
        # ``Content-Type: application/json`` header. The previous
        # ``data=json.dumps(payload)`` form posted the body without a content
        # type for providers whose header set omits it (e.g. OpenAI, xAI),
        # which JSON APIs may reject.
        response = requests.post(
            config.endpoint,
            params=query_params,
            headers=headers,
            json=payload,
            timeout=timeout,
        )
    except requests.RequestException as exc:  # pragma: no cover - network errors
        logger.exception("Network failure when calling %s", config.name)
        raise RuntimeError(f"Failed to reach {config.name}: {exc}") from exc

    if not response.ok:
        logger.error(
            "Provider %s responded with status %s: %s",
            config.name,
            response.status_code,
            response.text,
        )
        raise RuntimeError(
            f"{config.name} returned {response.status_code}: {response.text[:200]}"
        )

    try:
        # Use a distinct name: the original reused ``payload`` for the parsed
        # response, shadowing the request body and confusing debugging.
        parsed = response.json()
    except ValueError as exc:  # pragma: no cover - unexpected payloads
        logger.exception("Invalid JSON from %s", config.name)
        raise RuntimeError(f"Invalid JSON response from {config.name}") from exc

    return parsed
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
def handle_streaming_response(response: Iterable[bytes]) -> Generator[str, None, None]:
    """Yield UTF-8 decoded text chunks from a streaming HTTP response.

    Several providers (OpenAI, Anthropic, xAI, Moonshot, Qwen) can stream
    tokens over the HTTP connection. Gradio primarily consumes plain text, so
    this generator decodes each raw chunk to ``str`` one at a time; callers
    may join the pieces or surface them progressively in the UI. Empty chunks
    are skipped, and chunks that are not valid UTF-8 are dropped with a
    warning rather than aborting the stream.
    """

    for raw_chunk in response:
        # Skip keep-alive / empty frames.
        if not raw_chunk:
            continue
        try:
            text = raw_chunk.decode("utf-8")
        except UnicodeDecodeError:  # pragma: no cover - unexpected encoding
            logger.warning("Received non UTF-8 chunk from streaming response")
            continue
        yield text
|
| 299 |
+
|
| 300 |
+
|
| 301 |
+
# ---------------------------------------------------------------------------
|
| 302 |
+
# Response post-processing utilities
|
| 303 |
+
# ---------------------------------------------------------------------------
|
| 304 |
+
|
| 305 |
+
def extract_specifications_from_response(response_text: str) -> List[Dict[str, str]]:
    """Extract structured specification blocks from the raw model output.

    The helper searches for Markdown style headings and bullet lists describing
    requirements. The format is intentionally permissive because different
    models may return subtly different layouts. The result is a list of
    dictionaries so higher level code can serialise or store it easily.

    Args:
        response_text: Raw text returned by an AI provider.

    Returns:
        A list of ``{"title", "content", "status"}`` dictionaries. ``status``
        is always ``"pending"`` so callers can route entries to validation.
        An empty input yields an empty list; input without headings yields a
        single catch-all entry.
    """

    specs: List[Dict[str, str]] = []
    if not response_text:
        return specs

    # BUG FIX: the previous pattern was r"^#+\\s*(?P<title>.+)$". Inside a raw
    # string, "\\s" matches a literal backslash followed by "s", so Markdown
    # headings were never detected and every response fell through to the
    # single-spec fallback. r"\s" correctly matches optional whitespace after
    # the "#" markers.
    pattern = re.compile(r"^#+\s*(?P<title>.+)$", re.MULTILINE)
    matches = list(pattern.finditer(response_text))

    for index, match in enumerate(matches):
        title = match.group("title").strip()
        start = match.end()
        # A section's body runs until the next heading, or to end of text for
        # the final section.
        end = matches[index + 1].start() if index + 1 < len(matches) else len(response_text)
        body = response_text[start:end].strip()

        # Headings with no content underneath are skipped entirely.
        if not body:
            continue

        specs.append(
            {
                "title": title,
                "content": body,
                "status": "pending",
            }
        )

    # Fallback: if no headings were found, treat the whole message as a single
    # specification for manual review.
    if not specs:
        specs.append({"title": "Generated Specification", "content": response_text.strip(), "status": "pending"})

    return specs
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
# ---------------------------------------------------------------------------
|
| 347 |
+
# Demo utilities
|
| 348 |
+
# ---------------------------------------------------------------------------
|
| 349 |
+
|
| 350 |
+
def mock_ai_response(persona_type: str, user_message: str) -> str:
    """Return a deterministic response for demo sessions without API keys.

    ``persona_type`` is matched case-insensitively against the known personas
    ("business" and "technical"); anything else receives a generic placeholder
    so the UI still renders during demos.
    """

    normalized = persona_type.lower()

    if normalized == "business":
        return "\n".join(
            [
                "# Business Requirement Summary",
                f"Customer input: {user_message}",
                "",
                "- Objective: Deliver clear stakeholder value.",
                "- Success Criteria: Measure impact using agreed KPIs.",
                "- Constraints: Respect budget and compliance limits.",
            ]
        )

    if normalized == "technical":
        return "\n".join(
            [
                "# Technical Solution Outline",
                f"Key request: {user_message}",
                "",
                "- Architecture: Propose modular microservices with shared auth.",
                "- Integrations: Connect to existing analytics platform via REST.",
                "- Risks: Validate performance under peak concurrency.",
            ]
        )

    return "\n".join(
        [
            "# General Response",
            f"Prompt echoed: {user_message}",
            "",
            "This persona is not defined yet, but the placeholder keeps the UI",
            "functional during demos.",
        ]
    )
|
| 376 |
+
|
| 377 |
+
|
| 378 |
+
class AIClient:
    """Convenience wrapper that routes prompts to configured AI providers.

    Responsibilities:
    * pick a provider (preferred one first, then any other with a key),
    * dispatch the prompt through :func:`call_ai_provider`,
    * normalise heterogeneous provider payloads into plain text,
    * short-circuit to :func:`mock_ai_response` when demo mode is active.
    """

    def __init__(self, config: AppConfig) -> None:
        # Shared application configuration (API keys, demo-mode flag, etc.).
        self.config = config

    def _resolve_provider(self) -> Tuple[str, str]:
        """Return the provider identifier and API key to use for requests."""

        # Prefer the provider chosen in configuration, if it has a key.
        preferred = self.config.default_provider.lower()
        api_key = self.config.get_api_key(preferred)
        if api_key:
            return preferred, api_key

        # Otherwise fall back to the first configured provider with a key.
        for name, credential in self.config.configured_providers().items():
            if credential.api_key:
                return name, credential.api_key

        raise RuntimeError(
            "No AI provider API keys are configured. Supply a key or enable demo mode."
        )

    @staticmethod
    def _extract_text(provider: str, payload: Dict[str, object]) -> str:
        """Normalise provider responses to a plain text string.

        Each provider family nests the generated text differently; unknown or
        malformed payloads fall through to ``str(payload)`` so callers always
        receive *something* renderable.
        """

        try:
            # OpenAI-compatible chat schema (also used by xAI and Moonshot).
            if provider in {"openai", "xai", "moonshot"}:
                return str(payload["choices"][0]["message"]["content"]).strip()
            # Anthropic Messages API: list of content blocks.
            if provider == "anthropic":
                return str(payload["content"][0]["text"]).strip()
            # Google Gemini: candidates -> content -> parts.
            if provider == "google":
                return str(payload["candidates"][0]["content"]["parts"][0]["text"]).strip()
            if provider == "qwen":
                # Qwen responses vary; probe the known envelope shapes in turn.
                output = payload.get("output") or payload.get("data") or {}
                if isinstance(output, dict) and "text" in output:
                    return str(output["text"]).strip()
                if "result" in payload and isinstance(payload["result"], dict):
                    maybe_text = payload["result"].get("output_text")
                    if maybe_text:
                        return str(maybe_text).strip()
        except (IndexError, KeyError, TypeError):  # pragma: no cover - defensive
            logger.exception("Unexpected response schema from provider %s", provider)

        # Last resort: surface the raw payload rather than raising.
        return str(payload)

    def generate_specification(
        self,
        *,
        prompt: str,
        persona: str = "general",
        user_message: Optional[str] = None,
    ) -> str:
        """Send ``prompt`` to a provider or return a deterministic demo response.

        Args:
            prompt: Fully formatted prompt text; must be non-empty.
            persona: Persona hint ("requirements", "technical", or other).
            user_message: Original user text; used for demo responses when set.

        Raises:
            ValueError: If ``prompt`` is empty or not a string.
            RuntimeError: If no provider key is configured (non-demo mode).
        """

        if not isinstance(prompt, str) or not prompt.strip():
            raise ValueError("Prompt must be a non-empty string.")

        if self.config.demo_mode:
            # Map the app's persona names onto the mock responder's vocabulary.
            demo_persona = (
                "business"
                if persona == "requirements"
                else "technical" if persona == "technical" else persona
            )
            return mock_ai_response(demo_persona, user_message or prompt)

        provider, api_key = self._resolve_provider()
        # model=None lets call_ai_provider choose the provider's default model.
        payload = call_ai_provider(
            provider=provider,
            model=None,
            messages=[{"role": "user", "content": prompt.strip()}],
            api_key=api_key,
        )
        return self._extract_text(provider, payload)
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
# Public API of this module; controls what ``from ai_client import *`` exports.
__all__ = [
    "ProviderConfig",
    "PROVIDERS",
    "call_ai_provider",
    "extract_specifications_from_response",
    "get_provider_headers",
    "handle_streaming_response",
    "mock_ai_response",
    "AIClient",
]
|
app.py
ADDED
|
@@ -0,0 +1,801 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Gradio user interface for the Naexya Docs AI application.
|
| 2 |
+
|
| 3 |
+
This module assembles the full interactive experience for the project while
|
| 4 |
+
remaining intentionally high-level so future contributors can plug in real
|
| 5 |
+
business logic. The interface models the end-to-end workflow for capturing
|
| 6 |
+
project requirements, collaborating with AI personas, validating the generated
|
| 7 |
+
content, and exporting approved specifications.
|
| 8 |
+
|
| 9 |
+
Key features implemented below:
|
| 10 |
+
|
| 11 |
+
* Application initialization that wires together configuration, the SQLite
|
| 12 |
+
database helper, and the AI client abstraction.
|
| 13 |
+
* Responsive Gradio ``Blocks`` interface composed of multiple tabs that mirror
|
| 14 |
+
the intended product workflow (projects, conversations, validation,
|
| 15 |
+
specification review, export, and settings).
|
| 16 |
+
* Robust state management powered by ``gr.State`` objects so interactions remain
|
| 17 |
+
consistent across user actions and refreshes.
|
| 18 |
+
* Extensive inline comments, docstrings, and structured sections to serve as a
|
| 19 |
+
living guide for engineers extending the tool.
|
| 20 |
+
* Demo data helpers that allow the UI to be exercised without API keys or
|
| 21 |
+
external dependencies—ideal for automated tests and onboarding sessions.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
from __future__ import annotations
|
| 25 |
+
|
| 26 |
+
import itertools
|
| 27 |
+
import logging
|
| 28 |
+
import traceback
|
| 29 |
+
from dataclasses import dataclass
|
| 30 |
+
from typing import Dict, Iterable, List, Optional, Tuple
|
| 31 |
+
|
| 32 |
+
import gradio as gr
|
| 33 |
+
|
| 34 |
+
from ai_client import AIClient
|
| 35 |
+
from config import AI_PROVIDERS, AppConfig
|
| 36 |
+
from database import DatabaseManager, SpecificationRecord
|
| 37 |
+
from utils import format_prompt, render_export
|
| 38 |
+
|
| 39 |
+
# ---------------------------------------------------------------------------
# Application bootstrapping
# ---------------------------------------------------------------------------

# Configure logging early so helpers can emit debug information. In production
# you might route this to structured logs or observability platforms.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger(__name__)

# Instantiate configuration, database manager, and AI client when the module is
# imported. This ensures shared state is reused across Gradio requests.
# NOTE: these run as import-time side effects; importing this module touches
# the environment and the SQLite database file.
CONFIG: AppConfig = AppConfig.from_environment()
DB_MANAGER = DatabaseManager(database_path=CONFIG.database_path)
AI = AIClient(config=CONFIG)

# Category definitions used throughout validation and reporting flows. The order
# controls how sections are rendered in the Specifications tab.
SPECIFICATION_CATEGORIES: Tuple[str, ...] = (
    "Business Requirements",
    "Functional Specifications",
    "Non-Functional Requirements",
    "Technical Architecture",
    "Validation Criteria",
)

# Create a simple counter so each pending specification has a predictable,
# unique identifier. ``itertools.count`` is lightweight and thread-safe for the
# single-worker environments common when running Gradio locally.
PENDING_ID_SEQUENCE = itertools.count(1)
|
| 68 |
+
|
| 69 |
+
# Demo specification used when users enable mock data. Keeping the structure in
# a dataclass makes the code self-documenting.
@dataclass
class DemoSpecification:
    """Structure representing mock specifications bundled with the app."""

    # Human-readable heading shown for the demo entry.
    title: str
    # Should be one of SPECIFICATION_CATEGORIES so the entry is bucketed
    # correctly on the Specifications tab.
    category: str
    # Markdown body rendered for the demo specification.
    content: str
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
# Name of the seeded project that demo records are attached to.
DEMO_PROJECT_NAME = "Demo Commerce Platform"

# Bundled example specifications inserted into an empty database so the UI has
# content to show on first launch (see _prepare_demo_database below).
DEMO_SPECIFICATIONS: Tuple[DemoSpecification, ...] = (
    DemoSpecification(
        title="Customer Journey Overview",
        category="Business Requirements",
        content=(
            "- Describe online storefront goals.\n"
            "- Identify primary personas (shoppers, support, merchandising).\n"
            "- Highlight success metrics such as conversion rate and AOV."
        ),
    ),
    DemoSpecification(
        title="Checkout Microservice",
        category="Technical Architecture",
        content=(
            "- Python FastAPI service with PostgreSQL persistence.\n"
            "- Integrates with payment gateway via REST webhooks.\n"
            "- Includes observability hooks for latency and error tracking."
        ),
    ),
)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _prepare_demo_database() -> None:
    """Insert the bundled demo specifications when the database is empty."""

    # Any existing record means the database has been used before; leave it be.
    if list(DB_MANAGER.fetch_recent_specifications(limit=1)):
        return

    LOGGER.info("Seeding demo specification records")
    for spec in DEMO_SPECIFICATIONS:
        # The composite title encodes category and project so the UI can
        # re-split it later (see _group_approved_specifications).
        composite_title = f"{spec.category}::{DEMO_PROJECT_NAME}::{spec.title}"
        DB_MANAGER.save_specification(title=composite_title, content=spec.content)


# Ensure the schema exists and optionally seed demo content. The database manager
# already creates tables on initialization; we only add demo data if none exists
# to keep the repository self-contained for new users.
_prepare_demo_database()
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# ---------------------------------------------------------------------------
|
| 123 |
+
# Helper utilities for stateful interactions
|
| 124 |
+
# ---------------------------------------------------------------------------
|
| 125 |
+
|
| 126 |
+
def _ensure_project_selected(project: Optional[str]) -> None:
|
| 127 |
+
"""Raise an informative error when a project has not been chosen."""
|
| 128 |
+
|
| 129 |
+
if not project:
|
| 130 |
+
raise ValueError(
|
| 131 |
+
"Please create or select a project on the Projects tab before using this feature."
|
| 132 |
+
)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def _create_pending_entry(
    *,
    project: str,
    persona: str,
    response: str,
    category: str,
) -> Dict[str, str]:
    """Build a queue entry describing a draft specification awaiting validation."""

    # Each draft gets a monotonically increasing id from the module counter.
    pending_id = next(PENDING_ID_SEQUENCE)
    return {
        "id": str(pending_id),
        "project": project,
        "persona": persona,
        "category": category,
        "title": f"{project} - {persona.title()} Draft #{pending_id}",
        "content": response,
    }
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def _persona_prompt(persona: str, message: str) -> str:
|
| 157 |
+
"""Format the user message with persona-specific guidance."""
|
| 158 |
+
|
| 159 |
+
persona_guidance = {
|
| 160 |
+
"requirements": (
|
| 161 |
+
"Act as a business analyst capturing stakeholder goals, user personas, and"
|
| 162 |
+
" measurable outcomes."
|
| 163 |
+
),
|
| 164 |
+
"technical": (
|
| 165 |
+
"Act as a systems architect proposing services, integrations, and deployment"
|
| 166 |
+
" considerations."
|
| 167 |
+
),
|
| 168 |
+
}
|
| 169 |
+
guidance = persona_guidance.get(persona, "Act as an assistant.")
|
| 170 |
+
return (
|
| 171 |
+
"You are collaborating on Naexya Docs AI. "
|
| 172 |
+
f"{guidance}\n\nUser message:\n{message.strip()}"
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def _record_conversation(
|
| 177 |
+
conversation_state: Dict[str, List[Tuple[str, str]]],
|
| 178 |
+
persona: str,
|
| 179 |
+
user_message: str,
|
| 180 |
+
ai_response: str,
|
| 181 |
+
) -> Dict[str, List[Tuple[str, str]]]:
|
| 182 |
+
"""Append conversation turns and return the mutated state copy."""
|
| 183 |
+
|
| 184 |
+
updated_history = {**conversation_state}
|
| 185 |
+
history = list(updated_history.get(persona, []))
|
| 186 |
+
history.append(("user", user_message))
|
| 187 |
+
history.append(("assistant", ai_response))
|
| 188 |
+
updated_history[persona] = history
|
| 189 |
+
return updated_history
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def _format_validation_queue(queue: Iterable[Dict[str, str]]) -> List[Tuple[str, str]]:
|
| 193 |
+
"""Create friendly labels for pending specifications displayed in dropdowns."""
|
| 194 |
+
|
| 195 |
+
labels = []
|
| 196 |
+
for pending in queue:
|
| 197 |
+
label = f"#{pending['id']} · {pending['category']} · {pending['title']}"
|
| 198 |
+
labels.append((label, pending["id"]))
|
| 199 |
+
return labels
|
| 200 |
+
|
| 201 |
+
|
| 202 |
+
def _group_approved_specifications(records: Iterable[SpecificationRecord]) -> Dict[str, List[str]]:
    """Bucket approved specification summaries by category for display.

    Titles are stored as ``category::project::name`` (see approval flow);
    anything that doesn't follow that convention lands in "Uncategorized".
    """

    grouped: Dict[str, List[str]] = {name: [] for name in SPECIFICATION_CATEGORIES}
    for record in records:
        if "::" in record.title:
            category, project, name = record.title.split("::", 2)
        else:
            category, project, name = "Uncategorized", "Unknown Project", record.title
        entry = f"**{project} — {name}**\n\n{record.content}".strip()
        # setdefault tolerates categories outside SPECIFICATION_CATEGORIES.
        grouped.setdefault(category, []).append(entry)
    return grouped
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
# ---------------------------------------------------------------------------
|
| 217 |
+
# Gradio callback functions (project management)
|
| 218 |
+
# ---------------------------------------------------------------------------
|
| 219 |
+
|
| 220 |
+
def bootstrap_application() -> Tuple[List[str], Dict[str, object], str, Dict[str, List[Tuple[str, str]]], Dict[str, List[Dict[str, str]]], str]:
    """Return initial state for the interface when the app loads.

    Returns:
        Project list, dropdown update payload, active project name, per-persona
        conversation history, pending-validation queue state, and a status
        message for the UI.
    """

    projects = [DEMO_PROJECT_NAME]
    current_project = DEMO_PROJECT_NAME
    conversation_state = {"requirements": [], "technical": []}
    pending_state = {"queue": []}
    if CONFIG.demo_mode:
        status = (
            "Loaded demo mode. Use the Projects tab to explore with mock data or"
            " add a project once you configure API keys."
        )
    else:
        status = (
            "Ready to collaborate. Create a project or load demo data while"
            " authenticated providers generate live specifications."
        )
    # BUG FIX: ``gr.Dropdown.update`` was removed in Gradio 4 and crashes at
    # runtime there; ``gr.update`` works on both Gradio 3.x and 4.x.
    dropdown_update = gr.update(choices=projects, value=current_project)
    return projects, dropdown_update, current_project, conversation_state, pending_state, status
|
| 239 |
+
|
| 240 |
+
|
| 241 |
+
def create_project(
    project_name: str,
    projects: List[str],
    current_project: Optional[str],
) -> Tuple[List[str], Dict[str, object], str, Dict[str, object]]:
    """Create a new project and update the selection dropdown.

    Args:
        project_name: Raw name typed by the user; whitespace is trimmed.
        projects: Current list of known projects (not mutated).
        current_project: Currently active project (unused; kept for the
            callback signature wired in the UI).

    Raises:
        ValueError: If the name is empty or already exists.
    """

    if not project_name or not project_name.strip():
        raise ValueError("Project name cannot be empty.")

    normalized_name = project_name.strip()
    if normalized_name in projects:
        raise ValueError(f"Project '{normalized_name}' already exists.")

    updated_projects = projects + [normalized_name]
    # BUG FIX: component ``.update`` classmethods (``gr.Dropdown.update`` /
    # ``gr.Textbox.update``) were removed in Gradio 4; ``gr.update`` is the
    # version-portable equivalent.
    dropdown_update = gr.update(choices=updated_projects, value=normalized_name)
    status = f"Created project '{normalized_name}' and set it as active."
    clear_input = gr.update(value="")
    return updated_projects, dropdown_update, status, clear_input
|
| 260 |
+
|
| 261 |
+
|
| 262 |
+
def select_project(project_name: str) -> Tuple[str, str]:
    """Activate the chosen project and report the switch to the user."""

    if not project_name:
        raise ValueError("Select a project to continue.")
    return project_name, f"Active project switched to '{project_name}'."
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def load_demo_data(
    projects: List[str],
    conversation_state: Dict[str, List[Tuple[str, str]]],
    pending_state: Dict[str, List[Dict[str, str]]],
) -> Tuple[List[str], Dict[str, List[Tuple[str, str]]], Dict[str, List[Dict[str, str]]], Dict[str, object], str]:
    """Populate application state with mock data for testing.

    Replaces the conversation history and pending queue with canned examples
    and makes sure the demo project exists and is selected.
    """

    # Append the demo project only if it is not already known.
    demo_projects = projects if DEMO_PROJECT_NAME in projects else projects + [DEMO_PROJECT_NAME]

    conversation_state = {
        "requirements": [
            ("user", "Outline the business goals for the ecommerce relaunch."),
            (
                "assistant",
                "Generated demo summary covering revenue targets, customer journeys, and KPIs.",
            ),
        ],
        "technical": [
            ("user", "Propose the core services and integrations we need."),
            (
                "assistant",
                "Demo architecture: API gateway, checkout service, event bus, analytics pipeline.",
            ),
        ],
    }

    queue = [
        _create_pending_entry(
            project=DEMO_PROJECT_NAME,
            persona="requirements",
            response="Demo requirements specification awaiting approval.",
            category="Business Requirements",
        ),
        _create_pending_entry(
            project=DEMO_PROJECT_NAME,
            persona="technical",
            response="Demo technical architecture overview pending validation.",
            category="Technical Architecture",
        ),
    ]

    pending_state = {"queue": queue}
    # BUG FIX: ``gr.Dropdown.update`` was removed in Gradio 4; ``gr.update``
    # works on both Gradio 3.x and 4.x.
    dropdown_update = gr.update(choices=demo_projects, value=DEMO_PROJECT_NAME)
    status = "Demo data loaded. Conversations and pending drafts now contain example content."
    return demo_projects, conversation_state, pending_state, dropdown_update, status
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
# ---------------------------------------------------------------------------
|
| 319 |
+
# Gradio callback functions (AI conversations)
|
| 320 |
+
# ---------------------------------------------------------------------------
|
| 321 |
+
|
| 322 |
+
def _handle_conversation(
    *,
    persona: str,
    message: str,
    project: Optional[str],
    conversation_state: Dict[str, List[Tuple[str, str]]],
    pending_state: Dict[str, List[Dict[str, str]]],
) -> Tuple[List[Tuple[str, str]], Dict[str, List[Tuple[str, str]]], Dict[str, List[Dict[str, str]]], str]:
    """Core handler shared by both AI persona chat tabs.

    Validates the input, asks the AI client for a response, records the
    exchange in the conversation state, and queues the response as a pending
    draft for validation.

    Returns:
        The updated chat history for this persona, the full conversation
        state, the updated pending-queue state, and a status message.

    Raises:
        ValueError: If no project is selected or the message is empty.
        RuntimeError: If the AI provider call fails.
    """

    # Fail fast on missing prerequisites before touching the AI client.
    _ensure_project_selected(project)
    if not message or not message.strip():
        raise ValueError("Please provide a message for the AI persona.")

    formatted_prompt = format_prompt(_persona_prompt(persona, message))

    try:
        ai_response = AI.generate_specification(
            prompt=formatted_prompt,
            persona=persona,
            user_message=message,
        )
    except Exception as exc:  # pragma: no cover - defensive guard for API failures
        # Log details server-side but surface a short, actionable message.
        LOGGER.error("AI generation failed: %s", exc)
        LOGGER.debug("Traceback: %s", traceback.format_exc())
        raise RuntimeError("Unable to generate a response. Check provider settings.") from exc

    # Record the turn only after a successful generation, so failed calls do
    # not leave half-written history behind.
    updated_conversation = _record_conversation(
        conversation_state=conversation_state,
        persona=persona,
        user_message=message,
        ai_response=ai_response,
    )

    # Map persona to the specification category used by the validation flow.
    category = (
        "Business Requirements"
        if persona == "requirements"
        else "Technical Architecture"
    )
    queue = list(pending_state.get("queue", []))
    queue.append(
        _create_pending_entry(
            project=project,
            persona=persona,
            response=ai_response,
            category=category,
        )
    )
    updated_pending = {"queue": queue}

    status = "Draft added to the validation queue. Review it on the Validation tab."
    return updated_conversation[persona], updated_conversation, updated_pending, status
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def handle_requirements_chat(
    message: str,
    project: Optional[str],
    conversation_state: Dict[str, List[Tuple[str, str]]],
    pending_state: Dict[str, List[Dict[str, str]]],
) -> Tuple[List[Tuple[str, str]], Dict[str, List[Tuple[str, str]]], Dict[str, List[Dict[str, str]]], str]:
    """Route a chat turn to the shared handler as the Requirements persona."""

    return _handle_conversation(
        persona="requirements",
        message=message,
        project=project,
        conversation_state=conversation_state,
        pending_state=pending_state,
    )
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def handle_technical_chat(
    message: str,
    project: Optional[str],
    conversation_state: Dict[str, List[Tuple[str, str]]],
    pending_state: Dict[str, List[Dict[str, str]]],
) -> Tuple[List[Tuple[str, str]], Dict[str, List[Tuple[str, str]]], Dict[str, List[Dict[str, str]]], str]:
    """Route a chat turn to the shared handler as the Technical persona."""

    return _handle_conversation(
        persona="technical",
        message=message,
        project=project,
        conversation_state=conversation_state,
        pending_state=pending_state,
    )
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
# ---------------------------------------------------------------------------
|
| 411 |
+
# Gradio callback functions (validation and approvals)
|
| 412 |
+
# ---------------------------------------------------------------------------
|
| 413 |
+
|
| 414 |
+
def refresh_pending_specs(pending_state: Dict[str, List[Dict[str, str]]]) -> Tuple[Dict[str, object], str]:
    """Update the pending specification dropdown and display guidance.

    Returns a dropdown update payload plus a short instruction string; an
    empty queue clears the dropdown.
    """

    queue = pending_state.get("queue", [])
    if not queue:
        # BUG FIX: ``gr.Dropdown.update`` was removed in Gradio 4; ``gr.update``
        # is the version-portable equivalent.
        return gr.update(choices=[], value=None), "No drafts awaiting validation."

    labels = _format_validation_queue(queue)
    # Pre-select the oldest draft so reviewers work the queue in order.
    first_id = queue[0]["id"]
    return gr.update(choices=labels, value=first_id), "Select a draft to review."
|
| 424 |
+
|
| 425 |
+
|
| 426 |
+
def load_pending_spec(
    spec_id: str,
    pending_state: Dict[str, List[Dict[str, str]]],
) -> Tuple[str, str]:
    """Look up a pending draft by id and return its header markdown and body."""

    for entry in pending_state.get("queue", []):
        if entry["id"] == spec_id:
            header = f"### {entry['title']}\n**Category:** {entry['category']}"
            return header, entry["content"]
    raise ValueError("Pending draft not found. Refresh the queue and try again.")
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
def approve_specification(
    spec_id: str,
    project: Optional[str],
    pending_state: Dict[str, List[Dict[str, str]]],
) -> Tuple[Dict[str, List[Dict[str, str]]], str]:
    """Persist the selected pending draft and remove it from the queue."""

    _ensure_project_selected(project)
    queue = list(pending_state.get("queue", []))
    matches = [entry for entry in queue if entry["id"] == spec_id]
    remaining = [entry for entry in queue if entry["id"] != spec_id]

    if not matches:
        raise ValueError("Unable to locate draft for approval. Refresh and retry.")

    approved_entry = matches[-1]
    # Encode category and project into the title; the Specifications tab
    # splits on "::" when rendering (see _group_approved_specifications).
    title = f"{approved_entry['category']}::{approved_entry['project']}::{approved_entry['title']}"
    DB_MANAGER.save_specification(title=title, content=approved_entry["content"])

    updated_state = {"queue": remaining}
    status = f"Approved '{approved_entry['title']}'. It is now available on the Specifications tab."
    return updated_state, status
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
def reject_specification(
    spec_id: str,
    pending_state: Dict[str, List[Dict[str, str]]],
) -> Tuple[Dict[str, List[Dict[str, str]]], str]:
    """Discard a pending draft without persisting it anywhere.

    Args:
        spec_id: Identifier of the draft being rejected.
        pending_state: Gradio state dict whose ``"queue"`` key holds the drafts.

    Returns:
        The updated state (minus the rejected draft) and a status message.

    Raises:
        ValueError: If the draft id is not present in the queue.
    """

    queue = list(pending_state.get("queue", []))
    discarded = [entry for entry in queue if entry["id"] == spec_id]
    if not discarded:
        raise ValueError("Draft not found. Refresh the queue and retry.")

    kept = [entry for entry in queue if entry["id"] != spec_id]
    rejected = discarded[-1]
    status = f"Rejected '{rejected['title']}'. It has been removed from the queue."
    return {"queue": kept}, status
|
| 489 |
+
|
| 490 |
+
|
| 491 |
+
# ---------------------------------------------------------------------------
|
| 492 |
+
# Gradio callback functions (specifications, export, and settings)
|
| 493 |
+
# ---------------------------------------------------------------------------
|
| 494 |
+
|
| 495 |
+
def refresh_specifications_view() -> List[str]:
    """Build one markdown string per specification category for the UI accordions.

    Returns:
        A list aligned with ``SPECIFICATION_CATEGORIES``: each element is either
        the category's approved entries joined by horizontal rules, or a
        placeholder when the category is empty.
    """

    records = DB_MANAGER.fetch_recent_specifications(limit=200)
    grouped = _group_approved_specifications(records)
    placeholder = "*No approved specifications yet.*"
    return [
        "\n\n---\n\n".join(grouped[category]) if grouped.get(category) else placeholder
        for category in SPECIFICATION_CATEGORIES
    ]
|
| 508 |
+
|
| 509 |
+
|
| 510 |
+
def export_specification(
    spec_id: str,
    export_format: str,
) -> Tuple[str, str]:
    """Render one approved specification via the HTML or Markdown template.

    Args:
        spec_id: Database id (as a string) of the specification to export.
        export_format: Either ``"HTML"`` or ``"Markdown"``; anything other than
            ``"HTML"`` falls back to the Markdown template.

    Returns:
        The rendered document and a status notice for the UI.

    Raises:
        ValueError: If no specification is selected or the id is unknown.
    """

    if not spec_id:
        raise ValueError("Select a specification to export.")

    target_id = int(spec_id)
    selected = next(
        (
            record
            for record in DB_MANAGER.fetch_recent_specifications(limit=200)
            if record.id == target_id
        ),
        None,
    )
    if selected is None:
        raise ValueError("Select a specification to export.")

    template = "export_html.html" if export_format == "HTML" else "export_markdown.md"
    rendered = render_export(
        template_name=template,
        context={"title": selected.title, "content": selected.content},
    )
    return rendered, f"Rendered {export_format} export for specification #{selected.id}."
|
| 534 |
+
|
| 535 |
+
|
| 536 |
+
def list_exportable_specs() -> dict:
    """Populate the export dropdown with all approved specifications.

    Returns:
        A Gradio ``update`` payload that refreshes the dropdown choices and
        selects the first specification when at least one exists.
    """

    records = DB_MANAGER.fetch_recent_specifications(limit=200)
    options = [(record.title, str(record.id)) for record in records]
    # Fix: ``gr.Dropdown.update`` was removed in Gradio 4, so both the old
    # return annotation (evaluated at import time) and the old call raised
    # AttributeError there. ``gr.update`` works on Gradio 3 and 4 alike and
    # returns the same update-dict payload.
    return gr.update(choices=options, value=(options[0][1] if options else None))
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
def summarize_settings() -> str:
    """Describe the provider-configuration state as markdown for the Settings tab.

    Returns:
        A markdown bullet list (one line per provider) followed by a note about
        demo mode or the default-provider override, and a Spaces storage note
        when applicable.
    """

    parts: List[str] = [
        f"- **{AI_PROVIDERS.get(key, {}).get('display_name', key.title())}:** "
        f"{'Configured' if credential.api_key else 'Not configured'}"
        for key, credential in CONFIG.providers.items()
    ]

    if CONFIG.demo_mode:
        parts.append(
            "\nDemo mode is active because no API keys were detected."
            " You can explore the interface with deterministic mock responses."
        )
    else:
        parts.append(
            "\nAt least one provider key is configured. Update `NAEXYA_DEFAULT_PROVIDER`"
            " to control which service is used first."
        )

    if CONFIG.space_id:
        parts.append(
            "Running inside a Hugging Face Space. Persistent data is stored under `/data`."
        )

    return "\n".join(parts)
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
# ---------------------------------------------------------------------------
|
| 574 |
+
# Interface construction
|
| 575 |
+
# ---------------------------------------------------------------------------
|
| 576 |
+
|
| 577 |
+
RESPONSIVE_CSS = """
|
| 578 |
+
@media (max-width: 768px) {
|
| 579 |
+
.two-column {flex-direction: column !important;}
|
| 580 |
+
}
|
| 581 |
+
"""
|
| 582 |
+
|
| 583 |
+
|
| 584 |
+
def build_interface() -> gr.Blocks:
    """Create the Gradio Blocks interface with all workflow tabs.

    The layout is built first (Projects, Requirements Chat, Technical Chat,
    Validation, Specifications, Export, Settings), then every callback is
    wired at the bottom so component references are all in scope.

    Returns:
        The fully wired ``gr.Blocks`` application, ready to ``launch()``.
    """

    with gr.Blocks(title="Naexya Docs AI", css=RESPONSIVE_CSS) as demo:
        gr.Markdown(
            """
            # Naexya Docs AI
            Collaborate with AI personas to capture, validate, and export rich project specifications.
            Use the tabs below to move sequentially from project setup through final export.
            """
        )

        # Shared state stores the active project, persona chat histories, pending drafts,
        # and the full list of projects available in the dropdown.
        project_list_state = gr.State([DEMO_PROJECT_NAME])
        current_project_state = gr.State(DEMO_PROJECT_NAME)
        conversation_state = gr.State({"requirements": [], "technical": []})
        pending_specs_state = gr.State({"queue": []})

        # NOTE(review): ``gr.TabItem`` blocks appear directly under ``Blocks``
        # without an enclosing ``gr.Tabs`` context — confirm this renders as
        # tabs in the installed Gradio version.

        # ------------------------------------------------------------------
        # Projects tab: manage project lifecycle and demo content
        # ------------------------------------------------------------------
        with gr.TabItem("Projects"):
            gr.Markdown(
                """Use this tab to create new projects, switch context, or load demo data."""
            )
            with gr.Row(elem_classes="two-column"):
                with gr.Column():
                    project_name_input = gr.Textbox(label="New Project Name", placeholder="e.g. Mobile Banking App")
                    create_project_button = gr.Button("Create Project", variant="primary")
                with gr.Column():
                    project_dropdown = gr.Dropdown(label="Active Project", choices=[DEMO_PROJECT_NAME], value=DEMO_PROJECT_NAME)
                    select_project_button = gr.Button("Set Active Project", variant="secondary")
                    demo_data_button = gr.Button("Load Demo Data", variant="secondary")
            project_status = gr.Markdown()

        # ------------------------------------------------------------------
        # Requirements Chat tab
        # ------------------------------------------------------------------
        with gr.TabItem("Requirements Chat"):
            gr.Markdown(
                """
                Chat with a business analyst persona to capture stakeholder needs, success metrics,
                and product scope. Each response is added to the validation queue.
                """
            )
            requirements_chat = gr.Chatbot(height=350)
            with gr.Row(elem_classes="two-column"):
                requirements_input = gr.Textbox(label="Message", placeholder="Describe goals, constraints, and personas...", lines=3)
                requirements_submit = gr.Button("Send", variant="primary")
            requirements_status = gr.Markdown()

        # ------------------------------------------------------------------
        # Technical Chat tab
        # ------------------------------------------------------------------
        with gr.TabItem("Technical Chat"):
            gr.Markdown(
                """
                Collaborate with a systems architect persona on integrations, services, and deployment
                considerations. Drafts also flow into the validation queue for review.
                """
            )
            technical_chat = gr.Chatbot(height=350)
            with gr.Row(elem_classes="two-column"):
                technical_input = gr.Textbox(label="Message", placeholder="Ask for architecture proposals, sequencing, or risks...", lines=3)
                technical_submit = gr.Button("Send", variant="primary")
            technical_status = gr.Markdown()

        # ------------------------------------------------------------------
        # Validation tab
        # ------------------------------------------------------------------
        with gr.TabItem("Validation"):
            gr.Markdown("""Review drafts generated by AI personas and approve or reject them.""")
            refresh_pending_button = gr.Button("Refresh Pending Drafts", variant="secondary")
            pending_dropdown = gr.Dropdown(label="Pending Drafts", choices=[], interactive=True)
            pending_header = gr.Markdown()
            pending_content = gr.Markdown()
            with gr.Row():
                approve_button = gr.Button("Approve", variant="primary")
                reject_button = gr.Button("Reject", variant="stop")
            validation_status = gr.Markdown()

        # ------------------------------------------------------------------
        # Specifications tab
        # ------------------------------------------------------------------
        with gr.TabItem("Specifications"):
            gr.Markdown("""Browse approved specifications grouped by category.""")
            refresh_specs_button = gr.Button("Refresh View", variant="secondary")
            # One accordion (and one Markdown output slot) per category; the
            # list order must match refresh_specifications_view's return order.
            category_outputs = []
            for category in SPECIFICATION_CATEGORIES:
                with gr.Accordion(category, open=False):
                    markdown = gr.Markdown("*No approved specifications yet.*")
                category_outputs.append(markdown)

        # ------------------------------------------------------------------
        # Export tab
        # ------------------------------------------------------------------
        with gr.TabItem("Export"):
            gr.Markdown("""Select an approved specification and render it using the export templates.""")
            export_refresh_button = gr.Button("Refresh Approved List", variant="secondary")
            export_dropdown = gr.Dropdown(label="Approved Specifications", choices=[])
            export_format_radio = gr.Radio(["Markdown", "HTML"], value="Markdown", label="Export Format")
            export_button = gr.Button("Render Export", variant="primary")
            export_preview = gr.Code(label="Export Preview", language="markdown")
            export_status = gr.Markdown()

        # ------------------------------------------------------------------
        # Settings tab
        # ------------------------------------------------------------------
        with gr.TabItem("Settings"):
            gr.Markdown(
                """
                Configure AI providers by supplying API keys in your environment. Use this summary to
                verify which providers are currently active. Demo data remains available even without keys.
                """
            )
            # Evaluated once at build time, not on every page load.
            settings_summary = gr.Markdown(summarize_settings())
            gr.Markdown(
                """Refer to `.env.example` for the list of supported providers and required environment variables."""
            )

        # ------------------------------------------------------------------
        # Wiring callbacks to UI interactions
        # ------------------------------------------------------------------

        # Application bootstrap when the interface loads.
        demo.load(
            fn=bootstrap_application,
            inputs=None,
            outputs=[project_list_state, project_dropdown, current_project_state, conversation_state, pending_specs_state, project_status],
        )

        # Project management actions.
        create_project_button.click(
            fn=create_project,
            inputs=[project_name_input, project_list_state, current_project_state],
            outputs=[project_list_state, project_dropdown, project_status, project_name_input],
        )

        select_project_button.click(
            fn=select_project,
            inputs=project_dropdown,
            outputs=[current_project_state, project_status],
        )

        demo_data_button.click(
            fn=load_demo_data,
            inputs=[project_list_state, conversation_state, pending_specs_state],
            outputs=[project_list_state, conversation_state, pending_specs_state, project_dropdown, project_status],
        )

        # Requirements persona interactions.
        requirements_submit.click(
            fn=handle_requirements_chat,
            inputs=[requirements_input, current_project_state, conversation_state, pending_specs_state],
            outputs=[requirements_chat, conversation_state, pending_specs_state, requirements_status],
        )

        # Technical persona interactions.
        technical_submit.click(
            fn=handle_technical_chat,
            inputs=[technical_input, current_project_state, conversation_state, pending_specs_state],
            outputs=[technical_chat, conversation_state, pending_specs_state, technical_status],
        )

        # Validation workflows.
        refresh_pending_button.click(
            fn=refresh_pending_specs,
            inputs=pending_specs_state,
            outputs=[pending_dropdown, validation_status],
        )
        pending_dropdown.change(
            fn=load_pending_spec,
            inputs=[pending_dropdown, pending_specs_state],
            outputs=[pending_header, pending_content],
        )
        approve_button.click(
            fn=approve_specification,
            inputs=[pending_dropdown, current_project_state, pending_specs_state],
            outputs=[pending_specs_state, validation_status],
        )
        reject_button.click(
            fn=reject_specification,
            inputs=[pending_dropdown, pending_specs_state],
            outputs=[pending_specs_state, validation_status],
        )

        # Approved specifications browsing.
        refresh_specs_button.click(
            fn=refresh_specifications_view,
            inputs=None,
            outputs=category_outputs,
        )

        # Export workflow.
        export_refresh_button.click(
            fn=list_exportable_specs,
            inputs=None,
            outputs=export_dropdown,
        )
        export_button.click(
            fn=export_specification,
            inputs=[export_dropdown, export_format_radio],
            outputs=[export_preview, export_status],
        )

    return demo
|
| 791 |
+
|
| 792 |
+
|
| 793 |
+
def main() -> None:
    """Build the Gradio interface and start the development server."""

    build_interface().launch()
|
| 798 |
+
|
| 799 |
+
|
| 800 |
+
# Allow ``python app.py`` to launch the server directly.
if __name__ == "__main__":
    main()
|
config.py
ADDED
|
@@ -0,0 +1,397 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Centralized configuration for the Naexya Docs AI application.
|
| 2 |
+
|
| 3 |
+
This module defines provider metadata, persona prompt templates, specification
|
| 4 |
+
categories, and export rendering configuration in a single location. Keeping
|
| 5 |
+
these values together makes it easier to maintain consistent behaviour across
|
| 6 |
+
modules such as ``ai_client.py`` and ``app.py``.
|
| 7 |
+
|
| 8 |
+
The dictionaries below are intentionally verbose and heavily commented so that
|
| 9 |
+
future contributors can understand every field without cross-referencing API
|
| 10 |
+
documentation.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from __future__ import annotations

import os
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, Optional

try:  # Loading .env files is optional but convenient for local development.
    from dotenv import load_dotenv
except ImportError:  # pragma: no cover - dependency may be missing in some envs.
    # Keep the same callable name so the rest of the module never needs to know
    # whether python-dotenv is installed.
    def load_dotenv(*_args: object, **_kwargs: object) -> bool:
        """Fallback stub when python-dotenv is not installed."""

        return False
|
| 27 |
+
|
| 28 |
+
# ---------------------------------------------------------------------------
|
| 29 |
+
# AI Provider configuration
|
| 30 |
+
# ---------------------------------------------------------------------------
|
| 31 |
+
# ``AI_PROVIDERS`` captures the details required to interact with each
|
| 32 |
+
# third-party large language model. Each entry explains the authentication
|
| 33 |
+
# header, supported models, and default parameter choices that the application
|
| 34 |
+
# should use. Additional providers can be added by following the same schema.
|
| 35 |
+
# Schema per provider: display_name, base_url, chat_endpoint, default_model,
# available_models, headers (with an "{api_key}" placeholder where required),
# default_params, and advisory rate_limits.
AI_PROVIDERS: Dict[str, Dict[str, Any]] = {
    "openai": {
        "display_name": "OpenAI",
        # Base endpoint for Chat Completions. Individual modules append
        # provider-specific paths as needed.
        "base_url": "https://api.openai.com/v1",
        "chat_endpoint": "https://api.openai.com/v1/chat/completions",
        "default_model": "gpt-5",
        "available_models": ["gpt-5"],
        # The provider requires a Bearer token with the ``Authorization`` header.
        "headers": {
            "Authorization": "Bearer {api_key}",
            "Content-Type": "application/json",
        },
        # Conservative defaults to balance quality with latency and cost.
        "default_params": {"temperature": 0.7, "max_tokens": 2048},
        # Basic rate-limit guidance for UI messaging and back-off strategies.
        # NOTE(review): these figures are advisory, not fetched from the
        # provider — verify against current account limits.
        "rate_limits": {
            "requests_per_minute": 500,
            "tokens_per_minute": 600000,
        },
    },
    "anthropic": {
        "display_name": "Anthropic",
        "base_url": "https://api.anthropic.com/v1",
        "chat_endpoint": "https://api.anthropic.com/v1/messages",
        "default_model": "claude-4-sonnet",
        "available_models": ["claude-4-sonnet"],
        # Anthropic expects both ``x-api-key`` and ``anthropic-version`` headers.
        "headers": {
            "x-api-key": "{api_key}",
            "anthropic-version": "2023-06-01",
            "Content-Type": "application/json",
        },
        "default_params": {"temperature": 0.7, "max_tokens": 2048},
        "rate_limits": {
            "requests_per_minute": 400,
            "tokens_per_minute": 480000,
        },
    },
    "google": {
        "display_name": "Google",
        "base_url": "https://generativelanguage.googleapis.com/v1",
        "chat_endpoint": "https://generativelanguage.googleapis.com/v1/models/gemini-2.5-pro:generateContent",
        "default_model": "gemini-2.5-pro",
        "available_models": ["gemini-2.5-pro"],
        # Gemini uses a query parameter for the API key; headers remain JSON.
        "headers": {"Content-Type": "application/json"},
        # Gemini names its token cap ``max_output_tokens`` rather than ``max_tokens``.
        "default_params": {"temperature": 0.7, "max_output_tokens": 2048},
        "rate_limits": {
            "requests_per_minute": 300,
            "tokens_per_minute": 360000,
        },
    },
    "xai": {
        "display_name": "xAI",
        "base_url": "https://api.x.ai/v1",
        "chat_endpoint": "https://api.x.ai/v1/chat/completions",
        "default_model": "grok-4-fast",
        "available_models": ["grok-4-fast"],
        "headers": {
            "Authorization": "Bearer {api_key}",
            "Content-Type": "application/json",
        },
        "default_params": {"temperature": 0.7, "max_tokens": 2048},
        "rate_limits": {
            "requests_per_minute": 200,
            "tokens_per_minute": 240000,
        },
    },
    "moonshot": {
        "display_name": "Moonshot",
        "base_url": "https://api.moonshot.ai/v1",
        "chat_endpoint": "https://api.moonshot.ai/v1/chat/completions",
        "default_model": "kimi-k2",
        "available_models": ["kimi-k2"],
        "headers": {
            "Authorization": "Bearer {api_key}",
            "Content-Type": "application/json",
        },
        "default_params": {"temperature": 0.7, "max_tokens": 2048},
        "rate_limits": {
            "requests_per_minute": 150,
            "tokens_per_minute": 180000,
        },
    },
    "qwen": {
        "display_name": "Qwen",
        "base_url": "https://dashscope.aliyuncs.com/api/v1",
        "chat_endpoint": "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation",
        "default_model": "qwen3-next",
        "available_models": ["qwen3-next"],
        "headers": {
            "Authorization": "Bearer {api_key}",
            "Content-Type": "application/json",
        },
        "default_params": {"temperature": 0.7, "max_tokens": 2048},
        "rate_limits": {
            "requests_per_minute": 250,
            "tokens_per_minute": 300000,
        },
    },
}
|
| 138 |
+
|
| 139 |
+
# ---------------------------------------------------------------------------
|
| 140 |
+
# Persona configuration
|
| 141 |
+
# ---------------------------------------------------------------------------
|
| 142 |
+
# Personas determine how AI assistants respond to users. Providing rich,
|
| 143 |
+
# descriptive prompts ensures that conversations remain on-topic and that the
|
| 144 |
+
# extracted specifications are actionable.
|
| 145 |
+
# Each persona maps to a ``display_name`` for the UI and a system ``prompt``
# that steers the model. ``validate_personas`` enforces a non-empty prompt.
AI_PERSONAS: Dict[str, Dict[str, str]] = {
    # Business-analyst persona backing the "Requirements Chat" tab.
    "requirements_specialist": {
        "display_name": "Requirements Specialist",
        "prompt": (
            "You are an expert business analyst specializing in gathering and "
            "documenting software requirements. Focus on user stories, business "
            "features, workflows, and functional requirements. Always ask "
            "clarifying questions and provide structured output."
        ),
    },
    # Systems-architect persona backing the "Technical Chat" tab.
    "technical_architect": {
        "display_name": "Technical Architect",
        "prompt": (
            "You are a senior technical architect specializing in system design "
            "and implementation. Focus on API specifications, database schemas, "
            "system architecture, and technical implementation details. Provide "
            "detailed technical specifications."
        ),
    },
}
|
| 165 |
+
|
| 166 |
+
# ---------------------------------------------------------------------------
|
| 167 |
+
# Specification taxonomy
|
| 168 |
+
# ---------------------------------------------------------------------------
|
| 169 |
+
# ``SPECIFICATION_TYPES`` controls the categories displayed in the UI when
|
| 170 |
+
# reviewing and exporting specifications.
|
| 171 |
+
# Ordered list of categories shown in the UI; the order here determines the
# order of category sections when reviewing and exporting specifications.
SPECIFICATION_TYPES = [
    "User Stories",
    "Features",
    "API Endpoints",
    "Database Design",
    "System Architecture",
]
|
| 178 |
+
|
| 179 |
+
# ---------------------------------------------------------------------------
|
| 180 |
+
# Export template configuration
|
| 181 |
+
# ---------------------------------------------------------------------------
|
| 182 |
+
# Each export format references template files stored under ``templates/``. The
|
| 183 |
+
# metadata here describes how those templates should be used by the export
|
| 184 |
+
# helpers in ``utils.py`` or ``app.py``.
|
| 185 |
+
# Export formats: each entry names a template file under ``templates/``, the
# MIME type to serve it as, and a human-readable description for the UI.
EXPORT_TEMPLATES: Dict[str, Dict[str, str]] = {
    "html": {
        "path": "templates/export_html.html",
        "content_type": "text/html",
        "description": "Rich HTML report suitable for sharing with stakeholders.",
    },
    "markdown": {
        "path": "templates/export_markdown.md",
        "content_type": "text/markdown",
        "description": "Lightweight Markdown export for version control or wikis.",
    },
}
|
| 197 |
+
|
| 198 |
+
# ---------------------------------------------------------------------------
|
| 199 |
+
# Application configuration dataclasses
|
| 200 |
+
# ---------------------------------------------------------------------------
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
@dataclass
class ProviderCredential:
    """Runtime view of provider configuration resolved from the environment."""

    # Key into ``AI_PROVIDERS`` (e.g. ``"openai"``).
    provider: str
    # Name of the environment variable that holds this provider's API key.
    env_var: str
    # The resolved key, or ``None`` when the variable is unset or empty.
    api_key: Optional[str] = None

    @property
    def display_name(self) -> str:
        """Return the human-friendly name defined in ``AI_PROVIDERS``."""

        # Fall back to a title-cased key for providers missing from the table.
        provider_meta = AI_PROVIDERS.get(self.provider, {})
        return provider_meta.get("display_name", self.provider.title())
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
@dataclass
class AppConfig:
    """Container holding runtime configuration for the Gradio interface."""

    # Absolute path of the SQLite database file.
    database_path: Path
    # Credential view for every known provider (configured or not).
    providers: Dict[str, ProviderCredential] = field(default_factory=dict)
    # Provider tried first when dispatching AI requests.
    default_provider: str = "openai"
    # True when no provider has an API key; the app serves mock responses.
    demo_mode: bool = False
    # Hugging Face Space id when running inside a Space, else ``None``.
    space_id: Optional[str] = None

    @classmethod
    def from_environment(cls) -> "AppConfig":
        """Build an :class:`AppConfig` instance using environment variables.

        Resolution order matters: ``.env`` is loaded first so every subsequent
        ``os.environ`` read sees those values, then the static configuration
        tables are validated before anything is derived from them.
        """

        load_dotenv()
        # NOTE(review): ``validate_configuration`` is expected to be defined
        # later in this module — confirm it aggregates the validate_* helpers.
        validate_configuration()

        env = os.environ
        # Heuristic Space detection: any of these variables implies we are on
        # Hugging Face infrastructure, where ``/data`` is the persistent disk.
        is_spaces = any(env.get(var) for var in ("SPACE_ID", "HF_SPACE_ID", "HF_HOME"))
        data_dir = Path(
            env.get("NAEXYA_DATA_DIR")
            or ("/data" if is_spaces else Path(__file__).resolve().parent)
        )
        data_dir.mkdir(parents=True, exist_ok=True)
        database_path = (data_dir / env.get("NAEXYA_DB_FILENAME", "naexya_docs_ai.db")).resolve()

        # Maps provider key -> environment variable carrying its API key.
        provider_env_map = {
            "openai": "OPENAI_API_KEY",
            "anthropic": "ANTHROPIC_API_KEY",
            "google": "GOOGLE_API_KEY",
            "xai": "XAI_API_KEY",
            "moonshot": "MOONSHOT_API_KEY",
            "qwen": "QWEN_API_KEY",
        }

        providers = {
            name: ProviderCredential(
                provider=name,
                env_var=env_var,
                # ``or None`` normalizes empty-string values to "not configured".
                api_key=env.get(env_var) or None,
            )
            for name, env_var in provider_env_map.items()
        }

        # Choose a sensible default provider, preferring explicit environment configuration.
        configured = [key for key, cred in providers.items() if cred.api_key]
        requested_default = (env.get("NAEXYA_DEFAULT_PROVIDER") or "openai").lower()
        if requested_default not in providers:
            requested_default = "openai"
        # Honour the requested default only when it actually has a key;
        # otherwise fall back to the first configured provider, or "openai".
        default_provider = requested_default if (configured and requested_default in configured) else (configured[0] if configured else "openai")

        demo_mode = not bool(configured)

        return cls(
            database_path=database_path,
            providers=providers,
            default_provider=default_provider,
            demo_mode=demo_mode,
            space_id=env.get("SPACE_ID") or env.get("HF_SPACE_ID"),
        )

    def get_api_key(self, provider: str) -> Optional[str]:
        """Retrieve the configured API key for ``provider`` if available."""

        # Lower-cased lookup so callers may pass "OpenAI" or "openai".
        credential = self.providers.get(provider.lower())
        return credential.api_key if credential else None

    def configured_providers(self) -> Dict[str, ProviderCredential]:
        """Return only the providers that currently have API keys configured."""

        return {name: cred for name, cred in self.providers.items() if cred.api_key}
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
# ---------------------------------------------------------------------------
|
| 293 |
+
# Validation utilities
|
| 294 |
+
# ---------------------------------------------------------------------------
|
| 295 |
+
# The functions below provide quick sanity checks that configuration dictionaries
|
| 296 |
+
# contain the expected fields. They raise ``ValueError`` with descriptive
|
| 297 |
+
# messages so callers can fail fast during application start-up.
|
| 298 |
+
|
| 299 |
+
def validate_provider_config(provider_key: str) -> None:
    """Validate a single provider configuration entry.

    Args:
        provider_key: The dictionary key identifying the provider (e.g. ``"openai"``).

    Raises:
        ValueError: If required fields are missing or improperly formatted.
    """

    config = AI_PROVIDERS.get(provider_key)
    if config is None:
        raise ValueError(f"Provider '{provider_key}' is not defined in AI_PROVIDERS.")

    # Fields every provider entry must define for the client to operate.
    required_fields = (
        "display_name",
        "base_url",
        "chat_endpoint",
        "default_model",
        "headers",
        "default_params",
        "rate_limits",
    )
    missing = [field for field in required_fields if field not in config]
    if missing:
        raise ValueError(
            f"Provider '{provider_key}' is missing required fields: {', '.join(missing)}"
        )

    # The Authorization header is templated at request time, so it must keep
    # the literal ``{api_key}`` placeholder available for substitution.
    headers = config["headers"]
    if "Authorization" in headers:
        if "{api_key}" not in headers["Authorization"]:
            raise ValueError(
                f"Provider '{provider_key}' Authorization header must include '{{api_key}}' placeholder."
            )
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def validate_all_providers() -> None:
    """Validate every provider configuration entry."""

    # Iterating the mapping yields its keys; each one is checked in turn so
    # the first malformed provider raises a descriptive ``ValueError``.
    for key in AI_PROVIDERS:
        validate_provider_config(key)
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
def validate_personas() -> None:
    """Ensure persona definitions include prompts for consistent behaviour."""

    for key, persona in AI_PERSONAS.items():
        # A persona without a prompt key is invalid outright.
        if "prompt" not in persona:
            raise ValueError(f"Persona '{key}' must include a non-empty prompt.")
        # A present-but-blank prompt is equally unusable.
        if not persona["prompt"].strip():
            raise ValueError(f"Persona '{key}' must include a non-empty prompt.")
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def validate_specification_types() -> None:
    """Verify specification types are unique and non-empty."""

    if not SPECIFICATION_TYPES:
        raise ValueError("SPECIFICATION_TYPES must contain at least one entry.")

    # Strip whitespace and drop blank entries; a length mismatch afterwards
    # means at least one entry was blank.
    cleaned = []
    for spec in SPECIFICATION_TYPES:
        stripped = spec.strip()
        if stripped:
            cleaned.append(stripped)
    if len(cleaned) != len(SPECIFICATION_TYPES):
        raise ValueError("SPECIFICATION_TYPES must not contain blank values.")

    # Deduplicating must not shrink the list, otherwise entries repeat.
    if len(set(cleaned)) != len(cleaned):
        raise ValueError("SPECIFICATION_TYPES entries must be unique.")
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def validate_export_templates() -> None:
    """Confirm export template metadata includes expected fields."""

    # Every template must declare where it lives, how it is served, and what
    # it is for.
    required = {"path", "content_type", "description"}
    for name, template in EXPORT_TEMPLATES.items():
        absent = required.difference(template.keys())
        if absent:
            raise ValueError(
                f"Export template '{name}' is missing fields: {', '.join(sorted(absent))}"
            )
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def validate_configuration() -> None:
    """Run all configuration validators.

    This helper is convenient during application start-up to ensure
    environment configuration issues are detected early rather than failing
    deep inside the request cycle.
    """

    # Run each validator in a fixed order; the first failure propagates.
    validators = (
        validate_all_providers,
        validate_personas,
        validate_specification_types,
        validate_export_templates,
    )
    for check in validators:
        check()
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
__all__ = [
|
| 390 |
+
"AI_PROVIDERS",
|
| 391 |
+
"AI_PERSONAS",
|
| 392 |
+
"SPECIFICATION_TYPES",
|
| 393 |
+
"EXPORT_TEMPLATES",
|
| 394 |
+
"ProviderCredential",
|
| 395 |
+
"AppConfig",
|
| 396 |
+
"validate_configuration",
|
| 397 |
+
]
|
config.yaml
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
title: Naexya Docs AI
|
| 2 |
+
emoji: "📋"
|
| 3 |
+
colorFrom: blue
|
| 4 |
+
colorTo: purple
|
| 5 |
+
sdk: gradio
|
| 6 |
+
sdk_version: 4.0.0
|
| 7 |
+
app_file: app.py
|
| 8 |
+
pinned: false
|
| 9 |
+
license: mit
|
database.py
ADDED
|
@@ -0,0 +1,589 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Database layer for Naexya Docs AI.
|
| 2 |
+
|
| 3 |
+
This module centralises all SQLite interactions used by the application. By
|
| 4 |
+
keeping the SQL logic in one place the rest of the codebase can focus on the
|
| 5 |
+
business workflow while delegating persistence concerns here. Each function is
|
| 6 |
+
carefully documented so future contributors understand not only *what* the
|
| 7 |
+
function does but *why* the design decisions were made.
|
| 8 |
+
|
| 9 |
+
The helper functions below follow a handful of guiding principles:
|
| 10 |
+
|
| 11 |
+
* **Single connection helper** – ``_get_connection`` ensures every call uses
|
| 12 |
+
the same connection configuration and enables ``sqlite3.Row`` mapping for
|
| 13 |
+
ergonomic dictionary-style access.
|
| 14 |
+
* **Explicit transactions** – ``with`` blocks are used to guarantee commits and
|
| 15 |
+
to automatically close connections regardless of success or failure.
|
| 16 |
+
* **Robust error handling** – problems are logged with contextual information
|
| 17 |
+
before being re-raised, giving the caller an opportunity to surface helpful
|
| 18 |
+
feedback in the UI while still capturing the original stack trace.
|
| 19 |
+
* **Comprehensive comments** – inline notes explain the schema, relationships,
|
| 20 |
+
and reasoning so the file doubles as lightweight documentation.
|
| 21 |
+
"""
|
| 22 |
+
|
| 23 |
+
from __future__ import annotations

import logging
import sqlite3
from contextlib import closing
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional
|
| 30 |
+
|
| 31 |
+
# ---------------------------------------------------------------------------
|
| 32 |
+
# Module-level configuration
|
| 33 |
+
# ---------------------------------------------------------------------------
|
| 34 |
+
|
| 35 |
+
# Resolve the database file relative to this module. Placing the database in
|
| 36 |
+
# the repository root keeps the demo self-contained while allowing advanced
|
| 37 |
+
# users to supply a custom path when embedding the library elsewhere.
|
| 38 |
+
DATABASE_PATH = Path(__file__).resolve().parent / "naexya_docs_ai.db"
|
| 39 |
+
|
| 40 |
+
# Configure a module-specific logger so calling code can hook into the
|
| 41 |
+
# application's logging setup. ``getLogger(__name__)`` ensures messages are
|
| 42 |
+
# namespaced to ``database`` making them easy to filter.
|
| 43 |
+
LOGGER = logging.getLogger(__name__)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _get_connection(db_path: Optional[Path] = None) -> sqlite3.Connection:
|
| 47 |
+
"""Create a SQLite connection with row access configured.
|
| 48 |
+
|
| 49 |
+
Args:
|
| 50 |
+
db_path: Optional custom database path. When ``None`` the default
|
| 51 |
+
``DATABASE_PATH`` constant is used.
|
| 52 |
+
|
| 53 |
+
Returns:
|
| 54 |
+
A ``sqlite3.Connection`` instance with ``row_factory`` set to
|
| 55 |
+
``sqlite3.Row`` so query results behave like dictionaries.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
connection = sqlite3.connect(db_path or DATABASE_PATH)
|
| 59 |
+
connection.row_factory = sqlite3.Row
|
| 60 |
+
return connection
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
# ---------------------------------------------------------------------------
|
| 64 |
+
# Schema management
|
| 65 |
+
# ---------------------------------------------------------------------------
|
| 66 |
+
|
| 67 |
+
def init_database(db_path: Optional[Path] = None) -> None:
    """Create all required tables if they do not already exist.

    The application stores projects, conversations, chat messages, and
    extracted specifications. ``init_database`` is idempotent; running it
    multiple times simply ensures the schema remains available without wiping
    existing data.

    Args:
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Raises:
        sqlite3.DatabaseError: If the schema cannot be created.
    """

    LOGGER.debug("Initialising SQLite schema")
    try:
        # ``closing`` guarantees the connection is actually closed: the
        # sqlite3 connection context manager only manages transactions, so
        # without it every call leaked an open file handle.
        with closing(_get_connection(db_path)) as conn:
            conn.executescript(
                """
                -- High-level workspace definition with a human-friendly
                -- name and optional description.
                CREATE TABLE IF NOT EXISTS projects (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    name TEXT NOT NULL UNIQUE,
                    description TEXT,
                    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
                );

                -- One chat thread per persona (requirements, technical, ...)
                -- linked to the owning project.  ``is_locked`` prevents
                -- further edits once a conversation has been validated.
                CREATE TABLE IF NOT EXISTS conversations (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    project_id INTEGER NOT NULL,
                    persona_type TEXT NOT NULL,
                    is_locked INTEGER DEFAULT 0,
                    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                    FOREIGN KEY (project_id) REFERENCES projects(id)
                );

                -- Dialog history; ``role`` mirrors the OpenAI convention of
                -- ``user`` / ``assistant`` so additional participants can be
                -- introduced without a schema change.
                CREATE TABLE IF NOT EXISTS messages (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    conversation_id INTEGER NOT NULL,
                    role TEXT NOT NULL,
                    content TEXT NOT NULL,
                    timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
                    FOREIGN KEY (conversation_id) REFERENCES conversations(id)
                );

                -- Structured outputs created by the AI personas; ``status``
                -- tracks whether an item is pending validation or approved.
                CREATE TABLE IF NOT EXISTS specifications (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    project_id INTEGER NOT NULL,
                    conversation_id INTEGER,
                    spec_type TEXT NOT NULL,
                    title TEXT NOT NULL,
                    content TEXT NOT NULL,
                    status TEXT DEFAULT 'pending',
                    created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
                    FOREIGN KEY (project_id) REFERENCES projects(id),
                    FOREIGN KEY (conversation_id) REFERENCES conversations(id)
                );

                -- Validated specification summaries used by the Gradio UI
                -- and export features; kept separate from the richer
                -- workflow tables above.
                CREATE TABLE IF NOT EXISTS approved_specs (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    title TEXT NOT NULL,
                    content TEXT NOT NULL,
                    created_at DATETIME DEFAULT CURRENT_TIMESTAMP
                );
                """
            )
            conn.commit()
    except sqlite3.DatabaseError as error:
        LOGGER.exception("Database initialisation failed: %s", error)
        raise
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
# ---------------------------------------------------------------------------
|
| 171 |
+
# Project management helpers
|
| 172 |
+
# ---------------------------------------------------------------------------
|
| 173 |
+
|
| 174 |
+
def create_project(name: str, description: str = "", db_path: Optional[Path] = None) -> int:
    """Insert a new project row and return its generated ID.

    Args:
        name: Unique, human-friendly project name.
        description: Optional free-form project description.
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Returns:
        The ``rowid`` of the newly inserted project.

    Raises:
        sqlite3.IntegrityError: On duplicate names or other constraint
            violations, so the UI can give clear feedback.
    """

    LOGGER.info("Creating project: %s", name)
    try:
        # ``closing`` releases the connection even on failure; the sqlite3
        # context manager alone only handles the transaction.
        with closing(_get_connection(db_path)) as conn:
            cursor = conn.execute(
                "INSERT INTO projects (name, description) VALUES (?, ?)",
                (name, description),
            )
            conn.commit()
            project_id = cursor.lastrowid
            LOGGER.debug("Created project %s with id %s", name, project_id)
            return project_id
    except sqlite3.IntegrityError as error:
        LOGGER.exception("Failed to create project '%s': %s", name, error)
        raise
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def get_projects(db_path: Optional[Path] = None) -> List[Dict[str, str]]:
    """Return all projects ordered by most recent first.

    Args:
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Returns:
        A list of dicts with ``id``, ``name``, ``description`` and
        ``created_at`` keys, newest first.

    Raises:
        sqlite3.DatabaseError: If the query fails.
    """

    LOGGER.debug("Fetching project list")
    try:
        # ``closing`` ensures the connection is released after the read.
        with closing(_get_connection(db_path)) as conn:
            rows = conn.execute(
                "SELECT id, name, description, created_at FROM projects ORDER BY created_at DESC"
            ).fetchall()
            return [dict(row) for row in rows]
    except sqlite3.DatabaseError as error:
        LOGGER.exception("Failed to fetch projects: %s", error)
        raise
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
# ---------------------------------------------------------------------------
|
| 213 |
+
# Conversation helpers
|
| 214 |
+
# ---------------------------------------------------------------------------
|
| 215 |
+
|
| 216 |
+
def create_conversation(
    project_id: int,
    persona_type: str,
    db_path: Optional[Path] = None,
) -> int:
    """Start a new conversation for the supplied project and persona.

    Args:
        project_id: Row id of the owning project.
        persona_type: Persona label (e.g. ``"requirements"``, ``"technical"``).
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Returns:
        The ``rowid`` of the new conversation.

    Raises:
        sqlite3.DatabaseError: If the insert fails.
    """

    LOGGER.info("Starting %s conversation for project %s", persona_type, project_id)
    try:
        # ``closing`` releases the connection even when the insert raises.
        with closing(_get_connection(db_path)) as conn:
            cursor = conn.execute(
                "INSERT INTO conversations (project_id, persona_type) VALUES (?, ?)",
                (project_id, persona_type),
            )
            conn.commit()
            conversation_id = cursor.lastrowid
            LOGGER.debug("Conversation %s created", conversation_id)
            return conversation_id
    except sqlite3.DatabaseError as error:
        LOGGER.exception(
            "Failed to create conversation for project %s (%s): %s",
            project_id,
            persona_type,
            error,
        )
        raise
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def add_message(
    conversation_id: int,
    role: str,
    content: str,
    db_path: Optional[Path] = None,
) -> int:
    """Persist an individual chat message belonging to a conversation.

    Args:
        conversation_id: Row id of the owning conversation.
        role: Message author role (``"user"`` or ``"assistant"``).
        content: Message text.
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Returns:
        The ``rowid`` of the stored message.

    Raises:
        sqlite3.DatabaseError: If the insert fails.
    """

    LOGGER.debug("Adding %s message to conversation %s", role, conversation_id)
    try:
        # ``closing`` releases the connection regardless of outcome.
        with closing(_get_connection(db_path)) as conn:
            cursor = conn.execute(
                "INSERT INTO messages (conversation_id, role, content) VALUES (?, ?, ?)",
                (conversation_id, role, content),
            )
            conn.commit()
            message_id = cursor.lastrowid
            LOGGER.debug("Stored message %s", message_id)
            return message_id
    except sqlite3.DatabaseError as error:
        LOGGER.exception(
            "Failed to add message to conversation %s: %s", conversation_id, error
        )
        raise
|
| 268 |
+
|
| 269 |
+
|
| 270 |
+
def lock_conversation(conversation_id: int, db_path: Optional[Path] = None) -> None:
    """Mark a conversation as locked to prevent further editing.

    Args:
        conversation_id: Row id of the conversation to lock.
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Raises:
        sqlite3.DatabaseError: If the update fails.
    """

    LOGGER.info("Locking conversation %s", conversation_id)
    try:
        # ``closing`` releases the connection regardless of outcome.
        with closing(_get_connection(db_path)) as conn:
            conn.execute(
                "UPDATE conversations SET is_locked = 1 WHERE id = ?",
                (conversation_id,),
            )
            conn.commit()
    except sqlite3.DatabaseError as error:
        LOGGER.exception("Failed to lock conversation %s: %s", conversation_id, error)
        raise
|
| 284 |
+
|
| 285 |
+
|
| 286 |
+
# ---------------------------------------------------------------------------
|
| 287 |
+
# Specification helpers
|
| 288 |
+
# ---------------------------------------------------------------------------
|
| 289 |
+
|
| 290 |
+
def create_specification(
    project_id: int,
    conversation_id: Optional[int],
    spec_type: str,
    title: str,
    content: str,
    db_path: Optional[Path] = None,
) -> int:
    """Save a generated specification in ``pending`` status.

    Args:
        project_id: Row id of the owning project.
        conversation_id: Optional source conversation; ``None`` when the
            specification was not derived from a chat thread.
        spec_type: Category label (e.g. ``"Business Requirements"``).
        title: Human-readable specification title.
        content: Full specification body.
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Returns:
        The ``rowid`` of the stored specification.

    Raises:
        sqlite3.DatabaseError: If the insert fails.
    """

    LOGGER.info("Recording %s specification for project %s", spec_type, project_id)
    try:
        # ``closing`` releases the connection regardless of outcome.
        with closing(_get_connection(db_path)) as conn:
            cursor = conn.execute(
                """
                INSERT INTO specifications (
                    project_id,
                    conversation_id,
                    spec_type,
                    title,
                    content
                ) VALUES (?, ?, ?, ?, ?)
                """,
                (project_id, conversation_id, spec_type, title, content),
            )
            conn.commit()
            specification_id = cursor.lastrowid
            LOGGER.debug("Specification %s stored", specification_id)
            return specification_id
    except sqlite3.DatabaseError as error:
        LOGGER.exception(
            "Failed to create specification for project %s: %s", project_id, error
        )
        raise
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def get_pending_specifications(
    project_id: int,
    db_path: Optional[Path] = None,
) -> List[Dict[str, str]]:
    """Return specifications awaiting approval for the given project.

    Args:
        project_id: Row id of the owning project.
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Returns:
        Pending specifications ordered oldest-first so reviewers can process
        them in submission order.

    Raises:
        sqlite3.DatabaseError: If the query fails.
    """

    LOGGER.debug("Fetching pending specifications for project %s", project_id)
    try:
        # ``closing`` releases the connection after the read.
        with closing(_get_connection(db_path)) as conn:
            rows = conn.execute(
                """
                SELECT id, spec_type, title, content, created_at
                FROM specifications
                WHERE project_id = ? AND status = 'pending'
                ORDER BY created_at ASC
                """,
                (project_id,),
            ).fetchall()
            return [dict(row) for row in rows]
    except sqlite3.DatabaseError as error:
        LOGGER.exception(
            "Failed to retrieve pending specifications for project %s: %s",
            project_id,
            error,
        )
        raise
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
def approve_specification(spec_id: int, db_path: Optional[Path] = None) -> None:
    """Mark a specification as approved.

    Args:
        spec_id: Row id of the specification to approve.
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Raises:
        sqlite3.DatabaseError: If the update fails.
    """

    LOGGER.info("Approving specification %s", spec_id)
    try:
        # ``closing`` releases the connection regardless of outcome.
        with closing(_get_connection(db_path)) as conn:
            conn.execute(
                "UPDATE specifications SET status = 'approved' WHERE id = ?",
                (spec_id,),
            )
            conn.commit()
    except sqlite3.DatabaseError as error:
        LOGGER.exception("Failed to approve specification %s: %s", spec_id, error)
        raise
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def get_approved_specifications(
    project_id: int,
    spec_type: Optional[str] = None,
    db_path: Optional[Path] = None,
) -> List[Dict[str, str]]:
    """Return approved specifications filtered by project and optional type.

    Args:
        project_id: Row id of the owning project.
        spec_type: When given, restrict results to this specification type.
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Returns:
        Approved specifications, newest first.

    Raises:
        sqlite3.DatabaseError: If the query fails.
    """

    LOGGER.debug(
        "Fetching approved specifications for project %s (type=%s)",
        project_id,
        spec_type or "*",
    )
    # Build the query incrementally instead of duplicating two nearly
    # identical SQL statements; parameters stay bound, never interpolated.
    query = (
        "SELECT id, spec_type, title, content, created_at "
        "FROM specifications "
        "WHERE project_id = ? AND status = 'approved'"
    )
    params = [project_id]
    if spec_type:
        query += " AND spec_type = ?"
        params.append(spec_type)
    query += " ORDER BY created_at DESC"
    try:
        # ``closing`` releases the connection after the read.
        with closing(_get_connection(db_path)) as conn:
            rows = conn.execute(query, params).fetchall()
            return [dict(row) for row in rows]
    except sqlite3.DatabaseError as error:
        LOGGER.exception(
            "Failed to fetch approved specifications for project %s: %s",
            project_id,
            error,
        )
        raise
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
# ---------------------------------------------------------------------------
|
| 415 |
+
# Demo data
|
| 416 |
+
# ---------------------------------------------------------------------------
|
| 417 |
+
|
| 418 |
+
def create_sample_data(db_path: Optional[Path] = None) -> None:
    """Populate the database with a minimal set of demo records.

    This helper is intentionally idempotent – it only inserts data when the
    database is empty. The goal is to provide a ready-to-explore environment
    for users trying the application without configuring API keys.

    Args:
        db_path: Optional custom database path; defaults to ``DATABASE_PATH``.

    Raises:
        sqlite3.DatabaseError: If any of the underlying inserts fail.
    """

    LOGGER.info("Seeding sample data if database is empty")
    try:
        # Probe emptiness on a short-lived connection and close it *before*
        # calling the seed helpers below, each of which opens its own
        # connection to the same file. The original kept this connection
        # open for the whole seed, risking lock contention.
        with closing(_get_connection(db_path)) as conn:
            count = conn.execute("SELECT COUNT(*) as count FROM projects").fetchone()["count"]
        if count:
            LOGGER.debug("Sample data already present; skipping seed")
            return

        # Create a sample project that the UI can immediately load.
        project_id = create_project(
            "Demo Product",
            "Sample workspace showcasing Naexya Docs AI capabilities",
            db_path=db_path,
        )

        # Start one conversation per persona to demonstrate the workflow.
        requirements_conv = create_conversation(
            project_id, "requirements", db_path=db_path
        )
        technical_conv = create_conversation(
            project_id, "technical", db_path=db_path
        )

        # Seed a few representative chat messages to illustrate history.
        add_message(
            requirements_conv,
            "user",
            "We need a mobile app for ordering office supplies with approval workflows.",
            db_path=db_path,
        )
        add_message(
            requirements_conv,
            "assistant",
            "Understood. I'll outline the business goals and success metrics.",
            db_path=db_path,
        )
        add_message(
            technical_conv,
            "assistant",
            "Suggesting a serverless backend with OAuth authentication and inventory sync.",
            db_path=db_path,
        )

        # Finally, add a mixture of pending and approved specifications so
        # the validation and reporting tabs have realistic content.
        spec_id = create_specification(
            project_id,
            requirements_conv,
            "Business Requirements",
            "Ordering Experience",
            "Employees can browse catalogues, submit carts, and track approvals.",
            db_path=db_path,
        )
        create_specification(
            project_id,
            technical_conv,
            "Technical Architecture",
            "Solution Overview",
            "React Native client with AWS Lambda microservices and DynamoDB storage.",
            db_path=db_path,
        )

        # Approve one specification to show both states in the UI.
        approve_specification(spec_id, db_path=db_path)

        LOGGER.info("Sample data created successfully")
    except sqlite3.DatabaseError as error:
        LOGGER.exception("Failed to create sample data: %s", error)
        raise
|
| 497 |
+
|
| 498 |
+
|
| 499 |
+
# ---------------------------------------------------------------------------
|
| 500 |
+
# Lightweight manager used by the Gradio interface
|
| 501 |
+
# ---------------------------------------------------------------------------
|
| 502 |
+
|
| 503 |
+
|
| 504 |
+
@dataclass
class SpecificationRecord:
    """Representation of an approved specification stored for exports.

    Mirrors one row of the ``approved_specs`` table as returned by
    ``DatabaseManager.fetch_recent_specifications``.
    """

    # Primary key of the ``approved_specs`` row.
    id: int
    # Human-readable specification title.
    title: str
    # Full specification body, exactly as stored.
    content: str
    # Creation timestamp as the raw string SQLite returns
    # (``CURRENT_TIMESTAMP`` format).
    created_at: str
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
class DatabaseManager:
    """Simplified database helper tailored for the Gradio UI flows.

    Wraps the ``approved_specs`` table with two operations: saving a
    validated specification and listing recent ones for export/browsing.
    """

    def __init__(self, database_path: Path):
        # Normalise to ``Path`` and ensure the schema exists before use.
        self.database_path = Path(database_path)
        init_database(self.database_path)

    def save_specification(self, title: str, content: str) -> int:
        """Persist an approved specification for later browsing and export.

        Args:
            title: Human-readable specification title.
            content: Full specification body.

        Returns:
            The ``rowid`` of the stored row.

        Raises:
            sqlite3.DatabaseError: If the insert fails.
        """

        LOGGER.info("Persisting approved specification: %s", title)
        try:
            # ``closing`` releases the connection even on failure; sqlite3's
            # context manager alone only handles the transaction.
            with closing(_get_connection(self.database_path)) as conn:
                cursor = conn.execute(
                    "INSERT INTO approved_specs (title, content) VALUES (?, ?)",
                    (title, content),
                )
                conn.commit()
                spec_id = int(cursor.lastrowid)
                LOGGER.debug("Approved specification stored with id %s", spec_id)
                return spec_id
        except sqlite3.DatabaseError as error:
            LOGGER.exception("Failed to store approved specification '%s': %s", title, error)
            raise

    def fetch_recent_specifications(self, limit: int = 50) -> List[SpecificationRecord]:
        """Return the most recently stored approved specifications.

        Args:
            limit: Maximum number of rows to return (newest first).

        Raises:
            sqlite3.DatabaseError: If the query fails.
        """

        LOGGER.debug("Fetching up to %s approved specifications", limit)
        try:
            # ``closing`` releases the connection after the read.
            with closing(_get_connection(self.database_path)) as conn:
                rows = conn.execute(
                    """
                    SELECT id, title, content, created_at
                    FROM approved_specs
                    ORDER BY created_at DESC
                    LIMIT ?
                    """,
                    (limit,),
                ).fetchall()
            return [
                SpecificationRecord(
                    id=int(row["id"]),
                    title=str(row["title"]),
                    content=str(row["content"]),
                    created_at=str(row["created_at"]),
                )
                for row in rows
            ]
        except sqlite3.DatabaseError as error:
            LOGGER.exception("Failed to fetch approved specifications: %s", error)
            raise
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
# Ensure the schema exists whenever this module is imported. This keeps the
# rest of the application simple because it can assume the tables are present.
# NOTE(review): this performs a filesystem write at import time (an import
# side effect); embedders using a custom path should call ``init_database``
# explicitly with that path as well.
init_database()


# Public API of the module; ``from database import *`` exposes exactly these.
__all__ = [
    "DATABASE_PATH",
    "DatabaseManager",
    "SpecificationRecord",
    "init_database",
    "create_project",
    "get_projects",
    "create_conversation",
    "add_message",
    "lock_conversation",
    "create_specification",
    "get_pending_specifications",
    "approve_specification",
    "get_approved_specifications",
    "create_sample_data",
]
|
| 589 |
+
|
requirements.txt
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Requirements for running the Naexya Docs AI Gradio application.
|
| 2 |
+
# Each dependency below is documented to explain its role in the project.
|
| 3 |
+
#
|
| 4 |
+
# gradio powers the web-based interface that enables users to interact with
|
| 5 |
+
# AI specification tools directly from the browser with minimal boilerplate.
|
| 6 |
+
gradio==4.0.0
|
| 7 |
+
# requests provides a simple yet powerful HTTP client for calling external AI
|
| 8 |
+
# services that do not have dedicated SDKs, ensuring consistent API handling.
|
| 9 |
+
requests==2.31.0
|
| 10 |
+
# sqlite3 is part of the Python standard library and powers lightweight local
|
| 11 |
+
# storage for specifications; the entry here serves as documentation only.
|
| 12 |
+
# No pip installation is required because sqlite3 ships with Python 3.
|
| 13 |
+
# sqlite3
|
| 14 |
+
# python-dotenv loads environment variables from a .env file, simplifying
|
| 15 |
+
# configuration management for different environments (development, staging,
|
| 16 |
+
# production) without hardcoding secrets in the codebase.
|
| 17 |
+
python-dotenv==1.0.1
|
| 18 |
+
# markdown converts project specifications into Markdown-formatted text for
|
| 19 |
+
# exports and previews inside the application and supporting services.
|
| 20 |
+
markdown==3.5.2
|
| 21 |
+
# jinja2 renders HTML and Markdown export templates with dynamic content,
|
| 22 |
+
# allowing flexible formatting of generated specification documents.
|
| 23 |
+
jinja2==3.1.3
|
templates/export_html.html
ADDED
|
@@ -0,0 +1,537 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!--
|
| 2 |
+
Professional export template for Naexya Docs AI.
|
| 3 |
+
This template is rendered via Jinja2 using context prepared in utils.generate_export_html.
|
| 4 |
+
Extensive comments describe each major block so designers can tweak branding or structure.
|
| 5 |
+
-->
|
| 6 |
+
<!DOCTYPE html>
|
| 7 |
+
<html lang="en">
|
| 8 |
+
<head>
|
| 9 |
+
<meta charset="UTF-8" />
|
| 10 |
+
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
| 11 |
+
<title>{{ project_name }} – Specification Export | {{ brand_name }}</title>
|
| 12 |
+
<!--
|
| 13 |
+
Embedded stylesheet uses CSS variables for easy brand customization and
|
| 14 |
+
ensures the layout adapts gracefully for both screens and printed copies.
|
| 15 |
+
-->
|
| 16 |
+
<style>
|
| 17 |
+
:root {
|
| 18 |
+
--brand-primary: #1f3c88;
|
| 19 |
+
--brand-secondary: #19a974;
|
| 20 |
+
--brand-accent: #f5f7fb;
|
| 21 |
+
--text-color: #1f1f1f;
|
| 22 |
+
--muted-text: #5f6c7b;
|
| 23 |
+
--border-color: #d9e1ec;
|
| 24 |
+
--card-shadow: 0 12px 24px rgba(15, 34, 58, 0.08);
|
| 25 |
+
--font-family: "Inter", "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
|
| 26 |
+
}
|
| 27 |
+
|
| 28 |
+
* {
|
| 29 |
+
box-sizing: border-box;
|
| 30 |
+
}
|
| 31 |
+
|
| 32 |
+
body {
|
| 33 |
+
margin: 0;
|
| 34 |
+
padding: 0;
|
| 35 |
+
font-family: var(--font-family);
|
| 36 |
+
background: #ffffff;
|
| 37 |
+
color: var(--text-color);
|
| 38 |
+
line-height: 1.6;
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
.page {
|
| 42 |
+
max-width: 960px;
|
| 43 |
+
margin: 0 auto;
|
| 44 |
+
padding: 2.5rem 1.75rem 3.5rem;
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
/*
|
| 48 |
+
Header block contains brand identity and key project metadata.
|
| 49 |
+
Flex layout ensures the section remains responsive.
|
| 50 |
+
*/
|
| 51 |
+
.export-header {
|
| 52 |
+
display: flex;
|
| 53 |
+
flex-wrap: wrap;
|
| 54 |
+
align-items: flex-start;
|
| 55 |
+
gap: 1.5rem;
|
| 56 |
+
padding: 2rem;
|
| 57 |
+
border-radius: 18px;
|
| 58 |
+
background: linear-gradient(135deg, rgba(31, 60, 136, 0.92), rgba(25, 169, 116, 0.85));
|
| 59 |
+
color: #ffffff;
|
| 60 |
+
box-shadow: var(--card-shadow);
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
.export-header .branding {
|
| 64 |
+
flex: 1 1 240px;
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
.export-header h1 {
|
| 68 |
+
margin: 0;
|
| 69 |
+
font-size: 2.25rem;
|
| 70 |
+
letter-spacing: 0.04em;
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
.export-header .tagline {
|
| 74 |
+
margin: 0.35rem 0 0;
|
| 75 |
+
font-size: 1rem;
|
| 76 |
+
opacity: 0.9;
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
.project-meta {
|
| 80 |
+
flex: 2 1 320px;
|
| 81 |
+
background: rgba(255, 255, 255, 0.15);
|
| 82 |
+
border-radius: 14px;
|
| 83 |
+
padding: 1.25rem 1.5rem;
|
| 84 |
+
backdrop-filter: blur(4px);
|
| 85 |
+
}
|
| 86 |
+
|
| 87 |
+
.project-meta h2 {
|
| 88 |
+
margin: 0 0 0.75rem;
|
| 89 |
+
font-size: 1.65rem;
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
.project-meta p {
|
| 93 |
+
margin: 0 0 1rem;
|
| 94 |
+
color: #f1f5fb;
|
| 95 |
+
}
|
| 96 |
+
|
| 97 |
+
.project-meta dl {
|
| 98 |
+
display: grid;
|
| 99 |
+
grid-template-columns: repeat(auto-fit, minmax(160px, 1fr));
|
| 100 |
+
gap: 0.75rem;
|
| 101 |
+
margin: 0;
|
| 102 |
+
}
|
| 103 |
+
|
| 104 |
+
.project-meta dt {
|
| 105 |
+
font-weight: 600;
|
| 106 |
+
font-size: 0.85rem;
|
| 107 |
+
text-transform: uppercase;
|
| 108 |
+
letter-spacing: 0.08em;
|
| 109 |
+
opacity: 0.75;
|
| 110 |
+
}
|
| 111 |
+
|
| 112 |
+
.project-meta dd {
|
| 113 |
+
margin: 0;
|
| 114 |
+
font-size: 1rem;
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
/*
|
| 118 |
+
Statistics grid highlights counts and health metrics using cards so
|
| 119 |
+
stakeholders can absorb the state of the project at a glance.
|
| 120 |
+
*/
|
| 121 |
+
.statistics-section {
|
| 122 |
+
margin: 2.75rem 0;
|
| 123 |
+
}
|
| 124 |
+
|
| 125 |
+
.statistics-section h2 {
|
| 126 |
+
margin-bottom: 1rem;
|
| 127 |
+
font-size: 1.5rem;
|
| 128 |
+
color: var(--brand-primary);
|
| 129 |
+
}
|
| 130 |
+
|
| 131 |
+
.statistics-grid {
|
| 132 |
+
display: grid;
|
| 133 |
+
grid-template-columns: repeat(auto-fit, minmax(180px, 1fr));
|
| 134 |
+
gap: 1rem;
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
.stat-card {
|
| 138 |
+
background: var(--brand-accent);
|
| 139 |
+
border-radius: 14px;
|
| 140 |
+
padding: 1.25rem 1.5rem;
|
| 141 |
+
box-shadow: var(--card-shadow);
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
.stat-card.span-2 {
|
| 145 |
+
grid-column: span 2;
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
.stat-label {
|
| 149 |
+
display: block;
|
| 150 |
+
font-size: 0.85rem;
|
| 151 |
+
text-transform: uppercase;
|
| 152 |
+
letter-spacing: 0.08em;
|
| 153 |
+
color: var(--muted-text);
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
.stat-value {
|
| 157 |
+
display: block;
|
| 158 |
+
margin-top: 0.35rem;
|
| 159 |
+
font-size: 1.8rem;
|
| 160 |
+
font-weight: 700;
|
| 161 |
+
color: var(--brand-primary);
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
.status-list {
|
| 165 |
+
list-style: none;
|
| 166 |
+
margin: 0.75rem 0 0;
|
| 167 |
+
padding: 0;
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
.status-list li {
|
| 171 |
+
display: flex;
|
| 172 |
+
justify-content: space-between;
|
| 173 |
+
padding: 0.35rem 0;
|
| 174 |
+
border-bottom: 1px dashed rgba(31, 60, 136, 0.2);
|
| 175 |
+
}
|
| 176 |
+
|
| 177 |
+
.status-name {
|
| 178 |
+
font-weight: 600;
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
.status-count {
|
| 182 |
+
font-variant-numeric: tabular-nums;
|
| 183 |
+
color: var(--brand-primary);
|
| 184 |
+
}
|
| 185 |
+
|
| 186 |
+
/*
|
| 187 |
+
Navigation panel offers quick jumps to each specification category.
|
| 188 |
+
Anchors reuse the slugified IDs produced by utils._slugify.
|
| 189 |
+
*/
|
| 190 |
+
.table-of-contents {
|
| 191 |
+
margin: 3rem 0 2rem;
|
| 192 |
+
padding: 1.75rem 2rem;
|
| 193 |
+
border-radius: 16px;
|
| 194 |
+
border: 1px solid var(--border-color);
|
| 195 |
+
background: #ffffff;
|
| 196 |
+
box-shadow: var(--card-shadow);
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
.table-of-contents h2 {
|
| 200 |
+
margin-top: 0;
|
| 201 |
+
color: var(--brand-primary);
|
| 202 |
+
font-size: 1.45rem;
|
| 203 |
+
}
|
| 204 |
+
|
| 205 |
+
.toc-list {
|
| 206 |
+
margin: 1.25rem 0 0;
|
| 207 |
+
padding: 0;
|
| 208 |
+
list-style: none;
|
| 209 |
+
display: grid;
|
| 210 |
+
grid-template-columns: repeat(auto-fit, minmax(220px, 1fr));
|
| 211 |
+
gap: 0.75rem 1.25rem;
|
| 212 |
+
}
|
| 213 |
+
|
| 214 |
+
.toc-list li a {
|
| 215 |
+
display: flex;
|
| 216 |
+
justify-content: space-between;
|
| 217 |
+
align-items: center;
|
| 218 |
+
padding: 0.75rem 0.95rem;
|
| 219 |
+
border-radius: 12px;
|
| 220 |
+
text-decoration: none;
|
| 221 |
+
color: var(--text-color);
|
| 222 |
+
background: #f9fbff;
|
| 223 |
+
border: 1px solid transparent;
|
| 224 |
+
transition: all 0.2s ease;
|
| 225 |
+
}
|
| 226 |
+
|
| 227 |
+
.toc-list li a:hover,
|
| 228 |
+
.toc-list li a:focus {
|
| 229 |
+
border-color: var(--brand-primary);
|
| 230 |
+
box-shadow: 0 0 0 3px rgba(31, 60, 136, 0.15);
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
.toc-count {
|
| 234 |
+
font-weight: 600;
|
| 235 |
+
color: var(--brand-primary);
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
/*
|
| 239 |
+
Main specification sections are rendered using <section> and <article>
|
| 240 |
+
with cards for each approved specification entry.
|
| 241 |
+
*/
|
| 242 |
+
.specification-sections {
|
| 243 |
+
display: flex;
|
| 244 |
+
flex-direction: column;
|
| 245 |
+
gap: 2.5rem;
|
| 246 |
+
}
|
| 247 |
+
|
| 248 |
+
.spec-section {
|
| 249 |
+
padding: 2rem 2.25rem;
|
| 250 |
+
border-radius: 18px;
|
| 251 |
+
background: #ffffff;
|
| 252 |
+
border: 1px solid var(--border-color);
|
| 253 |
+
box-shadow: var(--card-shadow);
|
| 254 |
+
}
|
| 255 |
+
|
| 256 |
+
.spec-section.empty {
|
| 257 |
+
text-align: center;
|
| 258 |
+
color: var(--muted-text);
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
.section-header {
|
| 262 |
+
display: flex;
|
| 263 |
+
flex-wrap: wrap;
|
| 264 |
+
gap: 0.75rem;
|
| 265 |
+
justify-content: space-between;
|
| 266 |
+
align-items: center;
|
| 267 |
+
margin-bottom: 1.5rem;
|
| 268 |
+
}
|
| 269 |
+
|
| 270 |
+
.section-header h2 {
|
| 271 |
+
margin: 0;
|
| 272 |
+
font-size: 1.6rem;
|
| 273 |
+
color: var(--brand-primary);
|
| 274 |
+
}
|
| 275 |
+
|
| 276 |
+
.badge {
|
| 277 |
+
display: inline-flex;
|
| 278 |
+
align-items: center;
|
| 279 |
+
justify-content: center;
|
| 280 |
+
min-width: 2.5rem;
|
| 281 |
+
padding: 0.35rem 0.75rem;
|
| 282 |
+
border-radius: 999px;
|
| 283 |
+
background: rgba(31, 60, 136, 0.08);
|
| 284 |
+
color: var(--brand-primary);
|
| 285 |
+
font-weight: 600;
|
| 286 |
+
font-size: 0.85rem;
|
| 287 |
+
}
|
| 288 |
+
|
| 289 |
+
.spec-card {
|
| 290 |
+
margin-bottom: 1.65rem;
|
| 291 |
+
padding: 1.5rem 1.75rem;
|
| 292 |
+
border-radius: 14px;
|
| 293 |
+
border: 1px solid rgba(31, 60, 136, 0.1);
|
| 294 |
+
background: #fdfefe;
|
| 295 |
+
transition: transform 0.2s ease;
|
| 296 |
+
}
|
| 297 |
+
|
| 298 |
+
.spec-card:hover {
|
| 299 |
+
transform: translateY(-2px);
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
.spec-card h3 {
|
| 303 |
+
margin: 0 0 0.75rem;
|
| 304 |
+
font-size: 1.35rem;
|
| 305 |
+
color: var(--text-color);
|
| 306 |
+
}
|
| 307 |
+
|
| 308 |
+
.spec-meta {
|
| 309 |
+
display: flex;
|
| 310 |
+
flex-wrap: wrap;
|
| 311 |
+
gap: 0.75rem 1.5rem;
|
| 312 |
+
align-items: center;
|
| 313 |
+
margin-bottom: 1rem;
|
| 314 |
+
color: var(--muted-text);
|
| 315 |
+
font-size: 0.95rem;
|
| 316 |
+
}
|
| 317 |
+
|
| 318 |
+
.status-pill {
|
| 319 |
+
padding: 0.35rem 0.85rem;
|
| 320 |
+
border-radius: 999px;
|
| 321 |
+
background: rgba(25, 169, 116, 0.12);
|
| 322 |
+
color: #198754;
|
| 323 |
+
font-weight: 600;
|
| 324 |
+
text-transform: uppercase;
|
| 325 |
+
letter-spacing: 0.04em;
|
| 326 |
+
font-size: 0.75rem;
|
| 327 |
+
}
|
| 328 |
+
|
| 329 |
+
.status-pending {
|
| 330 |
+
background: rgba(255, 193, 7, 0.18);
|
| 331 |
+
color: #ad7a00;
|
| 332 |
+
}
|
| 333 |
+
|
| 334 |
+
.status-approved {
|
| 335 |
+
background: rgba(25, 169, 116, 0.15);
|
| 336 |
+
color: #146c43;
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
.status-rejected {
|
| 340 |
+
background: rgba(220, 53, 69, 0.15);
|
| 341 |
+
color: #842029;
|
| 342 |
+
}
|
| 343 |
+
|
| 344 |
+
.conversation-link {
|
| 345 |
+
color: var(--brand-primary);
|
| 346 |
+
text-decoration: none;
|
| 347 |
+
font-weight: 600;
|
| 348 |
+
}
|
| 349 |
+
|
| 350 |
+
.conversation-link:hover,
|
| 351 |
+
.conversation-link:focus {
|
| 352 |
+
text-decoration: underline;
|
| 353 |
+
}
|
| 354 |
+
|
| 355 |
+
.spec-body p {
|
| 356 |
+
margin: 0 0 0.85rem;
|
| 357 |
+
}
|
| 358 |
+
|
| 359 |
+
.spec-body em {
|
| 360 |
+
color: var(--muted-text);
|
| 361 |
+
}
|
| 362 |
+
|
| 363 |
+
/*
|
| 364 |
+
Conversation reference list provides anchors back to the originating
|
| 365 |
+
chats or external systems. Designers can re-point the base URL using
|
| 366 |
+
project_data["conversation_base_url"].
|
| 367 |
+
*/
|
| 368 |
+
.conversation-section {
|
| 369 |
+
margin: 3rem 0;
|
| 370 |
+
padding: 2rem 2.25rem;
|
| 371 |
+
border-radius: 16px;
|
| 372 |
+
border: 1px solid var(--border-color);
|
| 373 |
+
background: var(--brand-accent);
|
| 374 |
+
}
|
| 375 |
+
|
| 376 |
+
.conversation-section h2 {
|
| 377 |
+
margin-top: 0;
|
| 378 |
+
font-size: 1.5rem;
|
| 379 |
+
color: var(--brand-primary);
|
| 380 |
+
}
|
| 381 |
+
|
| 382 |
+
.conversation-list {
|
| 383 |
+
list-style: none;
|
| 384 |
+
margin: 1.25rem 0 0;
|
| 385 |
+
padding: 0;
|
| 386 |
+
display: grid;
|
| 387 |
+
grid-template-columns: repeat(auto-fit, minmax(220px, 1fr));
|
| 388 |
+
gap: 0.85rem;
|
| 389 |
+
}
|
| 390 |
+
|
| 391 |
+
.conversation-list a {
|
| 392 |
+
display: block;
|
| 393 |
+
padding: 0.75rem 0.95rem;
|
| 394 |
+
border-radius: 12px;
|
| 395 |
+
background: #ffffff;
|
| 396 |
+
border: 1px solid rgba(31, 60, 136, 0.12);
|
| 397 |
+
color: var(--brand-primary);
|
| 398 |
+
font-weight: 600;
|
| 399 |
+
text-decoration: none;
|
| 400 |
+
}
|
| 401 |
+
|
| 402 |
+
.conversation-list a:hover,
|
| 403 |
+
.conversation-list a:focus {
|
| 404 |
+
border-color: var(--brand-primary);
|
| 405 |
+
box-shadow: 0 0 0 3px rgba(31, 60, 136, 0.15);
|
| 406 |
+
}
|
| 407 |
+
|
| 408 |
+
/*
|
| 409 |
+
Footer summarises export metadata for auditing and printouts.
|
| 410 |
+
*/
|
| 411 |
+
footer {
|
| 412 |
+
margin-top: 3.5rem;
|
| 413 |
+
padding-top: 1.5rem;
|
| 414 |
+
border-top: 1px solid var(--border-color);
|
| 415 |
+
color: var(--muted-text);
|
| 416 |
+
font-size: 0.9rem;
|
| 417 |
+
text-align: center;
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
/*
|
| 421 |
+
Responsive adjustments ensure comfortable reading on tablets and phones.
|
| 422 |
+
*/
|
| 423 |
+
@media (max-width: 768px) {
|
| 424 |
+
.page {
|
| 425 |
+
padding: 2rem 1.25rem 3rem;
|
| 426 |
+
}
|
| 427 |
+
|
| 428 |
+
.export-header {
|
| 429 |
+
padding: 1.75rem;
|
| 430 |
+
}
|
| 431 |
+
|
| 432 |
+
.project-meta {
|
| 433 |
+
padding: 1rem 1.25rem;
|
| 434 |
+
}
|
| 435 |
+
|
| 436 |
+
.spec-section {
|
| 437 |
+
padding: 1.5rem 1.6rem;
|
| 438 |
+
}
|
| 439 |
+
|
| 440 |
+
.table-of-contents {
|
| 441 |
+
padding: 1.5rem 1.6rem;
|
| 442 |
+
}
|
| 443 |
+
}
|
| 444 |
+
|
| 445 |
+
/*
|
| 446 |
+
Print rules remove shadows and adjust spacing for crisp documents.
|
| 447 |
+
*/
|
| 448 |
+
@media print {
|
| 449 |
+
body {
|
| 450 |
+
color: #000000;
|
| 451 |
+
}
|
| 452 |
+
|
| 453 |
+
.page {
|
| 454 |
+
max-width: none;
|
| 455 |
+
padding: 1.25rem;
|
| 456 |
+
}
|
| 457 |
+
|
| 458 |
+
.export-header,
|
| 459 |
+
.spec-section,
|
| 460 |
+
.table-of-contents,
|
| 461 |
+
.conversation-section,
|
| 462 |
+
.stat-card {
|
| 463 |
+
box-shadow: none;
|
| 464 |
+
}
|
| 465 |
+
|
| 466 |
+
.spec-card {
|
| 467 |
+
border: 1px solid #cccccc;
|
| 468 |
+
page-break-inside: avoid;
|
| 469 |
+
}
|
| 470 |
+
|
| 471 |
+
footer {
|
| 472 |
+
page-break-inside: avoid;
|
| 473 |
+
}
|
| 474 |
+
}
|
| 475 |
+
</style>
|
| 476 |
+
</head>
|
| 477 |
+
<body>
|
| 478 |
+
<div class="page">
|
| 479 |
+
<!-- Header: brand identity and core project metadata. -->
|
| 480 |
+
<header class="export-header">
|
| 481 |
+
<div class="branding">
|
| 482 |
+
<h1>{{ brand_name }}</h1>
|
| 483 |
+
<p class="tagline">Project Specification Portfolio</p>
|
| 484 |
+
</div>
|
| 485 |
+
<div class="project-meta">
|
| 486 |
+
<h2>{{ project_name }}</h2>
|
| 487 |
+
{% if project_description %}
|
| 488 |
+
<p>{{ project_description }}</p>
|
| 489 |
+
{% endif %}
|
| 490 |
+
<dl>
|
| 491 |
+
<div>
|
| 492 |
+
<dt>Project ID</dt>
|
| 493 |
+
<dd>{{ project_identifier }}</dd>
|
| 494 |
+
</div>
|
| 495 |
+
<div>
|
| 496 |
+
<dt>Created</dt>
|
| 497 |
+
<dd>{% if project_created_at %}{{ project_created_at }}{% else %}Not available{% endif %}</dd>
|
| 498 |
+
</div>
|
| 499 |
+
<div>
|
| 500 |
+
<dt>Total Specifications</dt>
|
| 501 |
+
<dd>{{ specification_total }}</dd>
|
| 502 |
+
</div>
|
| 503 |
+
<div>
|
| 504 |
+
<dt>Last Activity</dt>
|
| 505 |
+
<dd>{% if latest_activity %}{{ latest_activity }}{% else %}Not available{% endif %}</dd>
|
| 506 |
+
</div>
|
| 507 |
+
</dl>
|
| 508 |
+
</div>
|
| 509 |
+
</header>
|
| 510 |
+
|
| 511 |
+
<!-- Statistics summary: rendered from utils._build_statistics_block. -->
|
| 512 |
+
<section class="statistics-section">
|
| 513 |
+
<h2>Project Overview</h2>
|
| 514 |
+
{{ statistics_block }}
|
| 515 |
+
</section>
|
| 516 |
+
|
| 517 |
+
<!-- Table of contents with quick links to each specification category. -->
|
| 518 |
+
<nav class="table-of-contents" aria-label="Specification categories">
|
| 519 |
+
<h2>Table of Contents</h2>
|
| 520 |
+
{{ table_of_contents }}
|
| 521 |
+
</nav>
|
| 522 |
+
|
| 523 |
+
<!-- Main specification content grouped by type. -->
|
| 524 |
+
<main class="specification-sections">
|
| 525 |
+
{{ specification_sections }}
|
| 526 |
+
</main>
|
| 527 |
+
|
| 528 |
+
<!-- Linked conversation references encourage reviewers to trace context. -->
|
| 529 |
+
{{ conversation_references }}
|
| 530 |
+
|
| 531 |
+
<!-- Footer summarises export provenance. -->
|
| 532 |
+
<footer>
|
| 533 |
+
Generated on {{ generated_at }} by {{ brand_name }}.
|
| 534 |
+
</footer>
|
| 535 |
+
</div>
|
| 536 |
+
</body>
|
| 537 |
+
</html>
|
templates/export_markdown.md
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!--
|
| 2 |
+
Markdown export template optimised for AI coding agents.
|
| 3 |
+
Structured front matter ensures downstream tools can parse metadata while
|
| 4 |
+
the main body mirrors specification categories with consistent headings.
|
| 5 |
+
-->
|
| 6 |
+
---
|
| 7 |
+
project_id: {{ metadata.project_id }}
|
| 8 |
+
project_created_at: {{ metadata.project_created_at }}
|
| 9 |
+
generation_timestamp: {{ generation_date }}
|
| 10 |
+
specification_totals:
|
| 11 |
+
overall: {{ spec_count }}
|
| 12 |
+
{% if metadata.spec_counts %}
|
| 13 |
+
by_type:
|
| 14 |
+
{% for entry in metadata.spec_counts %} - type: {{ entry.type }}
|
| 15 |
+
count: {{ entry.count }}
|
| 16 |
+
{% endfor %}
|
| 17 |
+
{% else %}
|
| 18 |
+
by_type: []
|
| 19 |
+
{% endif %}
|
| 20 |
+
status_breakdown:
|
| 21 |
+
{% if metadata.status_counts %}
|
| 22 |
+
{% for entry in metadata.status_counts %} - status: {{ entry.status }}
|
| 23 |
+
count: {{ entry.count }}
|
| 24 |
+
{% endfor %}
|
| 25 |
+
{% else %} - status: none recorded
|
| 26 |
+
count: 0
|
| 27 |
+
{% endif %}
|
| 28 |
+
latest_activity: {{ metadata.latest_activity }}
|
| 29 |
+
conversation_links:
|
| 30 |
+
{% if metadata.conversation_links %}
|
| 31 |
+
{% for link in metadata.conversation_links %} - id: {{ link.id }}
|
| 32 |
+
url: {{ link.url }}
|
| 33 |
+
{% endfor %}
|
| 34 |
+
{% else %}
|
| 35 |
+
[]
|
| 36 |
+
{% endif %}
|
| 37 |
+
---
|
| 38 |
+
# Project: {{ project_name }}
|
| 39 |
+
|
| 40 |
+
## Overview
|
| 41 |
+
- Description: {{ project_description }}
|
| 42 |
+
- Generated: {{ generation_date }}
|
| 43 |
+
- Total Specifications: {{ spec_count }}
|
| 44 |
+
|
| 45 |
+
## User Stories
|
| 46 |
+
{{ user_stories_section }}
|
| 47 |
+
|
| 48 |
+
## Features
|
| 49 |
+
{{ features_section }}
|
| 50 |
+
|
| 51 |
+
## API Endpoints
|
| 52 |
+
{{ api_endpoints_section }}
|
| 53 |
+
|
| 54 |
+
## Database Design
|
| 55 |
+
{{ database_design_section }}
|
| 56 |
+
|
| 57 |
+
## System Architecture
|
| 58 |
+
{{ system_architecture_section }}
|
| 59 |
+
|
| 60 |
+
## Implementation Notes
|
| 61 |
+
{{ implementation_notes }}
|
| 62 |
+
|
| 63 |
+
{% if additional_sections %}## Additional Categories
|
| 64 |
+
{{ additional_sections }}
|
| 65 |
+
{% endif %}
|
| 66 |
+
|
| 67 |
+
<!-- End of export template -->
|
utils.py
ADDED
|
@@ -0,0 +1,898 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utility helpers used across the Naexya Docs AI application.
|
| 2 |
+
|
| 3 |
+
The project pulls together configuration, database persistence, and a Gradio
|
| 4 |
+
interface. This module keeps shared helper functions in one place so they can
|
| 5 |
+
be reused by both the UI and background processes. Each function includes
|
| 6 |
+
extensive documentation that explains the intended behaviour, common edge
|
| 7 |
+
cases, and recommended extension points.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
from __future__ import annotations
|
| 11 |
+
|
| 12 |
+
import html
|
| 13 |
+
import json
|
| 14 |
+
import logging
|
| 15 |
+
import re
|
| 16 |
+
import sqlite3
|
| 17 |
+
from collections import defaultdict
|
| 18 |
+
from datetime import datetime, timezone
|
| 19 |
+
from pathlib import Path
|
| 20 |
+
from typing import Any, Dict, List, Mapping, Optional, Sequence, Set, Tuple
|
| 21 |
+
|
| 22 |
+
try: # ``jinja2`` is optional at runtime but recommended for template rendering.
|
| 23 |
+
from jinja2 import Template
|
| 24 |
+
except ImportError: # pragma: no cover - executed only when dependency missing.
|
| 25 |
+
Template = None # type: ignore[misc]
|
| 26 |
+
|
| 27 |
+
from config import AI_PROVIDERS, EXPORT_TEMPLATES, SPECIFICATION_TYPES
|
| 28 |
+
from database import DATABASE_PATH
|
| 29 |
+
|
| 30 |
+
# Configure a dedicated logger so user action tracking and validation warnings
|
| 31 |
+
# can be filtered or redirected by the application-wide logging configuration.
|
| 32 |
+
LOGGER = logging.getLogger(__name__)
|
| 33 |
+
|
| 34 |
+
# Resolve repository root and template directory. ``BASE_DIR`` allows us to
|
| 35 |
+
# construct absolute paths for template loading even when the application is
|
| 36 |
+
# executed from a different working directory (e.g. when packaged as a module).
|
| 37 |
+
BASE_DIR = Path(__file__).resolve().parent
|
| 38 |
+
TEMPLATES_DIR = BASE_DIR / "templates"
|
| 39 |
+
|
| 40 |
+
# Mapping of canonical specification categories to Markdown template placeholders.
|
| 41 |
+
MARKDOWN_SECTION_KEYS: Dict[str, str] = {
|
| 42 |
+
"User Stories": "user_stories_section",
|
| 43 |
+
"Features": "features_section",
|
| 44 |
+
"API Endpoints": "api_endpoints_section",
|
| 45 |
+
"Database Design": "database_design_section",
|
| 46 |
+
"System Architecture": "system_architecture_section",
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# ---------------------------------------------------------------------------
|
| 51 |
+
# Input validation helpers
|
| 52 |
+
# ---------------------------------------------------------------------------
|
| 53 |
+
|
| 54 |
+
def validate_api_key(provider: str, api_key: str) -> bool:
    """Cheaply sanity-check an API key for a configured provider.

    No network traffic is generated. The provider must exist in
    ``AI_PROVIDERS``, the key must be a non-blank string, and implausibly
    short keys are rejected with ``False`` so the caller can surface a
    friendly warning instead of an exception.

    Args:
        provider: Identifier matching a key of ``AI_PROVIDERS``.
        api_key: Raw credential string entered by the end user.

    Returns:
        ``True`` when the key passes the basic checks, ``False`` when it is
        too short to be genuine.

    Raises:
        ValueError: If the provider is unknown or the key is blank.
    """

    if provider not in AI_PROVIDERS:
        raise ValueError(
            f"Provider '{provider}' is not recognised. Please choose one of: "
            f"{', '.join(sorted(AI_PROVIDERS))}."
        )

    if not isinstance(api_key, str) or not api_key.strip():
        raise ValueError("API key must be a non-empty string.")

    candidate = api_key.strip()
    if len(candidate) < 8:
        # Real provider keys are far longer; reject rather than raise so the
        # UI can show a soft warning.
        LOGGER.warning(
            "API key for provider '%s' appears unusually short.", provider
        )
        return False

    # Most providers inject the key through an Authorization header template;
    # note (at debug level only) when this provider deviates.
    auth_template = AI_PROVIDERS[provider]["headers"].get("Authorization", "")
    if "{api_key}" not in auth_template:
        LOGGER.debug(
            "Provider '%s' does not use a standard Authorization header template.",
            provider,
        )

    return True
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
# ---------------------------------------------------------------------------
|
| 104 |
+
# Conversation formatting
|
| 105 |
+
# ---------------------------------------------------------------------------
|
| 106 |
+
|
| 107 |
+
def format_conversation_history(
    messages: Sequence[Mapping[str, Any]]
) -> str:
    """Create a readable transcript from stored conversation messages.

    Each message mapping should contain at minimum ``role`` and ``content``
    keys, with ``timestamp`` being optional. Messages are sorted
    chronologically when timestamps are available and returned as a
    newline-delimited string ready for display in the UI or export templates.

    Args:
        messages: Iterable of dictionary-like objects representing chat turns.

    Returns:
        A human-friendly string. When no messages are provided a helpful
        placeholder message is returned instead of an empty string.
    """

    if not messages:
        return "No conversation history available yet."

    def _as_comparable(ts: datetime) -> datetime:
        # Fix: comparing naive and tz-aware datetimes raises TypeError during
        # sorting; assume UTC for naive values so mixed inputs sort safely.
        return ts if ts.tzinfo is not None else ts.replace(tzinfo=timezone.utc)

    def _sort_key(message: Mapping[str, Any]):
        # Tier 0: parseable timestamps, tier 1: opaque strings, tier 2: none.
        ts = message.get("timestamp")
        if isinstance(ts, datetime):
            return (0, _as_comparable(ts))
        if isinstance(ts, str):
            try:
                return (0, _as_comparable(datetime.fromisoformat(ts)))
            except ValueError:
                return (1, ts)
        return (2, "")

    sorted_messages = sorted(messages, key=_sort_key)

    formatted_lines = []
    for entry in sorted_messages:
        role = str(entry.get("role", "unknown")).title()
        timestamp = entry.get("timestamp")
        human_time = f" [{timestamp}]" if timestamp else ""
        content = entry.get("content", "")
        if not isinstance(content, str):
            content = str(content)
        formatted_lines.append(f"{role}{human_time}:\n{content.strip()}\n")

    return "\n".join(formatted_lines).strip()
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# ---------------------------------------------------------------------------
|
| 157 |
+
# Export helpers
|
| 158 |
+
# ---------------------------------------------------------------------------
|
| 159 |
+
|
| 160 |
+
def _render_template(path: Path, context: Mapping[str, Any]) -> str:
    """Render a template file using Jinja2 when available.

    This private helper keeps file reading and template rendering consistent
    for both HTML and Markdown exports. When :mod:`jinja2` is unavailable the
    function falls back to plain placeholder substitution, so the application
    still works albeit without loops or conditionals.

    Args:
        path: Absolute path of the template file to render.
        context: Mapping of placeholder names to substitution values.

    Raises:
        FileNotFoundError: If ``path`` does not exist.
        RuntimeError: If the fallback path encounters Jinja control blocks
            (``{% ... %}``), which it cannot process.
    """

    if not path.exists():
        raise FileNotFoundError(f"Template file '{path}' was not found.")

    template_text = path.read_text(encoding="utf-8")

    if Template is None:
        LOGGER.warning(
            "jinja2 is not installed; falling back to basic placeholder replacement for %s",
            path,
        )
        if "{%" in template_text or "%}" in template_text:
            raise RuntimeError(
                "The export template requires jinja2 for conditional rendering. Install jinja2 to continue."
            )
        rendered = template_text
        for key, value in context.items():
            replacement = str(value)
            # Fix: Jinja variables are conventionally padded (``{{ key }}``);
            # the previous code replaced only the unpadded ``{{key}}`` form and
            # silently left padded placeholders in the output.
            rendered = rendered.replace(f"{{{{ {key} }}}}", replacement)
            rendered = rendered.replace(f"{{{{{key}}}}}", replacement)
        return rendered

    return Template(template_text).render(**context)
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def format_prompt(prompt: str) -> str:
    """Validate and sanitise a user prompt prior to dispatching it."""

    if not isinstance(prompt, str):
        raise ValueError("Prompt must be provided as a string.")

    sanitised = sanitize_input(prompt)
    if not sanitised:
        raise ValueError("Prompt cannot be empty after sanitisation.")
    return sanitised
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def render_export(template_name: str, context: Mapping[str, Any]) -> str:
    """Resolve ``template_name`` against ``templates/`` and render it."""

    if not template_name or not isinstance(template_name, str):
        raise ValueError("Template name must be a non-empty string.")

    candidate = Path(template_name)
    # Relative names are looked up inside the repository's templates folder.
    resolved = candidate if candidate.is_absolute() else TEMPLATES_DIR / candidate
    return _render_template(resolved, context)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def _group_specifications(
|
| 218 |
+
specifications: Sequence[Mapping[str, Any]]
|
| 219 |
+
) -> Dict[str, List[Mapping[str, Any]]]:
|
| 220 |
+
"""Organise specification rows by their ``spec_type`` value."""
|
| 221 |
+
|
| 222 |
+
grouped: Dict[str, List[Mapping[str, Any]]] = {}
|
| 223 |
+
for spec in specifications:
|
| 224 |
+
spec_type = str(spec.get("spec_type") or "Uncategorised")
|
| 225 |
+
grouped.setdefault(spec_type, []).append(spec)
|
| 226 |
+
return grouped
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def _prepare_html_export_context(
    project_data: Mapping[str, Any],
    specifications: Sequence[Mapping[str, Any]],
) -> Dict[str, Any]:
    """Assemble template context for the HTML export.

    Args:
        project_data: Project metadata; reads ``brand_name``, ``name``,
            ``description``, ``created_at``, ``id`` and
            ``conversation_base_url`` (all optional).
        specifications: Specification rows to group, count and render.

    Returns:
        A dict of pre-rendered HTML fragments and escaped scalar values that
        match the placeholders of templates/export_html.html.
    """

    # Fall back to defaults when fields are missing OR whitespace-only.
    brand_name = str(project_data.get("brand_name") or "Naexya Docs AI").strip() or "Naexya Docs AI"
    project_name = str(project_data.get("name") or "Untitled Project").strip() or "Untitled Project"
    description_raw = project_data.get("description")
    project_description = (
        html.escape(str(description_raw).strip()) if description_raw else ""
    )
    project_created_at = _format_datetime_for_display(project_data.get("created_at"))
    project_identifier = html.escape(str(project_data.get("id") or "N/A"))

    # Render categories in the configured order first, then append any
    # unexpected types found in the data so nothing is dropped.
    grouped = _group_specifications(specifications)
    ordered_types: List[str] = list(SPECIFICATION_TYPES)
    for spec_type in grouped.keys():
        if spec_type not in ordered_types:
            ordered_types.append(spec_type)

    counts_by_type: Dict[str, int] = {spec_type: len(grouped.get(spec_type, [])) for spec_type in ordered_types}
    total_specs = sum(counts_by_type.values())

    # Aggregate per-status totals and collect parseable timestamps so the
    # statistics card can show a "last updated" value.
    status_counts: Dict[str, int] = defaultdict(int)
    timestamp_candidates: List[datetime] = []
    for items in grouped.values():
        for spec in items:
            status = str(spec.get("status") or "pending").strip().lower()
            status_counts[status] += 1
            parsed = _parse_datetime(spec.get("created_at"))
            if parsed is not None:
                timestamp_candidates.append(parsed)

    latest_activity = _format_datetime_for_display(max(timestamp_candidates)) if timestamp_candidates else "Not available"

    table_of_contents = _build_table_of_contents(ordered_types, counts_by_type)
    conversation_base_url = str(project_data.get("conversation_base_url") or "").strip()
    sections_html, conversation_ids = _build_specification_sections(
        grouped, ordered_types, conversation_base_url
    )
    statistics_block = _build_statistics_block(total_specs, counts_by_type, status_counts, latest_activity)
    conversation_references = _build_conversation_reference_section(
        conversation_ids, conversation_base_url
    )

    return {
        "brand_name": brand_name,
        "project_name": html.escape(project_name),
        "project_description": project_description,
        "project_created_at": project_created_at,
        "project_identifier": project_identifier,
        "specification_total": total_specs,
        "table_of_contents": table_of_contents,
        "specification_sections": sections_html,
        "statistics_block": statistics_block,
        "conversation_references": conversation_references,
        "latest_activity": latest_activity,
    }
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
def _build_table_of_contents(spec_types: Sequence[str], counts: Mapping[str, int]) -> str:
    """Create an ordered list linking to each specification section.

    Args:
        spec_types: Category names in display order.
        counts: Per-category specification counts; missing keys count as 0.

    Returns:
        An ``<ol>`` HTML fragment whose entries anchor-link to the matching
        section id, or an explanatory paragraph when no categories exist.
    """

    if not spec_types:
        return (
            "<p class=\"empty-state\">No specification categories are configured. "
            "Update SPECIFICATION_TYPES to populate the table of contents.</p>"
        )

    lines = ["<ol class=\"toc-list\">"]
    for spec_type in spec_types:
        # Anchor must match the section id produced by
        # _build_specification_sections, which uses the same slug helper.
        slug = _slugify(spec_type)
        count = counts.get(spec_type, 0)
        lines.append(
            " <li>"
            f"<a href=\"#{slug}\">"
            f"<span class=\"toc-title\">{html.escape(spec_type)}</span>"
            f"<span class=\"toc-count\">{count}</span>"
            "</a>"
            "</li>"
        )
    lines.append("</ol>")
    return "\n".join(lines)
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
def _build_specification_sections(
    grouped: Mapping[str, Sequence[Mapping[str, Any]]],
    ordered_types: Sequence[str],
    conversation_base_url: str,
) -> Tuple[str, Set[str]]:
    """Render each specification category into HTML sections.

    Args:
        grouped: Specifications bucketed by category name.
        ordered_types: Category names in the order sections should appear.
        conversation_base_url: Optional base URL for conversation links; when
            empty, links become in-page ``#conversation-<id>`` anchors.

    Returns:
        A tuple of (joined HTML for all sections, set of conversation ids
        referenced by the rendered specifications).
    """

    sections: List[str] = []
    conversation_ids: Set[str] = set()

    # With no data at all, emit a single placeholder section and stop early.
    if not grouped:
        sections.append(
            "<section class=\"spec-section empty\">"
            "<p>No specifications have been captured yet. Approve drafts to populate this report.</p>"
            "</section>"
        )
        return "\n".join(sections), conversation_ids

    for spec_type in ordered_types:
        items = list(grouped.get(spec_type, []))
        # Section id mirrors the table-of-contents anchors.
        slug = _slugify(spec_type)
        sections.append(f"<section id=\"{slug}\" class=\"spec-section\">")
        header_html = (
            " <header class=\"section-header\">"
            f"<h2>{html.escape(spec_type)}</h2>"
            f"<span class=\"badge\">{len(items)} items</span>"
            "</header>"
        )
        sections.append(header_html)

        if not items:
            sections.append(
                " <p class=\"empty-state\">No specifications approved for this category yet.</p>"
            )
            sections.append("</section>")
            continue

        for spec in items:
            # All user-supplied values are escaped before interpolation.
            title = html.escape(str(spec.get("title") or "Untitled").strip() or "Untitled")
            raw_status = str(spec.get("status") or "pending").strip() or "pending"
            status_label = html.escape(raw_status.replace("_", " ").title())
            # Status also doubles as a CSS modifier class.
            status_class = _slugify(raw_status)
            created_display = _format_datetime_for_display(spec.get("created_at"))
            conversation_id = spec.get("conversation_id")
            conversation_link = ""
            if conversation_id is not None:
                identifier = str(conversation_id)
                conversation_ids.add(identifier)
                link_href = _build_conversation_link(conversation_base_url, identifier)
                link_text = html.escape(identifier)
                conversation_link = (
                    f'<a class="conversation-link" href="{link_href}">'
                    f'View source conversation #{link_text}</a>'
                )

            body_html = _render_rich_text(str(spec.get("content") or ""))

            sections.append(" <article class=\"spec-card\">")
            sections.append(f" <h3>{title}</h3>")
            sections.append(" <div class=\"spec-meta\">")
            sections.append(
                f" <span class=\"status-pill status-{status_class}\">{status_label}</span>"
            )
            if created_display:
                sections.append(
                    f" <span class=\"timestamp\">Captured: {created_display}</span>"
                )
            if conversation_link:
                sections.append(f" {conversation_link}")
            sections.append(" </div>")
            sections.append(f" <div class=\"spec-body\">{body_html}</div>")
            sections.append(" </article>")

        sections.append("</section>")

    return "\n".join(sections), conversation_ids
|
| 391 |
+
|
| 392 |
+
|
| 393 |
+
def _build_statistics_block(
    total_specs: int,
    counts_by_type: Mapping[str, int],
    status_counts: Mapping[str, int],
    latest_activity: str,
) -> str:
    """Summarise key metrics for the exported project.

    Builds a ``<div class="statistics-grid">`` fragment containing a total
    card, one card per category, an optional status breakdown, and a
    last-updated card.

    Args:
        total_specs: Total number of specifications across all categories.
        counts_by_type: Per-category counts, rendered in mapping order.
        status_counts: Per-status totals; an empty mapping skips that card.
        latest_activity: Display string for the newest record. NOTE(review):
            interpolated without escaping — callers are expected to pass the
            already-escaped output of _format_datetime_for_display.
    """

    cards: List[str] = ["<div class=\"statistics-grid\">"]
    cards.append(
        " <div class=\"stat-card\">"
        "<span class=\"stat-label\">Total Specifications</span>"
        f"<span class=\"stat-value\">{total_specs}</span>"
        "</div>"
    )

    for spec_type, count in counts_by_type.items():
        cards.append(
            " <div class=\"stat-card\">"
            f"<span class=\"stat-label\">{html.escape(spec_type)}</span>"
            f"<span class=\"stat-value\">{count}</span>"
            "</div>"
        )

    if status_counts:
        # Statuses are listed alphabetically with humanised labels.
        status_items: List[str] = []
        for status, count in sorted(status_counts.items()):
            status_items.append(
                "<li>"
                f"<span class=\"status-name\">{html.escape(status.replace('_', ' ').title())}</span>"
                f"<span class=\"status-count\">{count}</span>"
                "</li>"
            )
        cards.append(
            " <div class=\"stat-card span-2\">"
            "<span class=\"stat-label\">By Status</span>"
            f"<ul class=\"status-list\">{''.join(status_items)}</ul>"
            "</div>"
        )

    cards.append(
        " <div class=\"stat-card span-2\">"
        "<span class=\"stat-label\">Last Updated</span>"
        f"<span class=\"stat-value\">{latest_activity}</span>"
        "</div>"
    )

    cards.append("</div>")
    return "\n".join(cards)
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
def _build_conversation_reference_section(
    conversation_ids: Set[str], conversation_base_url: str
) -> str:
    """Generate a section listing links back to the originating conversations."""

    opening = (
        "<section id=\"conversation-references\" class=\"conversation-section\">"
        "<h2>Conversation References</h2>"
    )

    if not conversation_ids:
        return (
            opening
            + "<p>No linked conversations were captured for these specifications. Continue collaborating to enrich this section.</p>"
            + "</section>"
        )

    # Numeric-looking ids sort naturally via the (length, value) key.
    ordered = sorted(conversation_ids, key=lambda cid: (len(cid), cid))
    entries = "".join(
        f"<li><a href=\"{_build_conversation_link(conversation_base_url, cid)}\">"
        f"Conversation #{html.escape(cid)}</a></li>"
        for cid in ordered
    )
    return opening + "<ul class=\"conversation-list\">" + entries + "</ul></section>"
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
def _build_conversation_link(base_url: str, conversation_id: Any) -> str:
|
| 470 |
+
"""Return a safe hyperlink for a conversation reference."""
|
| 471 |
+
|
| 472 |
+
identifier = str(conversation_id)
|
| 473 |
+
if base_url:
|
| 474 |
+
href = f"{base_url.rstrip('/')}/{identifier}"
|
| 475 |
+
else:
|
| 476 |
+
href = f"#conversation-{identifier}"
|
| 477 |
+
return html.escape(href, quote=True)
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
def _render_rich_text(content: str) -> str:
|
| 481 |
+
"""Convert plain text into minimal HTML while preserving structure."""
|
| 482 |
+
|
| 483 |
+
stripped = content.strip()
|
| 484 |
+
if not stripped:
|
| 485 |
+
return "<p><em>No additional details provided.</em></p>"
|
| 486 |
+
|
| 487 |
+
escaped = html.escape(stripped)
|
| 488 |
+
paragraphs = [para for para in escaped.split("\n\n") if para]
|
| 489 |
+
if not paragraphs:
|
| 490 |
+
paragraphs = [escaped]
|
| 491 |
+
|
| 492 |
+
formatted: List[str] = []
|
| 493 |
+
for paragraph in paragraphs:
|
| 494 |
+
formatted.append("<p>" + paragraph.replace("\n", "<br />") + "</p>")
|
| 495 |
+
return "\n".join(formatted)
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
def _parse_datetime(value: Any) -> Optional[datetime]:
|
| 499 |
+
"""Safely parse a datetime from various formats used in the database."""
|
| 500 |
+
|
| 501 |
+
if isinstance(value, datetime):
|
| 502 |
+
dt = value
|
| 503 |
+
elif isinstance(value, str):
|
| 504 |
+
candidate = value.strip()
|
| 505 |
+
if not candidate:
|
| 506 |
+
return None
|
| 507 |
+
try:
|
| 508 |
+
dt = datetime.fromisoformat(candidate)
|
| 509 |
+
except ValueError:
|
| 510 |
+
return None
|
| 511 |
+
else:
|
| 512 |
+
return None
|
| 513 |
+
|
| 514 |
+
if dt.tzinfo is None:
|
| 515 |
+
dt = dt.replace(tzinfo=timezone.utc)
|
| 516 |
+
return dt
|
| 517 |
+
|
| 518 |
+
|
| 519 |
+
def _format_datetime_for_display(value: Any) -> str:
    """Render a datetime value as an escaped, human-friendly string."""

    moment = _parse_datetime(value)
    if moment is not None:
        readable = moment.astimezone(timezone.utc).strftime("%d %B %Y %H:%M %Z")
        return html.escape(readable)

    # Unparseable but non-blank strings are shown verbatim (escaped).
    if isinstance(value, str) and value.strip():
        return html.escape(value.strip())
    return ""
|
| 530 |
+
|
| 531 |
+
|
| 532 |
+
def _slugify(value: str) -> str:
|
| 533 |
+
"""Convert arbitrary text into an anchor-friendly slug."""
|
| 534 |
+
|
| 535 |
+
slug = re.sub(r"[^a-z0-9]+", "-", value.lower())
|
| 536 |
+
slug = slug.strip("-")
|
| 537 |
+
return slug or "section"
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
def _build_markdown_context(
    project_data: Mapping[str, Any],
    specifications: Sequence[Mapping[str, Any]],
    generated_at: str,
) -> Dict[str, Any]:
    """Assemble structured context for the Markdown export template.

    Args:
        project_data: Project metadata; reads ``name``, ``description``,
            ``conversation_base_url``, ``id``, ``created_at`` and
            ``implementation_notes`` (all optional).
        specifications: Specification rows to group and render.
        generated_at: Pre-formatted generation timestamp for the report.

    Returns:
        Context dict containing scalar fields, a ``metadata`` sub-dict, and
        one pre-rendered section string per MARKDOWN_SECTION_KEYS placeholder.
    """

    project_name = (
        str(project_data.get("name") or "Untitled Project").strip() or "Untitled Project"
    )
    description_raw = project_data.get("description")
    project_description = (
        str(description_raw).strip() if description_raw and str(description_raw).strip() else "Not provided."
    )
    conversation_base_url = str(project_data.get("conversation_base_url") or "").strip()

    project_identifier = str(project_data.get("id") or "N/A")
    created_dt = _parse_datetime(project_data.get("created_at"))
    project_created_at = (
        created_dt.astimezone(timezone.utc).isoformat() if created_dt else "not_recorded"
    )

    # Configured categories come first; unexpected ones are appended so no
    # data is lost in the export.
    grouped = _group_specifications(specifications)
    ordered_types: List[str] = list(SPECIFICATION_TYPES)
    for spec_type in grouped:
        if spec_type not in ordered_types:
            ordered_types.append(spec_type)

    spec_counts = [
        {"type": spec_type, "count": len(grouped.get(spec_type, []))}
        for spec_type in ordered_types
    ]
    total_specs = sum(entry["count"] for entry in spec_counts)

    status_totals: Dict[str, int] = defaultdict(int)
    conversation_links: List[Dict[str, str]] = []
    seen_conversations: Set[str] = set()
    latest_candidates: List[datetime] = []

    # Seed every known placeholder with an "empty" note; real content
    # overwrites it below when the category has specifications.
    sections: Dict[str, str] = {
        placeholder: f"_No {category} documented yet._"
        for category, placeholder in MARKDOWN_SECTION_KEYS.items()
    }
    additional_section_blocks: List[str] = []

    for spec_type in ordered_types:
        items = list(grouped.get(spec_type, []))

        for spec in items:
            status = str(spec.get("status") or "pending").strip() or "pending"
            status_totals[status] += 1

            created = _parse_datetime(spec.get("created_at"))
            if created is not None:
                latest_candidates.append(created)

            # Each conversation id is linked once, regardless of how many
            # specifications reference it.
            conversation_id = spec.get("conversation_id")
            if conversation_id is not None:
                identifier = str(conversation_id)
                if identifier not in seen_conversations:
                    seen_conversations.add(identifier)
                    if conversation_base_url:
                        link_url = f"{conversation_base_url.rstrip('/')}/{identifier}"
                    else:
                        link_url = f"#conversation-{identifier}"
                    conversation_links.append({"id": identifier, "url": link_url})

        # Categories without a dedicated placeholder are collected into
        # the template's generic "additional_sections" block.
        section_text = _format_markdown_section(spec_type, items, conversation_base_url)
        placeholder = MARKDOWN_SECTION_KEYS.get(spec_type)
        if placeholder:
            sections[placeholder] = section_text
        elif section_text:
            additional_section_blocks.append(f"### {spec_type}\n{section_text}")

    status_counts = [
        {"status": status.replace("_", " ").title(), "count": count}
        for status, count in sorted(status_totals.items())
    ]

    # (length, value) ordering sorts numeric-looking string ids naturally.
    conversation_links.sort(key=lambda item: (len(item["id"]), item["id"]))
    latest_activity = (
        max(latest_candidates).astimezone(timezone.utc).isoformat()
        if latest_candidates
        else "not_recorded"
    )

    implementation_notes = str(project_data.get("implementation_notes") or "").strip()
    if not implementation_notes:
        implementation_notes = "_No implementation notes provided yet._"

    additional_sections = "\n\n".join(additional_section_blocks)

    metadata = {
        "project_id": project_identifier,
        "project_created_at": project_created_at,
        "spec_counts": spec_counts,
        "status_counts": status_counts,
        "conversation_links": conversation_links,
        "latest_activity": latest_activity,
    }

    context: Dict[str, Any] = {
        "project_name": project_name,
        "project_description": project_description,
        "generation_date": generated_at,
        "spec_count": total_specs,
        "implementation_notes": implementation_notes,
        "additional_sections": additional_sections,
        "metadata": metadata,
    }
    context.update(sections)
    return context
|
| 652 |
+
|
| 653 |
+
|
| 654 |
+
def _format_markdown_section(
    spec_type: str,
    items: Sequence[Mapping[str, Any]],
    conversation_base_url: str,
) -> str:
    """Render a specification collection as a YAML-like Markdown block."""

    if not items:
        return f"_No {spec_type} documented yet._"

    rendered: List[str] = []
    for record in items:
        rendered += _format_markdown_entry(record, conversation_base_url)
    return "\n".join(rendered)
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
def _format_markdown_entry(
    spec: Mapping[str, Any], conversation_base_url: str
) -> List[str]:
    """Create a structured bullet list representation for a specification.

    Args:
        spec: Specification row; reads ``title``, ``status``, ``id``,
            ``conversation_id``, ``created_at`` and ``content``.
        conversation_base_url: Optional base URL for conversation links; when
            empty an in-page ``#conversation-<id>`` anchor is emitted.

    Returns:
        Lines forming one YAML-like list entry; string values are passed
        through ``json.dumps`` so quoting/escaping stays valid.
    """

    title = str(spec.get("title") or "Untitled").strip() or "Untitled"
    status = str(spec.get("status") or "pending").strip() or "pending"
    status_label = status.replace("_", " ").title()

    entry: List[str] = [f"- title: {json.dumps(title)}", f" status: {json.dumps(status_label)}"]

    spec_id = spec.get("id")
    if spec_id is not None:
        entry.append(f" specification_id: {json.dumps(str(spec_id))}")

    conversation_id = spec.get("conversation_id")
    if conversation_id is not None:
        entry.append(f" conversation_id: {json.dumps(identifier)}")
        identifier = str(conversation_id)
        entry.append(f" conversation_id: {json.dumps(identifier)}")
        if conversation_base_url:
            url = f"{conversation_base_url.rstrip('/')}/{identifier}"
        else:
            url = f"#conversation-{identifier}"
        entry.append(f" conversation_url: {json.dumps(url)}")

    created = _parse_datetime(spec.get("created_at"))
    if created is not None:
        entry.append(f" captured_at: {json.dumps(created.astimezone(timezone.utc).isoformat())}")

    body = str(spec.get("content") or "").strip()
    if body:
        # Literal block scalar keeps multi-line content readable.
        entry.append(" details: |")
        for line in body.splitlines():
            entry.append(f" {line}")
    else:
        entry.append(" details: _No additional narrative provided._")

    return entry
|
| 708 |
+
|
| 709 |
+
|
| 710 |
+
def generate_export_html(
    project_data: Mapping[str, Any],
    specifications: Sequence[Mapping[str, Any]],
) -> str:
    """Generate an HTML report for a project and its specifications.

    Args:
        project_data: Metadata describing the project (name, description, etc.).
        specifications: Collection of specification records to include.

    Returns:
        Rendered HTML string ready for download.

    Raises:
        ValueError: When the HTML template configuration is missing.
    """

    meta = EXPORT_TEMPLATES.get("html")
    if not meta:
        raise ValueError("HTML export template is not configured.")

    # Relative template paths are anchored at the repository root.
    template_path = Path(meta["path"])
    if not template_path.is_absolute():
        template_path = BASE_DIR / template_path

    context = _prepare_html_export_context(project_data, specifications)
    context["generated_at"] = get_current_timestamp()
    return _render_template(template_path, context)
|
| 739 |
+
|
| 740 |
+
|
| 741 |
+
def generate_export_markdown(
    project_data: Mapping[str, Any],
    specifications: Sequence[Mapping[str, Any]],
) -> str:
    """Generate a Markdown report mirroring the HTML export.

    Raises:
        ValueError: When the Markdown template configuration is missing.
    """

    meta = EXPORT_TEMPLATES.get("markdown")
    if not meta:
        raise ValueError("Markdown export template is not configured.")

    # Relative template paths are anchored at the repository root.
    template_path = Path(meta["path"])
    if not template_path.is_absolute():
        template_path = BASE_DIR / template_path

    context = _build_markdown_context(
        project_data, specifications, get_current_timestamp()
    )
    return _render_template(template_path, context)
|
| 758 |
+
|
| 759 |
+
|
| 760 |
+
# ---------------------------------------------------------------------------
|
| 761 |
+
# Security and auditing helpers
|
| 762 |
+
# ---------------------------------------------------------------------------
|
| 763 |
+
|
| 764 |
+
def sanitize_input(text: Optional[str]) -> str:
    """HTML-escape user input after trimming and normalising line endings.

    Only whitespace normalisation and HTML escaping are performed here; SQL
    or Markdown specific concerns remain the responsibility of parameterised
    queries and context-specific checks elsewhere.
    """

    if text is None:
        return ""

    unified = text.replace("\r\n", "\n").replace("\r", "\n")
    return html.escape(unified.strip())
|
| 778 |
+
|
| 779 |
+
|
| 780 |
+
def log_user_action(action: str, details: Optional[Mapping[str, Any]] = None) -> None:
    """Record high-level user events to aid debugging and auditing.

    Args:
        action: Short description of the operation (e.g. ``"create_project"``).
        details: Optional mapping of additional metadata for structured logs.

    Raises:
        ValueError: When ``action`` is empty.
    """

    if not action:
        raise ValueError("Action description must be provided for logging.")

    payload: Mapping[str, Any] = {} if details is None else details
    LOGGER.info("User action: %s | Details: %s", action, payload)
|
| 795 |
+
|
| 796 |
+
|
| 797 |
+
# ---------------------------------------------------------------------------
|
| 798 |
+
# Time helpers
|
| 799 |
+
# ---------------------------------------------------------------------------
|
| 800 |
+
|
| 801 |
+
def get_current_timestamp() -> str:
    """Return the current moment in UTC as an ISO 8601 string."""

    now_utc = datetime.now(timezone.utc)
    return now_utc.isoformat()
|
| 805 |
+
|
| 806 |
+
|
| 807 |
+
# ---------------------------------------------------------------------------
|
| 808 |
+
# Analytics helpers
|
| 809 |
+
# ---------------------------------------------------------------------------
|
| 810 |
+
|
| 811 |
+
def calculate_project_stats(project_id: int) -> Dict[str, Any]:
    """Compute aggregate metrics for a single project.

    The resulting dictionary includes counts of conversations, messages,
    pending specifications, approved specifications, and the timestamp of
    the most recent activity. These metrics power dashboards or can be
    surfaced in the "Specifications" tab to give users a quick overview of
    project health.

    Args:
        project_id: Positive integer identifier of the project.

    Returns:
        Dict with keys ``total_conversations``, ``total_messages``,
        ``pending_specifications``, ``approved_specifications`` and
        ``last_activity`` (``None`` when no activity is recorded).

    Raises:
        ValueError: If ``project_id`` is not a positive integer.
        sqlite3.DatabaseError: If any aggregate query fails (logged first).
    """

    # bool is a subclass of int, so exclude it explicitly — otherwise True
    # would silently be treated as project id 1.
    if isinstance(project_id, bool) or not isinstance(project_id, int) or project_id <= 0:
        raise ValueError("project_id must be a positive integer.")

    stats: Dict[str, Any] = {
        "total_conversations": 0,
        "total_messages": 0,
        "pending_specifications": 0,
        "approved_specifications": 0,
        "last_activity": None,
    }

    # NOTE: ``with sqlite3.connect(...)`` only manages the transaction, not
    # the connection itself, so close explicitly to avoid leaking handles.
    conn = sqlite3.connect(DATABASE_PATH)
    try:
        conn.row_factory = sqlite3.Row
        cursor = conn.cursor()

        def _count(query: str, params: tuple) -> int:
            # Run a COUNT(*) query aliased as ``count`` and return the value.
            cursor.execute(query, params)
            return cursor.fetchone()["count"]

        stats["total_conversations"] = _count(
            "SELECT COUNT(*) AS count FROM conversations WHERE project_id = ?",
            (project_id,),
        )
        stats["total_messages"] = _count(
            """
            SELECT COUNT(*) AS count
            FROM messages m
            JOIN conversations c ON c.id = m.conversation_id
            WHERE c.project_id = ?
            """,
            (project_id,),
        )
        stats["pending_specifications"] = _count(
            "SELECT COUNT(*) AS count FROM specifications WHERE project_id = ? AND status = 'pending'",
            (project_id,),
        )
        stats["approved_specifications"] = _count(
            "SELECT COUNT(*) AS count FROM specifications WHERE project_id = ? AND status = 'approved'",
            (project_id,),
        )

        # Most recent timestamp across conversations, messages and specs.
        cursor.execute(
            """
            SELECT MAX(ts) AS last_activity
            FROM (
                SELECT MAX(created_at) AS ts FROM conversations WHERE project_id = ?
                UNION ALL
                SELECT MAX(timestamp) AS ts FROM messages m JOIN conversations c ON c.id = m.conversation_id WHERE c.project_id = ?
                UNION ALL
                SELECT MAX(created_at) AS ts FROM specifications WHERE project_id = ?
            )
            """,
            (project_id, project_id, project_id),
        )
        row = cursor.fetchone()
        stats["last_activity"] = row["last_activity"] if row else None
    except sqlite3.DatabaseError as error:
        LOGGER.exception("Failed to calculate stats for project %s: %s", project_id, error)
        raise
    finally:
        conn.close()

    return stats
|
| 885 |
+
|
| 886 |
+
|
| 887 |
+
# Public API of this module: names exported by ``from <module> import *``
# and the documented surface other modules should rely on.
__all__ = [
    "validate_api_key",
    "format_prompt",
    "format_conversation_history",
    "render_export",
    "generate_export_html",
    "generate_export_markdown",
    "sanitize_input",
    "log_user_action",
    "get_current_timestamp",
    "calculate_project_stats",
]
|