Spaces:
Sleeping
Sleeping
Commit ·
c2cb41b
0
Parent(s):
Initial commit for HF deployment
Browse files- .gitignore +36 -0
- DEPLOYMENT.md +148 -0
- Dockerfile +26 -0
- README.md +378 -0
- api/__init__.py +4 -0
- api/intelligence_routes.py +660 -0
- api/jira_routes.py +240 -0
- config/__init__.py +3 -0
- config/settings.py +39 -0
- integrations/__init__.py +3 -0
- integrations/jira_service.py +947 -0
- main.py +73 -0
- models/__init__.py +33 -0
- models/intelligence_models.py +211 -0
- models/jira_models.py +158 -0
- requirements.txt +16 -0
- services/__init__.py +3 -0
- services/intelligence_service.py +760 -0
- utils/__init__.py +29 -0
- utils/helpers.py +112 -0
.gitignore
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.env
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
*.so
|
| 6 |
+
.Python
|
| 7 |
+
build/
|
| 8 |
+
develop-eggs/
|
| 9 |
+
dist/
|
| 10 |
+
downloads/
|
| 11 |
+
eggs/
|
| 12 |
+
.eggs/
|
| 13 |
+
lib/
|
| 14 |
+
lib64/
|
| 15 |
+
parts/
|
| 16 |
+
sdist/
|
| 17 |
+
var/
|
| 18 |
+
wheels/
|
| 19 |
+
*.egg-info/
|
| 20 |
+
.installed.cfg
|
| 21 |
+
*.egg
|
| 22 |
+
MANIFEST
|
| 23 |
+
.pytest_cache/
|
| 24 |
+
.coverage
|
| 25 |
+
.hypothesis/
|
| 26 |
+
*.log
|
| 27 |
+
.venv
|
| 28 |
+
venv/
|
| 29 |
+
ENV/
|
| 30 |
+
env/
|
| 31 |
+
*.db
|
| 32 |
+
*.sqlite
|
| 33 |
+
*.sqlite3
|
| 34 |
+
.DS_Store
|
| 35 |
+
.vscode/
|
| 36 |
+
.idea/
|
DEPLOYMENT.md
ADDED
|
@@ -0,0 +1,148 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Deploying to Hugging Face Spaces
|
| 2 |
+
|
| 3 |
+
This guide will help you deploy your FastAPI application to Hugging Face Spaces.
|
| 4 |
+
|
| 5 |
+
## Prerequisites
|
| 6 |
+
|
| 7 |
+
1. A Hugging Face account (sign up at https://huggingface.co/)
|
| 8 |
+
2. Git installed on your system
|
| 9 |
+
3. Your Jira API credentials
|
| 10 |
+
|
| 11 |
+
## Deployment Steps
|
| 12 |
+
|
| 13 |
+
### Step 1: Create a New Space
|
| 14 |
+
|
| 15 |
+
1. Go to https://huggingface.co/new-space
|
| 16 |
+
2. Fill in the details:
|
| 17 |
+
- **Space name**: Choose a name (e.g., `enterprise-intelligence-api`)
|
| 18 |
+
- **License**: MIT (or your preferred license)
|
| 19 |
+
- **Select SDK**: Choose **Docker**
|
| 20 |
+
- **Space hardware**: Select the free tier or upgrade if needed
|
| 21 |
+
3. Click "Create Space"
|
| 22 |
+
|
| 23 |
+
### Step 2: Push Your Code to Hugging Face
|
| 24 |
+
|
| 25 |
+
1. **Initialize Git** (if not already done):
|
| 26 |
+
```bash
|
| 27 |
+
git init
|
| 28 |
+
git add .
|
| 29 |
+
git commit -m "Initial commit for HF deployment"
|
| 30 |
+
```
|
| 31 |
+
|
| 32 |
+
2. **Add Hugging Face Space as remote**:
|
| 33 |
+
```bash
|
| 34 |
+
git remote add hf https://huggingface.co/spaces/YOUR_USERNAME/YOUR_SPACE_NAME
|
| 35 |
+
```
|
| 36 |
+
Replace `YOUR_USERNAME` and `YOUR_SPACE_NAME` with your actual values.
|
| 37 |
+
|
| 38 |
+
3. **Push to Hugging Face**:
|
| 39 |
+
```bash
|
| 40 |
+
git push hf main
|
| 41 |
+
```
|
| 42 |
+
If you're on a different branch, use:
|
| 43 |
+
```bash
|
| 44 |
+
git push hf master:main
|
| 45 |
+
```
|
| 46 |
+
|
| 47 |
+
### Step 3: Configure Environment Variables (Secrets)
|
| 48 |
+
|
| 49 |
+
Your API needs environment variables (like Jira credentials). To add them:
|
| 50 |
+
|
| 51 |
+
1. Go to your Space settings: `https://huggingface.co/spaces/YOUR_USERNAME/YOUR_SPACE_NAME/settings`
|
| 52 |
+
2. Scroll down to **Repository secrets**
|
| 53 |
+
3. Add the following secrets one by one:
|
| 54 |
+
- `JIRA_URL`: Your Jira instance URL (e.g., `https://yourcompany.atlassian.net`)
|
| 55 |
+
- `JIRA_EMAIL`: Your Jira email
|
| 56 |
+
- `JIRA_API_TOKEN`: Your Jira API token
|
| 57 |
+
- `DEBUG`: Set to `false`
|
| 58 |
+
- `ENVIRONMENT`: Set to `production`
|
| 59 |
+
- Any other secrets from your `.env.example` file
|
| 60 |
+
|
| 61 |
+
4. Click "Add secret" for each one
|
| 62 |
+
|
| 63 |
+
### Step 4: Wait for Build
|
| 64 |
+
|
| 65 |
+
1. Hugging Face will automatically build your Docker container
|
| 66 |
+
2. Check the **Logs** tab to monitor the build process
|
| 67 |
+
3. Once complete, your API will be available at:
|
| 68 |
+
```
|
| 69 |
+
https://YOUR_USERNAME-YOUR_SPACE_NAME.hf.space
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
### Step 5: Test Your Deployment
|
| 73 |
+
|
| 74 |
+
1. Visit your Space URL
|
| 75 |
+
2. Add `/docs` to access the FastAPI Swagger documentation:
|
| 76 |
+
```
|
| 77 |
+
https://YOUR_USERNAME-YOUR_SPACE_NAME.hf.space/docs
|
| 78 |
+
```
|
| 79 |
+
3. Test the endpoints to ensure everything works
|
| 80 |
+
|
| 81 |
+
## Important Notes
|
| 82 |
+
|
| 83 |
+
### Free Tier Limitations
|
| 84 |
+
- The free tier may have limited resources
|
| 85 |
+
- The app might go to sleep after inactivity
|
| 86 |
+
- Consider upgrading to a paid tier for production workloads
|
| 87 |
+
|
| 88 |
+
### Environment Variables via Dockerfile (Alternative)
|
| 89 |
+
If you don't want to use secrets (not recommended for sensitive data), you can set default values in the Dockerfile:
|
| 90 |
+
```dockerfile
|
| 91 |
+
ENV JIRA_URL=https://example.atlassian.net
|
| 92 |
+
ENV DEBUG=false
|
| 93 |
+
```
|
| 94 |
+
|
| 95 |
+
### Updating Your Deployment
|
| 96 |
+
To update your deployed app:
|
| 97 |
+
```bash
|
| 98 |
+
git add .
|
| 99 |
+
git commit -m "Update: description of changes"
|
| 100 |
+
git push hf main
|
| 101 |
+
```
|
| 102 |
+
|
| 103 |
+
Hugging Face will automatically rebuild and redeploy.
|
| 104 |
+
|
| 105 |
+
## Troubleshooting
|
| 106 |
+
|
| 107 |
+
### Build Fails
|
| 108 |
+
- Check the **Logs** tab in your Space
|
| 109 |
+
- Ensure all dependencies in `requirements.txt` are compatible
|
| 110 |
+
- Make sure the Dockerfile syntax is correct
|
| 111 |
+
|
| 112 |
+
### App Crashes on Startup
|
| 113 |
+
- Check if all required environment variables are set in Secrets
|
| 114 |
+
- Review the application logs in the Space's Logs tab
|
| 115 |
+
- Ensure the port is 7860 (required by HF Spaces)
|
| 116 |
+
|
| 117 |
+
### Can't Access the API
|
| 118 |
+
- Verify the Space is "Running" (not "Sleeping" or "Building")
|
| 119 |
+
- Check the URL is correct: `https://YOUR_USERNAME-YOUR_SPACE_NAME.hf.space`
|
| 120 |
+
- Free tier spaces may take a moment to wake up
|
| 121 |
+
|
| 122 |
+
## Quick Reference Commands
|
| 123 |
+
|
| 124 |
+
```bash
|
| 125 |
+
# Clone your existing Space
|
| 126 |
+
git clone https://huggingface.co/spaces/YOUR_USERNAME/YOUR_SPACE_NAME
|
| 127 |
+
|
| 128 |
+
# Add changes
|
| 129 |
+
git add .
|
| 130 |
+
git commit -m "Your message"
|
| 131 |
+
git push
|
| 132 |
+
|
| 133 |
+
# Force push (use carefully)
|
| 134 |
+
git push --force
|
| 135 |
+
```
|
| 136 |
+
|
| 137 |
+
## Resources
|
| 138 |
+
|
| 139 |
+
- [Hugging Face Spaces Documentation](https://huggingface.co/docs/hub/spaces)
|
| 140 |
+
- [Docker Spaces Guide](https://huggingface.co/docs/hub/spaces-sdks-docker)
|
| 141 |
+
- [FastAPI Documentation](https://fastapi.tiangolo.com/)
|
| 142 |
+
|
| 143 |
+
## Support
|
| 144 |
+
|
| 145 |
+
If you encounter issues:
|
| 146 |
+
1. Check Hugging Face Spaces documentation
|
| 147 |
+
2. Review the build logs in your Space
|
| 148 |
+
3. Visit Hugging Face forums or Discord for community support
|
Dockerfile
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use Python 3.11 slim image
|
| 2 |
+
FROM python:3.11-slim
|
| 3 |
+
|
| 4 |
+
# Set working directory
|
| 5 |
+
WORKDIR /app
|
| 6 |
+
|
| 7 |
+
# Install system dependencies
|
| 8 |
+
RUN apt-get update && apt-get install -y \
|
| 9 |
+
gcc \
|
| 10 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 11 |
+
|
| 12 |
+
# Copy requirements first for better caching
|
| 13 |
+
COPY requirements.txt .
|
| 14 |
+
|
| 15 |
+
# Install Python dependencies
|
| 16 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 17 |
+
|
| 18 |
+
# Copy the entire application
|
| 19 |
+
COPY . .
|
| 20 |
+
|
| 21 |
+
# Expose port 7860 (Hugging Face Spaces default)
|
| 22 |
+
EXPOSE 7860
|
| 23 |
+
|
| 24 |
+
# Command to run the application
|
| 25 |
+
# Hugging Face Spaces expects the app to run on 0.0.0.0:7860
|
| 26 |
+
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
|
README.md
ADDED
|
@@ -0,0 +1,378 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Enterprise Delivery & Workforce Intelligence API
|
| 3 |
+
emoji: 🚀
|
| 4 |
+
colorFrom: blue
|
| 5 |
+
colorTo: purple
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
license: mit
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
# Enterprise Delivery & Workforce Intelligence API
|
| 12 |
+
|
| 13 |
+
## Overview
|
| 14 |
+
AI-powered enterprise intelligence system that converts raw engineering activity from Jira (and GitHub) into meaningful business-level insights.
|
| 15 |
+
|
| 16 |
+
## Features
|
| 17 |
+
|
| 18 |
+
### 📊 Data Integration
|
| 19 |
+
- **Jira Integration**: Fetch issues, sprints, worklogs, team members, and project data
|
| 20 |
+
- **GitHub Integration**: (Coming soon - handled by frontend team)
|
| 21 |
+
|
| 22 |
+
### 🧠 Intelligence & Analytics
|
| 23 |
+
- **Delivery Health Metrics**: Track velocity, completion rates, cycle time, and sprint health
|
| 24 |
+
- **Productivity Metrics**: Monitor individual and team productivity, utilization, and efficiency
|
| 25 |
+
- **Cost Efficiency Analysis**: Calculate cost per feature, story point, and identify waste
|
| 26 |
+
- **Risk Alerts**: Automated detection of delivery delays, resource issues, and cost overruns
|
| 27 |
+
- **AI Insights**: Generate actionable recommendations based on engineering data
|
| 28 |
+
|
| 29 |
+
### 🎯 Business Impact
|
| 30 |
+
- Predict delivery delays early
|
| 31 |
+
- Optimize workforce allocation
|
| 32 |
+
- Identify cost inefficiencies
|
| 33 |
+
- Data-driven decision making for engineering leaders
|
| 34 |
+
|
| 35 |
+
## Project Structure
|
| 36 |
+
|
| 37 |
+
```
|
| 38 |
+
backend/
|
| 39 |
+
├── api/ # FastAPI route handlers
|
| 40 |
+
│ ├── jira_routes.py # Jira data endpoints
|
| 41 |
+
│ ├── intelligence_routes.py # Intelligence & metrics endpoints
|
| 42 |
+
│ └── __init__.py
|
| 43 |
+
├── config/ # Configuration management
|
| 44 |
+
│ ├── settings.py # Environment settings
|
| 45 |
+
│ └── __init__.py
|
| 46 |
+
├── integrations/ # External API integrations
|
| 47 |
+
│ ├── jira_service.py # Jira API client
|
| 48 |
+
│ └── __init__.py
|
| 49 |
+
├── models/ # Data models (Pydantic)
|
| 50 |
+
│ ├── jira_models.py # Jira domain models
|
| 51 |
+
│ ├── intelligence_models.py # Intelligence metrics models
|
| 52 |
+
│ └── __init__.py
|
| 53 |
+
├── services/ # Business logic
|
| 54 |
+
│ ├── intelligence_service.py # Analytics & insights generation
|
| 55 |
+
│ └── __init__.py
|
| 56 |
+
├── main.py # FastAPI application entry point
|
| 57 |
+
├── requirements.txt # Python dependencies
|
| 58 |
+
├── .env.example # Environment variables template
|
| 59 |
+
└── README.md # This file
|
| 60 |
+
```
|
| 61 |
+
|
| 62 |
+
## Setup Instructions
|
| 63 |
+
|
| 64 |
+
### Prerequisites
|
| 65 |
+
- Python 3.9+
|
| 66 |
+
- Jira account with API access
|
| 67 |
+
- Jira API token ([Get it here](https://id.atlassian.com/manage-profile/security/api-tokens))
|
| 68 |
+
|
| 69 |
+
### Installation
|
| 70 |
+
|
| 71 |
+
1. **Clone the repository** (if not already done)
|
| 72 |
+
```bash
|
| 73 |
+
cd d:\Datathon\backend
|
| 74 |
+
```
|
| 75 |
+
|
| 76 |
+
2. **Create virtual environment**
|
| 77 |
+
```bash
|
| 78 |
+
python -m venv venv
|
| 79 |
+
venv\Scripts\activate # On Windows
|
| 80 |
+
# source venv/bin/activate # On Linux/Mac
|
| 81 |
+
```
|
| 82 |
+
|
| 83 |
+
3. **Install dependencies**
|
| 84 |
+
```bash
|
| 85 |
+
pip install -r requirements.txt
|
| 86 |
+
```
|
| 87 |
+
|
| 88 |
+
4. **Configure environment variables**
|
| 89 |
+
```bash
|
| 90 |
+
copy .env.example .env
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
Edit `.env` with your credentials:
|
| 94 |
+
```env
|
| 95 |
+
JIRA_SERVER_URL=https://your-domain.atlassian.net
|
| 96 |
+
JIRA_EMAIL=your-email@example.com
|
| 97 |
+
JIRA_API_TOKEN=your_jira_api_token
|
| 98 |
+
SECRET_KEY=your-secret-key-here
|
| 99 |
+
```
|
| 100 |
+
|
| 101 |
+
5. **Run the application**
|
| 102 |
+
```bash
|
| 103 |
+
python main.py
|
| 104 |
+
```
|
| 105 |
+
|
| 106 |
+
Or using uvicorn directly:
|
| 107 |
+
```bash
|
| 108 |
+
uvicorn main:app --reload --host 0.0.0.0 --port 8000
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
6. **Access the API**
|
| 112 |
+
- API: http://localhost:8000
|
| 113 |
+
- Interactive docs: http://localhost:8000/docs
|
| 114 |
+
- Alternative docs: http://localhost:8000/redoc
|
| 115 |
+
|
| 116 |
+
## API Endpoints
|
| 117 |
+
|
| 118 |
+
### Jira Data Endpoints
|
| 119 |
+
|
| 120 |
+
#### Get All Projects
|
| 121 |
+
```http
|
| 122 |
+
GET /jira/projects
|
| 123 |
+
```
|
| 124 |
+
|
| 125 |
+
#### Get Project Issues
|
| 126 |
+
```http
|
| 127 |
+
GET /jira/projects/{project_key}/issues?start_date=2026-01-01&end_date=2026-02-07&max_results=100
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
#### Get Boards
|
| 131 |
+
```http
|
| 132 |
+
GET /jira/boards
|
| 133 |
+
```
|
| 134 |
+
|
| 135 |
+
#### Get Sprints for Board
|
| 136 |
+
```http
|
| 137 |
+
GET /jira/boards/{board_id}/sprints
|
| 138 |
+
```
|
| 139 |
+
|
| 140 |
+
#### Get Active Sprint
|
| 141 |
+
```http
|
| 142 |
+
GET /jira/boards/{board_id}/active-sprint
|
| 143 |
+
```
|
| 144 |
+
|
| 145 |
+
#### Get Sprint Issues
|
| 146 |
+
```http
|
| 147 |
+
GET /jira/sprints/{sprint_id}/issues
|
| 148 |
+
```
|
| 149 |
+
|
| 150 |
+
#### Get Team Members
|
| 151 |
+
```http
|
| 152 |
+
GET /jira/projects/{project_key}/team-members
|
| 153 |
+
```
|
| 154 |
+
|
| 155 |
+
#### Get Worklogs
|
| 156 |
+
```http
|
| 157 |
+
GET /jira/projects/{project_key}/worklogs?start_date=2026-01-01&end_date=2026-02-07
|
| 158 |
+
```
|
| 159 |
+
|
| 160 |
+
### Intelligence Endpoints
|
| 161 |
+
|
| 162 |
+
#### Get Delivery Health Metrics
|
| 163 |
+
```http
|
| 164 |
+
GET /intelligence/delivery-health/{project_key}?start_date=2026-01-01&end_date=2026-02-07
|
| 165 |
+
GET /intelligence/delivery-health/{project_key}?board_id=1&sprint_id=10
|
| 166 |
+
```
|
| 167 |
+
|
| 168 |
+
**Response:**
|
| 169 |
+
```json
|
| 170 |
+
{
|
| 171 |
+
"sprint_name": "Sprint 1",
|
| 172 |
+
"period_start": "2026-01-01",
|
| 173 |
+
"period_end": "2026-02-07",
|
| 174 |
+
"completed_story_points": 45.0,
|
| 175 |
+
"velocity": 45.0,
|
| 176 |
+
"completion_rate": 85.5,
|
| 177 |
+
"avg_cycle_time_hours": 12.5,
|
| 178 |
+
"health_score": 82.3,
|
| 179 |
+
"blocked_issues_count": 2
|
| 180 |
+
}
|
| 181 |
+
```
|
| 182 |
+
|
| 183 |
+
#### Get Productivity Metrics
|
| 184 |
+
```http
|
| 185 |
+
GET /intelligence/productivity/{project_key}?start_date=2026-01-01&end_date=2026-02-07
|
| 186 |
+
```
|
| 187 |
+
|
| 188 |
+
**Response:**
|
| 189 |
+
```json
|
| 190 |
+
[
|
| 191 |
+
{
|
| 192 |
+
"team_member_name": "John Doe",
|
| 193 |
+
"issues_completed": 8,
|
| 194 |
+
"story_points_completed": 21.0,
|
| 195 |
+
"total_hours_logged": 80.0,
|
| 196 |
+
"productivity_score": 78.5,
|
| 197 |
+
"utilization_rate": 90.0
|
| 198 |
+
}
|
| 199 |
+
]
|
| 200 |
+
```
|
| 201 |
+
|
| 202 |
+
#### Get Cost Efficiency Metrics
|
| 203 |
+
```http
|
| 204 |
+
GET /intelligence/cost-efficiency/{project_key}?start_date=2026-01-01&avg_hourly_rate=75
|
| 205 |
+
```
|
| 206 |
+
|
| 207 |
+
**Response:**
|
| 208 |
+
```json
|
| 209 |
+
{
|
| 210 |
+
"total_hours_logged": 320.0,
|
| 211 |
+
"estimated_cost": 24000.0,
|
| 212 |
+
"features_delivered": 15,
|
| 213 |
+
"cost_per_feature": 1600.0,
|
| 214 |
+
"cost_per_story_point": 533.33,
|
| 215 |
+
"waste_percentage": 8.5
|
| 216 |
+
}
|
| 217 |
+
```
|
| 218 |
+
|
| 219 |
+
#### Get Risk Alerts
|
| 220 |
+
```http
|
| 221 |
+
GET /intelligence/risk-alerts/{project_key}?start_date=2026-01-01
|
| 222 |
+
```
|
| 223 |
+
|
| 224 |
+
**Response:**
|
| 225 |
+
```json
|
| 226 |
+
[
|
| 227 |
+
{
|
| 228 |
+
"alert_type": "delivery_delay",
|
| 229 |
+
"severity": "high",
|
| 230 |
+
"title": "Low Completion Rate",
|
| 231 |
+
"description": "Only 45.0% of planned work is completed.",
|
| 232 |
+
"suggested_action": "Reduce scope or extend timeline to meet commitments."
|
| 233 |
+
}
|
| 234 |
+
]
|
| 235 |
+
```
|
| 236 |
+
|
| 237 |
+
#### Get AI Insights
|
| 238 |
+
```http
|
| 239 |
+
GET /intelligence/insights/{project_key}?start_date=2026-01-01
|
| 240 |
+
```
|
| 241 |
+
|
| 242 |
+
**Response:**
|
| 243 |
+
```json
|
| 244 |
+
[
|
| 245 |
+
{
|
| 246 |
+
"category": "delivery",
|
| 247 |
+
"title": "Velocity Analysis",
|
| 248 |
+
"description": "Team completed 45.0 story points with 85.5% completion rate.",
|
| 249 |
+
"confidence_score": 0.85,
|
| 250 |
+
"impact_level": "medium",
|
| 251 |
+
"recommendations": [
|
| 252 |
+
"Maintain current sprint planning strategy",
|
| 253 |
+
"Consider increasing capacity for higher throughput"
|
| 254 |
+
]
|
| 255 |
+
}
|
| 256 |
+
]
|
| 257 |
+
```
|
| 258 |
+
|
| 259 |
+
#### Get Complete Dashboard
|
| 260 |
+
```http
|
| 261 |
+
GET /intelligence/dashboard/{project_key}?start_date=2026-01-01&end_date=2026-02-07
|
| 262 |
+
```
|
| 263 |
+
|
| 264 |
+
**Response includes all metrics:**
|
| 265 |
+
- Delivery health
|
| 266 |
+
- Productivity metrics for all team members
|
| 267 |
+
- Cost efficiency
|
| 268 |
+
- Risk alerts
|
| 269 |
+
- AI insights
|
| 270 |
+
|
| 271 |
+
## Usage Examples
|
| 272 |
+
|
| 273 |
+
### Example 1: Monitor Sprint Health
|
| 274 |
+
```python
|
| 275 |
+
import requests
|
| 276 |
+
|
| 277 |
+
# Get active sprint
|
| 278 |
+
response = requests.get(
|
| 279 |
+
"http://localhost:8000/jira/boards/1/active-sprint"
|
| 280 |
+
)
|
| 281 |
+
sprint = response.json()
|
| 282 |
+
|
| 283 |
+
# Get delivery health for that sprint
|
| 284 |
+
response = requests.get(
|
| 285 |
+
f"http://localhost:8000/intelligence/delivery-health/PROJ",
|
| 286 |
+
params={
|
| 287 |
+
"board_id": 1,
|
| 288 |
+
"sprint_id": sprint["sprint_id"]
|
| 289 |
+
}
|
| 290 |
+
)
|
| 291 |
+
health = response.json()
|
| 292 |
+
print(f"Sprint Health Score: {health['health_score']}/100")
|
| 293 |
+
```
|
| 294 |
+
|
| 295 |
+
### Example 2: Team Productivity Report
|
| 296 |
+
```python
|
| 297 |
+
import requests
|
| 298 |
+
|
| 299 |
+
response = requests.get(
|
| 300 |
+
"http://localhost:8000/intelligence/productivity/PROJ",
|
| 301 |
+
params={
|
| 302 |
+
"start_date": "2026-01-01",
|
| 303 |
+
"end_date": "2026-02-07"
|
| 304 |
+
}
|
| 305 |
+
)
|
| 306 |
+
|
| 307 |
+
for member in response.json():
|
| 308 |
+
print(f"{member['team_member_name']}: {member['productivity_score']}/100")
|
| 309 |
+
```
|
| 310 |
+
|
| 311 |
+
### Example 3: Cost Analysis
|
| 312 |
+
```python
|
| 313 |
+
import requests
|
| 314 |
+
|
| 315 |
+
response = requests.get(
|
| 316 |
+
"http://localhost:8000/intelligence/cost-efficiency/PROJ",
|
| 317 |
+
params={
|
| 318 |
+
"start_date": "2026-01-01",
|
| 319 |
+
"avg_hourly_rate": 80
|
| 320 |
+
}
|
| 321 |
+
)
|
| 322 |
+
|
| 323 |
+
cost = response.json()
|
| 324 |
+
print(f"Cost per Story Point: ${cost['cost_per_story_point']:.2f}")
|
| 325 |
+
print(f"Waste: {cost['waste_percentage']:.1f}%")
|
| 326 |
+
```
|
| 327 |
+
|
| 328 |
+
## Integration with Frontend
|
| 329 |
+
|
| 330 |
+
The frontend team working on GitHub integration can consume these endpoints:
|
| 331 |
+
|
| 332 |
+
1. **Dashboard View**: Use `/intelligence/dashboard/{project_key}` for comprehensive data
|
| 333 |
+
2. **Real-time Alerts**: Poll `/intelligence/risk-alerts/{project_key}` for updates
|
| 334 |
+
3. **Team Performance**: Display `/intelligence/productivity/{project_key}` data
|
| 335 |
+
|
| 336 |
+
## Future Enhancements
|
| 337 |
+
|
| 338 |
+
- [ ] GitHub integration for code metrics
|
| 339 |
+
- [ ] Real-time websocket updates
|
| 340 |
+
- [ ] Historical trend analysis
|
| 341 |
+
- [ ] Machine learning predictions
|
| 342 |
+
- [ ] Custom metric definitions
|
| 343 |
+
- [ ] Team capacity planning
|
| 344 |
+
- [ ] Automated reporting
|
| 345 |
+
|
| 346 |
+
## Troubleshooting
|
| 347 |
+
|
| 348 |
+
### Common Issues
|
| 349 |
+
|
| 350 |
+
**401 Unauthorized Error**
|
| 351 |
+
- Check Jira API token is valid
|
| 352 |
+
- Verify email and server URL in `.env`
|
| 353 |
+
|
| 354 |
+
**No data returned**
|
| 355 |
+
- Ensure project key is correct
|
| 356 |
+
- Check date ranges are valid
|
| 357 |
+
- Verify you have access to the Jira project
|
| 358 |
+
|
| 359 |
+
**Import errors**
|
| 360 |
+
- Activate virtual environment
|
| 361 |
+
- Run `pip install -r requirements.txt`
|
| 362 |
+
|
| 363 |
+
## Contributing
|
| 364 |
+
|
| 365 |
+
This is an MVP for a datathon. Focus areas:
|
| 366 |
+
1. Jira data accuracy
|
| 367 |
+
2. Intelligence algorithm improvements
|
| 368 |
+
3. API performance optimization
|
| 369 |
+
|
| 370 |
+
## License
|
| 371 |
+
|
| 372 |
+
MIT License - Datathon 2026
|
| 373 |
+
|
| 374 |
+
---
|
| 375 |
+
|
| 376 |
+
**Built for**: AI-Driven Enterprise Delivery & Workforce Intelligence
|
| 377 |
+
**Team**: Backend (Jira) + Frontend (GitHub)
|
| 378 |
+
**Date**: February 2026
|
api/__init__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .jira_routes import router as jira_router
|
| 2 |
+
from .intelligence_routes import router as intelligence_router
|
| 3 |
+
|
| 4 |
+
__all__ = ["jira_router", "intelligence_router"]
|
api/intelligence_routes.py
ADDED
|
@@ -0,0 +1,660 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, HTTPException, Query
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
from datetime import datetime, timedelta, date
|
| 4 |
+
from integrations.jira_service import jira_service
|
| 5 |
+
from services.intelligence_service import intelligence_service
|
| 6 |
+
from models.intelligence_models import (
|
| 7 |
+
DeliveryHealthMetrics,
|
| 8 |
+
ProductivityMetrics,
|
| 9 |
+
CostEfficiencyMetrics,
|
| 10 |
+
TeamCapacityMetrics,
|
| 11 |
+
RiskAlert,
|
| 12 |
+
InsightRecommendation,
|
| 13 |
+
KanbanFlowMetrics,
|
| 14 |
+
KanbanColumnAnalysis,
|
| 15 |
+
WIPLimitRecommendation
|
| 16 |
+
)
|
| 17 |
+
import logging
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
router = APIRouter(prefix="/intelligence", tags=["Intelligence"])
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@router.get("/delivery-health/{project_key}", response_model=DeliveryHealthMetrics)
async def get_delivery_health(
    project_key: str,
    board_id: Optional[int] = None,
    sprint_id: Optional[int] = None,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get delivery health metrics for a project or sprint"""
    try:
        # ISO-8601 query strings -> date objects (None when omitted).
        period_start = date.fromisoformat(start_date) if start_date else None
        period_end = date.fromisoformat(end_date) if end_date else None

        matched_sprint = None
        if sprint_id:
            # Sprint scope: use the sprint's own issues; resolve the sprint
            # metadata only when the caller also supplied the board id.
            issue_list = jira_service.get_sprint_issues(sprint_id)
            if board_id:
                for candidate in jira_service.get_sprints(board_id):
                    if candidate.sprint_id == sprint_id:
                        matched_sprint = candidate
                        break
        else:
            # Project scope: fetch every issue inside the requested window,
            # widening the bounds to cover the full first and last days.
            window_start = None
            window_end = None
            if period_start:
                window_start = datetime.combine(period_start, datetime.min.time())
            if period_end:
                window_end = datetime.combine(period_end, datetime.max.time())
            issue_list = jira_service.get_issues_by_project(
                project_key=project_key,
                max_results=1000,
                start_date=window_start,
                end_date=window_end,
            )

        return intelligence_service.calculate_delivery_health(
            issues=issue_list,
            sprint=matched_sprint,
            period_start=period_start,
            period_end=period_end,
        )
    except ValueError as e:
        # date.fromisoformat rejects malformed query parameters.
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        logger.error(f"Error calculating delivery health: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@router.get("/productivity/{project_key}", response_model=List[ProductivityMetrics])
async def get_productivity_metrics(
    project_key: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get productivity metrics for all team members in a project"""
    try:
        # Default window: the trailing 14 days, ending today.
        period_start = date.fromisoformat(start_date) if start_date else (date.today() - timedelta(days=14))
        period_end = date.fromisoformat(end_date) if end_date else date.today()

        # Widen the date bounds to full-day datetimes for the Jira queries.
        window_start = datetime.combine(period_start, datetime.min.time())
        window_end = datetime.combine(period_end, datetime.max.time())

        project_issues = jira_service.get_issues_by_project(
            project_key=project_key,
            max_results=1000,
            start_date=window_start,
            end_date=window_end,
        )
        logged_work = jira_service.get_worklogs(
            project_key=project_key,
            start_date=window_start,
            end_date=window_end,
        )

        # One metrics record per member of the project's team.
        return [
            intelligence_service.calculate_productivity_metrics(
                issues=project_issues,
                worklogs=logged_work,
                team_member_id=member.account_id,
                team_member_name=member.display_name,
                period_start=period_start,
                period_end=period_end,
            )
            for member in jira_service.get_team_members(project_key)
        ]
    except ValueError as e:
        # date.fromisoformat rejects malformed query parameters.
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        logger.error(f"Error calculating productivity metrics: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@router.get("/cost-efficiency/{project_key}", response_model=CostEfficiencyMetrics)
async def get_cost_efficiency(
    project_key: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
    avg_hourly_rate: float = Query(75.0, gt=0)
):
    """Get cost efficiency metrics for a project.

    Args:
        project_key: Jira project key.
        start_date: Optional ISO date; defaults to 14 days ago.
        end_date: Optional ISO date; defaults to today.
        avg_hourly_rate: Blended hourly rate used to cost logged time (> 0).

    Raises:
        HTTPException: 400 for malformed dates, 500 for upstream failures.
    """
    try:
        start_dt = date.fromisoformat(start_date) if start_date else (date.today() - timedelta(days=14))
        end_dt = date.fromisoformat(end_date) if end_date else date.today()

        # Compute the datetime window once and reuse it for both queries.
        period_start = datetime.combine(start_dt, datetime.min.time())
        period_end = datetime.combine(end_dt, datetime.max.time())

        issues = jira_service.get_issues_by_project(
            project_key=project_key,
            max_results=1000,
            start_date=period_start,
            end_date=period_end,
        )
        worklogs = jira_service.get_worklogs(
            project_key=project_key,
            start_date=period_start,
            end_date=period_end,
        )

        return intelligence_service.calculate_cost_efficiency(
            issues=issues,
            worklogs=worklogs,
            period_start=start_dt,
            period_end=end_dt,
            avg_hourly_rate=avg_hourly_rate,
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        # logger.exception keeps the traceback that logger.error(f"...") dropped.
        logger.exception("Error calculating cost efficiency: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@router.get("/risk-alerts/{project_key}", response_model=List[RiskAlert])
async def get_risk_alerts(
    project_key: str,
    board_id: Optional[int] = None,
    sprint_id: Optional[int] = None,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get risk alerts for a project.

    Aggregates delivery-health, productivity, and cost metrics over the
    selected window (default: trailing 14 days) and derives alerts from them.

    Raises:
        HTTPException: 400 for malformed dates, 500 for upstream failures.
    """
    try:
        start_dt = date.fromisoformat(start_date) if start_date else (date.today() - timedelta(days=14))
        end_dt = date.fromisoformat(end_date) if end_date else date.today()

        # Hoisted: the original rebuilt these datetime bounds per call site.
        period_start = datetime.combine(start_dt, datetime.min.time())
        period_end = datetime.combine(end_dt, datetime.max.time())

        # Sprint scope requires both ids; otherwise fall back to the project window.
        sprint = None
        if sprint_id and board_id:
            issues = jira_service.get_sprint_issues(sprint_id)
            sprints = jira_service.get_sprints(board_id)
            sprint = next((s for s in sprints if s.sprint_id == sprint_id), None)
        else:
            issues = jira_service.get_issues_by_project(
                project_key=project_key,
                max_results=1000,
                start_date=period_start,
                end_date=period_end,
            )

        worklogs = jira_service.get_worklogs(
            project_key=project_key,
            start_date=period_start,
            end_date=period_end,
        )
        team_members = jira_service.get_team_members(project_key)

        delivery_health = intelligence_service.calculate_delivery_health(
            issues=issues,
            sprint=sprint,
            period_start=start_dt,
            period_end=end_dt,
        )

        productivity_metrics = [
            intelligence_service.calculate_productivity_metrics(
                issues=issues,
                worklogs=worklogs,
                team_member_id=member.account_id,
                team_member_name=member.display_name,
                period_start=start_dt,
                period_end=end_dt,
            )
            for member in team_members
        ]

        cost_metrics = intelligence_service.calculate_cost_efficiency(
            issues=issues,
            worklogs=worklogs,
            period_start=start_dt,
            period_end=end_dt,
        )

        return intelligence_service.generate_risk_alerts(
            delivery_health=delivery_health,
            productivity_metrics=productivity_metrics,
            cost_metrics=cost_metrics,
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        # Preserve traceback in the log for post-mortem debugging.
        logger.exception("Error generating risk alerts: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 231 |
+
|
| 232 |
+
|
| 233 |
+
@router.get("/insights/{project_key}", response_model=List[InsightRecommendation])
async def get_insights(
    project_key: str,
    board_id: Optional[int] = None,
    sprint_id: Optional[int] = None,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get AI-generated insights and recommendations.

    Same data-gathering pipeline as the risk-alerts endpoint; only the final
    aggregation step differs (generate_insights instead of generate_risk_alerts).

    Raises:
        HTTPException: 400 for malformed dates, 500 for upstream failures.
    """
    try:
        start_dt = date.fromisoformat(start_date) if start_date else (date.today() - timedelta(days=14))
        end_dt = date.fromisoformat(end_date) if end_date else date.today()

        # Build the datetime window once instead of once per downstream call.
        period_start = datetime.combine(start_dt, datetime.min.time())
        period_end = datetime.combine(end_dt, datetime.max.time())

        sprint = None
        if sprint_id and board_id:
            issues = jira_service.get_sprint_issues(sprint_id)
            sprints = jira_service.get_sprints(board_id)
            sprint = next((s for s in sprints if s.sprint_id == sprint_id), None)
        else:
            issues = jira_service.get_issues_by_project(
                project_key=project_key,
                max_results=1000,
                start_date=period_start,
                end_date=period_end,
            )

        worklogs = jira_service.get_worklogs(
            project_key=project_key,
            start_date=period_start,
            end_date=period_end,
        )
        team_members = jira_service.get_team_members(project_key)

        delivery_health = intelligence_service.calculate_delivery_health(
            issues=issues,
            sprint=sprint,
            period_start=start_dt,
            period_end=end_dt,
        )

        productivity_metrics = [
            intelligence_service.calculate_productivity_metrics(
                issues=issues,
                worklogs=worklogs,
                team_member_id=member.account_id,
                team_member_name=member.display_name,
                period_start=start_dt,
                period_end=end_dt,
            )
            for member in team_members
        ]

        cost_metrics = intelligence_service.calculate_cost_efficiency(
            issues=issues,
            worklogs=worklogs,
            period_start=start_dt,
            period_end=end_dt,
        )

        return intelligence_service.generate_insights(
            delivery_health=delivery_health,
            productivity_metrics=productivity_metrics,
            cost_metrics=cost_metrics,
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        # logger.exception keeps the traceback; the f-string error() call did not.
        logger.exception("Error generating insights: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
@router.get("/dashboard/{project_key}")
async def get_dashboard_data(
    project_key: str,
    board_id: Optional[int] = None,
    sprint_id: Optional[int] = None,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get comprehensive dashboard data including all metrics.

    Returns a dict with delivery health, per-member productivity, cost
    efficiency, risk alerts, insights, and the resolved reporting period.

    Raises:
        HTTPException: 400 for malformed dates, 500 for upstream failures.
    """
    try:
        start_dt = date.fromisoformat(start_date) if start_date else (date.today() - timedelta(days=14))
        end_dt = date.fromisoformat(end_date) if end_date else date.today()

        # Compute the datetime bounds once and share them across queries.
        period_start = datetime.combine(start_dt, datetime.min.time())
        period_end = datetime.combine(end_dt, datetime.max.time())

        sprint = None
        if sprint_id and board_id:
            issues = jira_service.get_sprint_issues(sprint_id)
            sprints = jira_service.get_sprints(board_id)
            sprint = next((s for s in sprints if s.sprint_id == sprint_id), None)
        else:
            issues = jira_service.get_issues_by_project(
                project_key=project_key,
                max_results=1000,
                start_date=period_start,
                end_date=period_end,
            )

        worklogs = jira_service.get_worklogs(
            project_key=project_key,
            start_date=period_start,
            end_date=period_end,
        )
        team_members = jira_service.get_team_members(project_key)

        delivery_health = intelligence_service.calculate_delivery_health(
            issues=issues,
            sprint=sprint,
            period_start=start_dt,
            period_end=end_dt,
        )

        productivity_metrics = [
            intelligence_service.calculate_productivity_metrics(
                issues=issues,
                worklogs=worklogs,
                team_member_id=member.account_id,
                team_member_name=member.display_name,
                period_start=start_dt,
                period_end=end_dt,
            )
            for member in team_members
        ]

        cost_metrics = intelligence_service.calculate_cost_efficiency(
            issues=issues,
            worklogs=worklogs,
            period_start=start_dt,
            period_end=end_dt,
        )

        risk_alerts = intelligence_service.generate_risk_alerts(
            delivery_health=delivery_health,
            productivity_metrics=productivity_metrics,
            cost_metrics=cost_metrics,
        )

        insights = intelligence_service.generate_insights(
            delivery_health=delivery_health,
            productivity_metrics=productivity_metrics,
            cost_metrics=cost_metrics,
        )

        return {
            "delivery_health": delivery_health,
            "productivity_metrics": productivity_metrics,
            "cost_efficiency": cost_metrics,
            "risk_alerts": risk_alerts,
            "insights": insights,
            "period": {
                "start": start_dt,
                "end": end_dt,
            },
        }
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        # Log with traceback; raw logger.error(f"...") lost it.
        logger.exception("Error generating dashboard data: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 400 |
+
|
| 401 |
+
|
| 402 |
+
# ===== KANBAN INTELLIGENCE ENDPOINTS =====
|
| 403 |
+
|
| 404 |
+
@router.get("/kanban/flow-metrics/{board_id}", response_model=KanbanFlowMetrics)
async def get_kanban_flow_metrics(
    board_id: int,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get Kanban flow efficiency metrics for a board.

    Defaults to the trailing 30 days when no range is supplied.

    Raises:
        HTTPException: 404 if the board does not exist, 400 for malformed
            dates, 500 for upstream failures.
    """
    try:
        start_dt = date.fromisoformat(start_date) if start_date else (date.today() - timedelta(days=30))
        end_dt = date.fromisoformat(end_date) if end_date else date.today()

        board = jira_service.get_kanban_board_by_id(board_id)
        if not board:
            raise HTTPException(status_code=404, detail=f"Kanban board {board_id} not found")

        columns = jira_service.get_kanban_issues_by_column(board_id)
        # Flatten the per-column issue lists into one board-wide list.
        all_issues = [issue for col in columns for issue in col.issues]

        return intelligence_service.calculate_kanban_flow_metrics(
            board_id=board_id,
            board_name=board.board_name,
            issues=all_issues,
            columns=columns,
            period_start=start_dt,
            period_end=end_dt,
        )
    except HTTPException:
        # Re-raise our own 404 untouched instead of wrapping it as a 500.
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        logger.exception("Error calculating Kanban flow metrics: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 444 |
+
|
| 445 |
+
|
| 446 |
+
@router.get("/kanban/column-analysis/{board_id}", response_model=List[KanbanColumnAnalysis])
async def get_kanban_column_analysis(board_id: int):
    """Get detailed analysis of each Kanban column.

    Raises:
        HTTPException: 404 if the board does not exist, 500 for upstream failures.
    """
    try:
        board = jira_service.get_kanban_board_by_id(board_id)
        if not board:
            raise HTTPException(status_code=404, detail=f"Kanban board {board_id} not found")

        columns = jira_service.get_kanban_issues_by_column(board_id)
        # Flatten per-column issues into a single list for the analyzer.
        all_issues = [issue for col in columns for issue in col.issues]

        return intelligence_service.analyze_kanban_columns(
            columns=columns,
            issues=all_issues,
        )
    except HTTPException:
        raise
    except Exception as e:
        # logger.exception preserves the traceback that logger.error(f"...") dropped.
        logger.exception("Error analyzing Kanban columns: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 472 |
+
|
| 473 |
+
|
| 474 |
+
@router.get("/kanban/wip-recommendations/{board_id}", response_model=List[WIPLimitRecommendation])
async def get_wip_recommendations(
    board_id: int,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get WIP limit recommendations for Kanban board columns.

    Derives recommendations from per-column analysis and board flow metrics
    over the selected window (default: trailing 30 days).

    Raises:
        HTTPException: 404 if the board does not exist, 400 for malformed
            dates, 500 for upstream failures.
    """
    try:
        start_dt = date.fromisoformat(start_date) if start_date else (date.today() - timedelta(days=30))
        end_dt = date.fromisoformat(end_date) if end_date else date.today()

        board = jira_service.get_kanban_board_by_id(board_id)
        if not board:
            raise HTTPException(status_code=404, detail=f"Kanban board {board_id} not found")

        columns = jira_service.get_kanban_issues_by_column(board_id)
        all_issues = [issue for col in columns for issue in col.issues]

        column_analyses = intelligence_service.analyze_kanban_columns(
            columns=columns,
            issues=all_issues,
        )

        flow_metrics = intelligence_service.calculate_kanban_flow_metrics(
            board_id=board_id,
            board_name=board.board_name,
            issues=all_issues,
            columns=columns,
            period_start=start_dt,
            period_end=end_dt,
        )

        return intelligence_service.generate_wip_recommendations(
            column_analyses=column_analyses,
            flow_metrics=flow_metrics,
        )
    except HTTPException:
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        logger.exception("Error generating WIP recommendations: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 526 |
+
|
| 527 |
+
|
| 528 |
+
@router.get("/kanban/insights/{board_id}", response_model=List[InsightRecommendation])
async def get_kanban_insights(
    board_id: int,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get AI-generated insights for a Kanban board.

    Combines flow metrics, column analyses, and WIP recommendations over the
    selected window (default: trailing 30 days).

    Raises:
        HTTPException: 404 if the board does not exist, 400 for malformed
            dates, 500 for upstream failures.
    """
    try:
        start_dt = date.fromisoformat(start_date) if start_date else (date.today() - timedelta(days=30))
        end_dt = date.fromisoformat(end_date) if end_date else date.today()

        board = jira_service.get_kanban_board_by_id(board_id)
        if not board:
            raise HTTPException(status_code=404, detail=f"Kanban board {board_id} not found")

        columns = jira_service.get_kanban_issues_by_column(board_id)
        all_issues = [issue for col in columns for issue in col.issues]

        flow_metrics = intelligence_service.calculate_kanban_flow_metrics(
            board_id=board_id,
            board_name=board.board_name,
            issues=all_issues,
            columns=columns,
            period_start=start_dt,
            period_end=end_dt,
        )

        column_analyses = intelligence_service.analyze_kanban_columns(
            columns=columns,
            issues=all_issues,
        )

        wip_recommendations = intelligence_service.generate_wip_recommendations(
            column_analyses=column_analyses,
            flow_metrics=flow_metrics,
        )

        return intelligence_service.generate_kanban_insights(
            flow_metrics=flow_metrics,
            column_analyses=column_analyses,
            wip_recommendations=wip_recommendations,
        )
    except HTTPException:
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        logger.exception("Error generating Kanban insights: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 585 |
+
|
| 586 |
+
|
| 587 |
+
@router.get("/kanban/dashboard/{board_id}")
async def get_kanban_dashboard(
    board_id: int,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get comprehensive Kanban dashboard data.

    Returns board metadata, flow metrics, column analyses, WIP
    recommendations, insights, the raw per-column issue lists, and the
    resolved reporting period (default: trailing 30 days).

    Raises:
        HTTPException: 404 if the board does not exist, 400 for malformed
            dates, 500 for upstream failures.
    """
    try:
        start_dt = date.fromisoformat(start_date) if start_date else (date.today() - timedelta(days=30))
        end_dt = date.fromisoformat(end_date) if end_date else date.today()

        board = jira_service.get_kanban_board_by_id(board_id)
        if not board:
            raise HTTPException(status_code=404, detail=f"Kanban board {board_id} not found")

        columns = jira_service.get_kanban_issues_by_column(board_id)
        all_issues = [issue for col in columns for issue in col.issues]

        flow_metrics = intelligence_service.calculate_kanban_flow_metrics(
            board_id=board_id,
            board_name=board.board_name,
            issues=all_issues,
            columns=columns,
            period_start=start_dt,
            period_end=end_dt,
        )

        column_analyses = intelligence_service.analyze_kanban_columns(
            columns=columns,
            issues=all_issues,
        )

        wip_recommendations = intelligence_service.generate_wip_recommendations(
            column_analyses=column_analyses,
            flow_metrics=flow_metrics,
        )

        insights = intelligence_service.generate_kanban_insights(
            flow_metrics=flow_metrics,
            column_analyses=column_analyses,
            wip_recommendations=wip_recommendations,
        )

        return {
            "board": {
                "id": board.board_id,
                "name": board.board_name,
                "type": board.board_type,
                "project_key": board.project_key,
            },
            "flow_metrics": flow_metrics,
            "column_analyses": column_analyses,
            "wip_recommendations": wip_recommendations,
            "insights": insights,
            "columns_with_issues": columns,
            "period": {
                "start": start_dt,
                "end": end_dt,
            },
        }
    except HTTPException:
        raise
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        logger.exception("Error generating Kanban dashboard: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
api/jira_routes.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from fastapi import APIRouter, HTTPException, Query
|
| 2 |
+
from typing import List, Optional
|
| 3 |
+
from datetime import datetime, timedelta, date
|
| 4 |
+
from integrations.jira_service import jira_service
|
| 5 |
+
from models.jira_models import (
|
| 6 |
+
JiraIssue,
|
| 7 |
+
Sprint,
|
| 8 |
+
TeamMember,
|
| 9 |
+
WorklogEntry,
|
| 10 |
+
ProjectInfo,
|
| 11 |
+
KanbanBoard,
|
| 12 |
+
KanbanIssuesByColumn,
|
| 13 |
+
BoardConfiguration
|
| 14 |
+
)
|
| 15 |
+
import logging
|
| 16 |
+
|
| 17 |
+
logger = logging.getLogger(__name__)
|
| 18 |
+
|
| 19 |
+
router = APIRouter(prefix="/jira", tags=["Jira"])
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@router.get("/projects", response_model=List[ProjectInfo])
async def get_projects():
    """Get all Jira projects.

    Raises:
        HTTPException: 500 when the Jira service call fails.
    """
    try:
        return jira_service.get_projects()
    except Exception as e:
        # logger.exception records the traceback; logger.error(f"...") did not.
        logger.exception("Error fetching projects: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@router.get("/projects/{project_key}/issues", response_model=List[JiraIssue])
async def get_project_issues(
    project_key: str,
    max_results: int = Query(100, ge=1, le=1000),
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get issues for a specific project.

    Args:
        project_key: Jira project key.
        max_results: Page size cap, 1-1000.
        start_date: Optional ISO-8601 datetime lower bound.
        end_date: Optional ISO-8601 datetime upper bound.

    Raises:
        HTTPException: 400 for malformed dates, 500 for Jira failures.
    """
    try:
        # datetime.fromisoformat raises ValueError on bad input -> 400 below.
        start_dt = datetime.fromisoformat(start_date) if start_date else None
        end_dt = datetime.fromisoformat(end_date) if end_date else None

        return jira_service.get_issues_by_project(
            project_key=project_key,
            max_results=max_results,
            start_date=start_dt,
            end_date=end_dt,
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        logger.exception("Error fetching issues: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@router.get("/boards", response_model=List[dict])
async def get_boards():
    """Get all Jira boards.

    Raises:
        HTTPException: 500 when the Jira service call fails.
    """
    try:
        return jira_service.get_boards()
    except Exception as e:
        # Keep the traceback in the log for debugging.
        logger.exception("Error fetching boards: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@router.get("/boards/{board_id}/sprints", response_model=List[Sprint])
async def get_board_sprints(board_id: int):
    """Get sprints for a board.

    Raises:
        HTTPException: 500 when the Jira service call fails.
    """
    try:
        return jira_service.get_sprints(board_id)
    except Exception as e:
        logger.exception("Error fetching sprints: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
@router.get("/boards/{board_id}/active-sprint", response_model=Optional[Sprint])
async def get_active_sprint(board_id: int):
    """Get the currently active sprint for a board, or null when none is active.

    Raises:
        HTTPException: 500 when the Jira service call fails.
    """
    try:
        return jira_service.get_active_sprint(board_id)
    except Exception as e:
        logger.exception("Error fetching active sprint: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
@router.get("/sprints/{sprint_id}/issues", response_model=List[JiraIssue])
async def get_sprint_issues(sprint_id: int):
    """Get issues for a specific sprint.

    Raises:
        HTTPException: 500 when the Jira service call fails.
    """
    try:
        return jira_service.get_sprint_issues(sprint_id)
    except Exception as e:
        logger.exception("Error fetching sprint issues: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
@router.get("/projects/{project_key}/team-members", response_model=List[TeamMember])
async def get_team_members(project_key: str):
    """Get team members for a project.

    Raises:
        HTTPException: 500 when the Jira service call fails.
    """
    try:
        return jira_service.get_team_members(project_key)
    except Exception as e:
        logger.exception("Error fetching team members: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
@router.get("/projects/{project_key}/worklogs", response_model=List[WorklogEntry])
async def get_worklogs(
    project_key: str,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None
):
    """Get worklogs for a project, optionally bounded by an ISO datetime range.

    Raises:
        HTTPException: 400 for malformed dates, 500 for Jira failures.
    """
    try:
        start_dt = datetime.fromisoformat(start_date) if start_date else None
        end_dt = datetime.fromisoformat(end_date) if end_date else None

        return jira_service.get_worklogs(
            project_key=project_key,
            start_date=start_dt,
            end_date=end_dt,
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=f"Invalid date format: {str(e)}")
    except Exception as e:
        logger.exception("Error fetching worklogs: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
@router.get("/issues/{issue_key}", response_model=JiraIssue)
async def get_issue(issue_key: str):
    """Get a specific issue by key.

    Raises:
        HTTPException: 404 when the issue does not exist, 500 for Jira failures.
    """
    try:
        issue = jira_service.get_issue_by_key(issue_key)
        if not issue:
            raise HTTPException(status_code=404, detail=f"Issue {issue_key} not found")
        return issue
    except HTTPException:
        # Pass our own 404 through instead of re-wrapping it as a 500.
        raise
    except Exception as e:
        logger.exception("Error fetching issue: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
# ===== KANBAN BOARD ENDPOINTS =====
|
| 146 |
+
|
| 147 |
+
@router.get("/kanban/boards", response_model=List[KanbanBoard])
async def get_kanban_boards():
    """Get all Kanban boards with their column configurations.

    Raises:
        HTTPException: 500 when the Jira service call fails.
    """
    try:
        return jira_service.get_kanban_boards()
    except Exception as e:
        logger.exception("Error fetching Kanban boards: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@router.get("/kanban/boards/{board_id}", response_model=KanbanBoard)
async def get_kanban_board(board_id: int):
    """Get a specific Kanban board by ID.

    Raises:
        HTTPException: 404 when the board is missing or not a Kanban board,
            500 for Jira failures.
    """
    try:
        board = jira_service.get_kanban_board_by_id(board_id)
        if not board:
            raise HTTPException(
                status_code=404,
                detail=f"Kanban board {board_id} not found or is not a Kanban board"
            )
        return board
    except HTTPException:
        raise
    except Exception as e:
        logger.exception("Error fetching Kanban board: %s", e)
        raise HTTPException(status_code=500, detail=str(e))
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
@router.get("/boards/{board_id}/configuration", response_model=BoardConfiguration)
|
| 176 |
+
async def get_board_configuration(board_id: int):
|
| 177 |
+
"""Get detailed board configuration including columns, estimation, and ranking"""
|
| 178 |
+
try:
|
| 179 |
+
return jira_service.get_board_configuration(board_id)
|
| 180 |
+
except Exception as e:
|
| 181 |
+
logger.error(f"Error fetching board configuration: {str(e)}")
|
| 182 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
@router.get("/kanban/boards/{board_id}/issues", response_model=List[KanbanIssuesByColumn])
|
| 186 |
+
async def get_kanban_board_issues(board_id: int):
|
| 187 |
+
"""Get issues for a Kanban board grouped by columns"""
|
| 188 |
+
try:
|
| 189 |
+
return jira_service.get_kanban_issues_by_column(board_id)
|
| 190 |
+
except Exception as e:
|
| 191 |
+
logger.error(f"Error fetching Kanban board issues: {str(e)}")
|
| 192 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
@router.get("/boards/{board_id}/backlog", response_model=List[JiraIssue])
|
| 196 |
+
async def get_board_backlog(
|
| 197 |
+
board_id: int,
|
| 198 |
+
max_results: int = Query(100, ge=1, le=500)
|
| 199 |
+
):
|
| 200 |
+
"""Get backlog issues for a board"""
|
| 201 |
+
try:
|
| 202 |
+
return jira_service.get_board_backlog(board_id, max_results)
|
| 203 |
+
except Exception as e:
|
| 204 |
+
logger.error(f"Error fetching board backlog: {str(e)}")
|
| 205 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
@router.post("/issues/{issue_key}/transition")
|
| 209 |
+
async def transition_issue(
|
| 210 |
+
issue_key: str,
|
| 211 |
+
transition_name: str = Query(..., description="Name of the transition to execute")
|
| 212 |
+
):
|
| 213 |
+
"""Move an issue to a different status/column via transition"""
|
| 214 |
+
try:
|
| 215 |
+
success = jira_service.move_issue_to_status(issue_key, transition_name)
|
| 216 |
+
if not success:
|
| 217 |
+
raise HTTPException(
|
| 218 |
+
status_code=400,
|
| 219 |
+
detail=f"Transition '{transition_name}' not available for issue {issue_key}"
|
| 220 |
+
)
|
| 221 |
+
return {
|
| 222 |
+
"success": True,
|
| 223 |
+
"message": f"Issue {issue_key} transitioned to {transition_name}"
|
| 224 |
+
}
|
| 225 |
+
except HTTPException:
|
| 226 |
+
raise
|
| 227 |
+
except Exception as e:
|
| 228 |
+
logger.error(f"Error transitioning issue: {str(e)}")
|
| 229 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
@router.get("/issues/{issue_key}/transitions", response_model=List[dict])
|
| 233 |
+
async def get_issue_transitions(issue_key: str):
|
| 234 |
+
"""Get available transitions for an issue"""
|
| 235 |
+
try:
|
| 236 |
+
return jira_service.get_issue_transitions(issue_key)
|
| 237 |
+
except Exception as e:
|
| 238 |
+
logger.error(f"Error fetching issue transitions: {str(e)}")
|
| 239 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 240 |
+
|
config/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .settings import settings
|
| 2 |
+
|
| 3 |
+
__all__ = ["settings"]
|
config/settings.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic_settings import BaseSettings
|
| 2 |
+
from typing import Optional
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class Settings(BaseSettings):
    """Application configuration loaded from the environment / `.env` file.

    Every field can be overridden by an environment variable of the same
    name (case-insensitive, per the inner Config). The defaults are
    placeholders suitable only for mock mode or local development.
    """

    # Mock Data Configuration
    use_mock_data: bool = False  # Set to True to use mock data instead of real Jira

    # Jira Configuration — placeholder values; real credentials must come
    # from the environment when use_mock_data is False.
    jira_server_url: str = "https://mock.atlassian.net"  # Default for mock mode
    jira_email: str = "mock@example.com"  # Default for mock mode
    jira_api_token: str = "mock_token"  # Default for mock mode

    # GitHub Configuration (optional integration)
    github_token: Optional[str] = None
    github_org: Optional[str] = None

    # Database connection string (SQLAlchemy-style URL)
    database_url: str = "sqlite:///./enterprise_intelligence.db"

    # Redis connection URL
    redis_url: str = "redis://localhost:6379/0"

    # API Configuration
    api_host: str = "0.0.0.0"
    api_port: int = 8000
    debug: bool = True

    # Security — the default secret must be replaced in production.
    secret_key: str = "mock_secret_key_change_in_production"  # Default for mock mode
    algorithm: str = "HS256"
    access_token_expire_minutes: int = 30

    class Config:
        # Values are read from .env; variable names are matched case-insensitively.
        env_file = ".env"
        case_sensitive = False


# Shared singleton imported by the rest of the application.
settings = Settings()
|
integrations/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .jira_service import jira_service, JiraIntegrationService
|
| 2 |
+
|
| 3 |
+
__all__ = ["jira_service", "JiraIntegrationService"]
|
integrations/jira_service.py
ADDED
|
@@ -0,0 +1,947 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from jira import JIRA
|
| 2 |
+
from typing import List, Optional, Dict, Any
|
| 3 |
+
from datetime import datetime, timedelta
|
| 4 |
+
from config.settings import settings
|
| 5 |
+
from models.jira_models import (
|
| 6 |
+
JiraIssue,
|
| 7 |
+
Sprint,
|
| 8 |
+
TeamMember,
|
| 9 |
+
WorklogEntry,
|
| 10 |
+
ProjectInfo,
|
| 11 |
+
KanbanBoard,
|
| 12 |
+
KanbanColumn,
|
| 13 |
+
KanbanIssuesByColumn,
|
| 14 |
+
BoardConfiguration
|
| 15 |
+
)
|
| 16 |
+
import logging
|
| 17 |
+
import requests
|
| 18 |
+
|
| 19 |
+
logger = logging.getLogger(__name__)
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class JiraIntegrationService:
|
| 23 |
+
"""Service for integrating with Jira API"""
|
| 24 |
+
|
| 25 |
+
def __init__(self):
    """Create the Jira library client plus raw-request helpers.

    Raises:
        Exception: re-raised when the Jira client cannot be constructed.
    """
    try:
        # Pin REST API v3; v2 is deprecated (removal announced for 2026).
        jira_options = {
            'server': settings.jira_server_url,
            'rest_api_version': '3',  # Use API v3 instead of deprecated v2
            'agile_rest_api_version': 'latest'
        }
        self.jira = JIRA(
            options=jira_options,
            basic_auth=(settings.jira_email, settings.jira_api_token)
        )

        # Plain-`requests` access for endpoints the jira library lacks.
        self.base_url = settings.jira_server_url
        self.auth = (settings.jira_email, settings.jira_api_token)
        self.headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json'
        }

        logger.info("Jira client initialized successfully with API v3")
    except Exception as exc:
        logger.error(f"Failed to initialize Jira client: {str(exc)}")
        raise
|
| 51 |
+
|
| 52 |
+
def get_projects(self) -> List[ProjectInfo]:
    """Return all visible Jira projects as ProjectInfo models."""
    try:
        results = []
        for proj in self.jira.projects():
            # `lead` is only present on some project payloads.
            lead_name = getattr(proj.lead, 'displayName', None) if hasattr(proj, 'lead') else None
            results.append(ProjectInfo(
                project_key=proj.key,
                project_name=proj.name,
                project_type=getattr(proj, 'projectTypeKey', 'unknown'),
                lead=lead_name,
                description=getattr(proj, 'description', None)
            ))
        return results
    except Exception as exc:
        logger.error(f"Error fetching projects: {str(exc)}")
        raise
|
| 69 |
+
|
| 70 |
+
def get_issues_by_project(
    self,
    project_key: str,
    max_results: int = 100,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None
) -> List[JiraIssue]:
    """Return issues of a project, optionally filtered by creation date.

    Args:
        project_key: Jira project key to query.
        max_results: cap on the number of issues returned.
        start_date / end_date: inclusive creation-date bounds.
    """
    try:
        clauses = [f"project = {project_key}"]
        if start_date:
            clauses.append(f"created >= '{start_date.strftime('%Y-%m-%d')}'")
        if end_date:
            clauses.append(f"created <= '{end_date.strftime('%Y-%m-%d')}'")
        jql = " AND ".join(clauses) + " ORDER BY created DESC"

        raw_issues = self._search_jql(
            jql,
            max_results=max_results,
            fields=['summary', 'status', 'priority', 'assignee', 'reporter', 'created',
                    'updated', 'resolutiondate', 'customfield_10016', 'description',
                    'labels', 'components', 'issuetype', 'timespent', 'timeestimate']
        )

        return [self._parse_issue_dict(raw) for raw in raw_issues]
    except Exception as exc:
        logger.error(f"Error fetching issues for project {project_key}: {str(exc)}")
        raise
|
| 100 |
+
|
| 101 |
+
def get_sprint_issues(self, sprint_id: int) -> List[JiraIssue]:
    """Return every issue assigned to the given sprint (newest first)."""
    try:
        raw_issues = self._search_jql(
            f"sprint = {sprint_id} ORDER BY created DESC",
            max_results=500,
            fields=['summary', 'status', 'priority', 'assignee', 'reporter', 'created',
                    'updated', 'resolutiondate', 'customfield_10016', 'description',
                    'labels', 'components', 'issuetype', 'timespent', 'timeestimate']
        )
        return [self._parse_issue_dict(raw) for raw in raw_issues]
    except Exception as exc:
        logger.error(f"Error fetching sprint issues: {str(exc)}")
        raise
|
| 117 |
+
|
| 118 |
+
def get_sprints(self, board_id: int) -> List[Sprint]:
    """Return all sprints configured on a board via the Agile REST API."""
    try:
        # Call the Agile endpoint directly with `requests`.
        response = requests.get(
            f"{self.base_url}/rest/agile/latest/board/{board_id}/sprint",
            headers=self.headers,
            auth=self.auth
        )
        response.raise_for_status()

        parsed = []
        for raw in response.json().get('values', []):
            parsed.append(Sprint(
                sprint_id=raw.get('id'),
                sprint_name=raw.get('name', ''),
                state=raw.get('state', ''),
                start_date=self._parse_datetime(raw.get('startDate')),
                end_date=self._parse_datetime(raw.get('endDate')),
                complete_date=self._parse_datetime(raw.get('completeDate')),
                goal=raw.get('goal')
            ))
        return parsed
    except Exception as exc:
        logger.error(f"Error fetching sprints for board {board_id}: {str(exc)}")
        raise
|
| 145 |
+
|
| 146 |
+
def get_active_sprint(self, board_id: int) -> Optional[Sprint]:
    """Return the board's first sprint in the 'active' state, or None."""
    try:
        return next(
            (sprint for sprint in self.get_sprints(board_id) if sprint.state == 'active'),
            None
        )
    except Exception as exc:
        logger.error(f"Error fetching active sprint: {str(exc)}")
        return None
|
| 155 |
+
|
| 156 |
+
def get_team_members(self, project_key: str) -> List[TeamMember]:
    """Derive the project's team from issue assignees.

    There is no direct "project members" endpoint used here; instead we
    collect the distinct assignees of up to 1000 issues and enrich each
    with a per-user lookup (for email/avatar).

    Args:
        project_key: Jira project key.

    Returns:
        Unique TeamMember entries, one per distinct assignee accountId.

    Raises:
        Exception: re-raised after logging when the search fails.
    """
    try:
        # Get users who have been assigned issues in the project
        jql = f"project = {project_key} AND assignee is not EMPTY"
        issues = self._search_jql(jql, max_results=1000, fields=['assignee'])

        # Extract unique assignees, keyed by accountId to dedupe.
        assignees_dict = {}
        for issue in issues:
            assignee = issue.get('fields', {}).get('assignee')
            if assignee:
                account_id = assignee.get('accountId')
                if account_id and account_id not in assignees_dict:
                    # Fetch full user details to get email; one HTTP call per
                    # *new* user only, thanks to the dedupe check above.
                    user_details = self._get_user_details(account_id)

                    # Prefer the detailed lookup, fall back to the embedded
                    # assignee payload when the lookup returned nothing.
                    assignees_dict[account_id] = TeamMember(
                        account_id=account_id,
                        display_name=user_details.get('displayName') or assignee.get('displayName', ''),
                        email=user_details.get('emailAddress'),
                        active=user_details.get('active', assignee.get('active', True)),
                        avatar_url=user_details.get('avatarUrls', {}).get('48x48') or assignee.get('avatarUrls', {}).get('48x48')
                    )

        return list(assignees_dict.values())
    except Exception as e:
        logger.error(f"Error fetching team members: {str(e)}")
        raise
|
| 185 |
+
|
| 186 |
+
def _get_user_details(self, account_id: str) -> Dict[str, Any]:
    """Fetch detailed user information (including email) for an account.

    Args:
        account_id: Atlassian account id of the user.

    Returns:
        The raw user JSON from /rest/api/3/user, or an empty dict when the
        lookup fails (missing permission, deleted user, network error, ...).
        Callers treat the empty dict as "no extra details available".
    """
    try:
        url = f"{self.base_url}/rest/api/3/user"
        params = {
            'accountId': account_id,
            'expand': 'groups,applicationRoles'
        }

        response = requests.get(url, headers=self.headers, auth=self.auth, params=params)
        response.raise_for_status()

        return response.json()
    except Exception as e:
        logger.warning(f"Could not fetch details for user {account_id}: {str(e)}")
        # Best-effort lookup: return an empty dict so callers can fall back
        # to the data embedded in the issue payload. (A trailing `raise`
        # after this return was unreachable dead code and has been removed.)
        return {}
|
| 204 |
+
|
| 205 |
+
def get_worklogs(
    self,
    project_key: str,
    start_date: Optional[datetime] = None,
    end_date: Optional[datetime] = None
) -> List[WorklogEntry]:
    """Collect worklog entries for every issue in a project with logged time.

    Finds issues with `timespent > 0` (optionally bounded by the issues'
    *updated* date, not the worklog date), then fetches each issue's
    worklogs with one HTTP call per issue. Failures on individual issues
    are logged and skipped, so a partial result can be returned.

    Args:
        project_key: Jira project key.
        start_date / end_date: inclusive bounds on the issue `updated` field.

    Returns:
        Flat list of WorklogEntry models across all matching issues.

    Raises:
        Exception: re-raised after logging when the initial search fails.
    """
    try:
        jql = f"project = {project_key} AND timespent > 0"

        if start_date:
            jql += f" AND updated >= '{start_date.strftime('%Y-%m-%d')}'"
        if end_date:
            jql += f" AND updated <= '{end_date.strftime('%Y-%m-%d')}'"

        issues = self._search_jql(jql, max_results=1000, fields=['key'])

        worklogs = []
        for issue in issues:
            try:
                issue_key = issue.get('key')
                if not issue_key:
                    continue

                # Fetch worklogs using direct API (one request per issue).
                url = f"{self.base_url}/rest/api/3/issue/{issue_key}/worklog"
                response = requests.get(url, headers=self.headers, auth=self.auth)
                response.raise_for_status()

                worklog_data = response.json()
                for worklog in worklog_data.get('worklogs', []):
                    # Parse datetime before passing to model — the raw dict is
                    # mutated in place so from_jira_worklog sees a datetime.
                    started = self._parse_datetime(worklog.get('started'))
                    worklog['started'] = started

                    worklogs.append(
                        WorklogEntry.from_jira_worklog(worklog, issue_key)
                    )
            except Exception as e:
                # Best-effort: a single bad issue must not abort the sweep.
                logger.warning(f"Error fetching worklogs for issue {issue.get('key')}: {str(e)}")
                continue

        return worklogs
    except Exception as e:
        logger.error(f"Error fetching worklogs: {str(e)}")
        raise
|
| 251 |
+
|
| 252 |
+
def get_issue_by_key(self, issue_key: str) -> Optional[JiraIssue]:
    """Return a single issue by key, or None when not found / on error."""
    try:
        # A JQL search is used instead of the direct issue endpoint.
        matches = self._search_jql(
            f"key = {issue_key}",
            max_results=1,
            fields=['summary', 'status', 'priority', 'assignee', 'reporter', 'created',
                    'updated', 'resolutiondate', 'customfield_10016', 'description',
                    'labels', 'components', 'issuetype', 'timespent', 'timeestimate']
        )
        return self._parse_issue_dict(matches[0]) if matches else None
    except Exception as exc:
        logger.error(f"Error fetching issue {issue_key}: {str(exc)}")
        return None
|
| 270 |
+
|
| 271 |
+
def get_boards(self) -> List[Dict[str, Any]]:
    """Return id/name/type summaries for all Agile boards."""
    try:
        response = requests.get(
            f"{self.base_url}/rest/agile/latest/board",
            headers=self.headers,
            auth=self.auth
        )
        response.raise_for_status()

        summaries = []
        for board in response.json().get('values', []):
            summaries.append({
                'id': board.get('id'),
                'name': board.get('name', ''),
                'type': board.get('type', '')
            })
        return summaries
    except Exception as exc:
        logger.error(f"Error fetching boards: {str(exc)}")
        raise
|
| 294 |
+
|
| 295 |
+
def _parse_issue(self, issue) -> JiraIssue:
    """Convert a jira-library Issue object into the local JiraIssue model.

    Counterpart to _parse_issue_dict, which handles raw REST dicts.
    `issue` is assumed to be a `jira` library resource with a `.fields`
    attribute — TODO confirm all call sites pass that type.
    """
    fields = issue.fields

    # Extract story points (customfield_10016 is common for story points;
    # NOTE(review): this field id varies per Jira instance — verify.)
    story_points = None
    if hasattr(fields, 'customfield_10016'):
        story_points = fields.customfield_10016

    # Extract sprint information (customfield_10020 is a common sprint
    # field id; only the first sprint in the list is kept).
    sprint = None
    if hasattr(fields, 'customfield_10020'):  # Common sprint field
        sprint_data = fields.customfield_10020
        if sprint_data and isinstance(sprint_data, list) and len(sprint_data) > 0:
            sprint = sprint_data[0].name if hasattr(sprint_data[0], 'name') else str(sprint_data[0])

    return JiraIssue(
        issue_key=issue.key,
        summary=fields.summary,
        description=getattr(fields, 'description', None),
        status=fields.status.name,
        # Optional nested resources: guard each before dereferencing.
        priority=fields.priority.name if fields.priority else None,
        assignee=fields.assignee.displayName if fields.assignee else None,
        reporter=fields.reporter.displayName if fields.reporter else None,
        created=self._parse_datetime(fields.created),
        updated=self._parse_datetime(fields.updated),
        resolved=self._parse_datetime(getattr(fields, 'resolutiondate', None)),
        story_points=story_points,
        sprint=sprint,
        labels=getattr(fields, 'labels', []),
        components=[comp.name for comp in getattr(fields, 'components', [])],
        project_key=fields.project.key,
        issue_type=fields.issuetype.name,
        time_spent=getattr(fields, 'timespent', None),
        time_estimate=getattr(fields, 'timeestimate', None)
    )
|
| 331 |
+
|
| 332 |
+
def _parse_datetime(self, date_str: Optional[str]) -> Optional[datetime]:
|
| 333 |
+
"""Parse datetime string from Jira"""
|
| 334 |
+
if not date_str:
|
| 335 |
+
return None
|
| 336 |
+
try:
|
| 337 |
+
# Jira typically returns ISO format
|
| 338 |
+
return datetime.fromisoformat(date_str.replace('Z', '+00:00'))
|
| 339 |
+
except Exception:
|
| 340 |
+
return None
|
| 341 |
+
|
| 342 |
+
def _search_jql(
    self,
    jql: str,
    max_results: int = 100,
    fields: Optional[List[str]] = None
) -> List[Dict[str, Any]]:
    """Run a JQL query against the /rest/api/3/search/jql endpoint.

    Args:
        jql: the query string.
        max_results: page size requested from Jira.
        fields: optional field names to restrict the payload.

    Returns:
        The raw issue dicts from the response (empty list when none).
    """
    query = {
        'jql': jql,
        'maxResults': max_results,
        'startAt': 0
    }
    if fields:
        query['fields'] = ','.join(fields)

    try:
        response = requests.get(
            f"{self.base_url}/rest/api/3/search/jql",
            params=query,
            headers=self.headers,
            auth=self.auth
        )
        response.raise_for_status()
        return response.json().get('issues', [])
    except requests.exceptions.RequestException as exc:
        logger.error(f"Error executing JQL search: {str(exc)}")
        raise
|
| 375 |
+
|
| 376 |
+
def _parse_issue_dict(self, issue_dict: Dict[str, Any]) -> JiraIssue:
    """Convert a raw REST issue dict into the local JiraIssue model.

    Counterpart to _parse_issue, which handles jira-library objects.
    All lookups are defensive (`.get` with defaults) because field
    presence depends on the `fields=` list requested by the caller.
    """
    fields = issue_dict.get('fields', {})

    # Extract story points (customfield_10016 is common for story points;
    # NOTE(review): custom field ids vary per Jira instance — verify.)
    story_points = fields.get('customfield_10016')

    # Extract sprint information; only the first sprint entry is kept.
    sprint = None
    sprint_data = fields.get('customfield_10020')  # Common sprint field
    if sprint_data and isinstance(sprint_data, list) and len(sprint_data) > 0:
        sprint_obj = sprint_data[0]
        if isinstance(sprint_obj, dict):
            sprint = sprint_obj.get('name')
        else:
            sprint = str(sprint_obj)

    # Parse assignee (nested object; may be null in the payload)
    assignee = None
    if fields.get('assignee'):
        assignee = fields['assignee'].get('displayName')

    # Parse reporter
    reporter = None
    if fields.get('reporter'):
        reporter = fields['reporter'].get('displayName')

    # Parse priority
    priority = None
    if fields.get('priority'):
        priority = fields['priority'].get('name')

    # Parse status
    status = fields.get('status', {}).get('name', 'Unknown')

    # Parse issue type
    issue_type = fields.get('issuetype', {}).get('name', 'Unknown')

    # Parse project — empty string when 'project' was not requested.
    project_key = fields.get('project', {}).get('key', '')

    # Parse components (names only)
    components = [comp.get('name', '') for comp in fields.get('components', [])]

    # Parse description (API v3 may return ADF format, handled by helper)
    description = self._parse_description(fields.get('description'))

    return JiraIssue(
        issue_key=issue_dict.get('key', ''),
        summary=fields.get('summary', ''),
        description=description,
        status=status,
        priority=priority,
        assignee=assignee,
        reporter=reporter,
        created=self._parse_datetime(fields.get('created')),
        updated=self._parse_datetime(fields.get('updated')),
        resolved=self._parse_datetime(fields.get('resolutiondate')),
        story_points=story_points,
        sprint=sprint,
        labels=fields.get('labels', []),
        components=components,
        project_key=project_key,
        issue_type=issue_type,
        time_spent=fields.get('timespent'),
        time_estimate=fields.get('timeestimate')
    )
|
| 443 |
+
|
| 444 |
+
def _parse_description(self, description_data: Any) -> Optional[str]:
|
| 445 |
+
"""Parse description field from Jira API v3 (handles rich text format)"""
|
| 446 |
+
if not description_data:
|
| 447 |
+
return None
|
| 448 |
+
|
| 449 |
+
# If it's already a string, return it
|
| 450 |
+
if isinstance(description_data, str):
|
| 451 |
+
return description_data
|
| 452 |
+
|
| 453 |
+
# If it's a dict (Atlassian Document Format), extract text content
|
| 454 |
+
if isinstance(description_data, dict):
|
| 455 |
+
return self._extract_text_from_adf(description_data)
|
| 456 |
+
|
| 457 |
+
return None
|
| 458 |
+
|
| 459 |
+
def _extract_text_from_adf(self, adf_content: Dict[str, Any]) -> str:
|
| 460 |
+
"""Extract plain text from Atlassian Document Format (ADF)"""
|
| 461 |
+
if not adf_content:
|
| 462 |
+
return ""
|
| 463 |
+
|
| 464 |
+
text_parts = []
|
| 465 |
+
|
| 466 |
+
def extract_text(node):
|
| 467 |
+
"""Recursively extract text from ADF nodes"""
|
| 468 |
+
if isinstance(node, dict):
|
| 469 |
+
# If node has text, add it
|
| 470 |
+
if 'text' in node:
|
| 471 |
+
text_parts.append(node['text'])
|
| 472 |
+
|
| 473 |
+
# Process content array
|
| 474 |
+
if 'content' in node and isinstance(node['content'], list):
|
| 475 |
+
for child in node['content']:
|
| 476 |
+
extract_text(child)
|
| 477 |
+
elif isinstance(node, list):
|
| 478 |
+
for item in node:
|
| 479 |
+
extract_text(item)
|
| 480 |
+
|
| 481 |
+
extract_text(adf_content)
|
| 482 |
+
return ' '.join(text_parts).strip()
|
| 483 |
+
|
| 484 |
+
# ===== KANBAN BOARD METHODS =====
|
| 485 |
+
|
| 486 |
+
def get_kanban_boards(self) -> List[KanbanBoard]:
    """Fetch all Kanban boards, each enriched with its column configuration.

    Makes one board-list request plus one configuration request per board
    (via _get_board_config_details).

    Raises:
        Exception: re-raised after logging on any failure.
    """
    try:
        # Use REST API directly; filter server-side to Kanban boards only.
        url = f"{self.base_url}/rest/agile/latest/board"
        params = {'type': 'kanban'}

        response = requests.get(url, headers=self.headers, auth=self.auth, params=params)
        response.raise_for_status()

        data = response.json()
        boards_data = data.get('values', [])

        kanban_boards = []
        for board in boards_data:
            # Get board configuration to extract columns (extra HTTP call
            # per board).
            board_config = self._get_board_config_details(board.get('id'))

            # Extract location info (project the board lives in, if any)
            location = board.get('location', {})
            project_key = location.get('projectKey') if location else None

            kanban_boards.append(KanbanBoard(
                board_id=board.get('id'),
                board_name=board.get('name', ''),
                board_type=board.get('type', 'kanban'),
                project_key=project_key,
                columns=board_config.get('columns', []),
                filter_id=board.get('filter', {}).get('id') if board.get('filter') else None,
                location=None  # Simplified to avoid serialization issues
            ))

        return kanban_boards
    except Exception as e:
        logger.error(f"Error fetching Kanban boards: {str(e)}")
        raise
|
| 522 |
+
|
| 523 |
+
def get_kanban_board_by_id(self, board_id: int) -> Optional[KanbanBoard]:
    """Fetch a specific Kanban board by ID with actual issue counts.

    Returns None when the board exists but is not of type 'kanban'.
    Per-column issue counts are computed from the board's live issues
    rather than trusting the static configuration.
    """
    try:
        board_resp = requests.get(
            f"{self.base_url}/rest/agile/latest/board/{board_id}",
            headers=self.headers,
            auth=self.auth,
        )
        board_resp.raise_for_status()
        board = board_resp.json()

        # This accessor only handles Kanban boards
        if board.get('type', '').lower() != 'kanban':
            logger.warning(f"Board {board_id} is not a Kanban board")
            return None

        columns = self._get_board_config_details(board.get('id')).get('columns', [])

        # Pull the board's issues so counts reflect reality
        issues_resp = requests.get(
            f"{self.base_url}/rest/agile/latest/board/{board_id}/issue",
            headers=self.headers,
            auth=self.auth,
            params={
                'maxResults': 500,
                'fields': 'summary,status,priority,assignee,reporter,created,updated,resolutiondate,customfield_10016,description,labels,components,issuetype,timespent,timeestimate'
            },
        )
        issues_resp.raise_for_status()

        parsed_issues = [
            self._parse_issue_dict(raw)
            for raw in issues_resp.json().get('issues', [])
        ]
        logger.info(f"Board {board_id}: Found {len(parsed_issues)} issues")

        # Some Jira configurations return columns without status
        # mappings; recover by inferring the mapping from real issues.
        if columns and all(not col.statuses for col in columns):
            logger.info(f"Board {board_id}: Column statuses are empty, inferring from issues")
            columns = self._infer_columns_from_issues(parsed_issues, columns)

        # Map each status (lowercased) to its owning column name
        status_to_column = {
            status.lower(): col.name
            for col in columns
            for status in col.statuses
        }

        # Tally issues into columns by their current status
        column_counts = {col.name: 0 for col in columns}
        for issue in parsed_issues:
            if issue.status:
                owner = status_to_column.get(issue.status.lower())
                if owner:
                    column_counts[owner] += 1

        for col in columns:
            col.issue_count = column_counts.get(col.name, 0)

        logger.info(f"Board {board_id} column counts: {column_counts}")

        loc = board.get('location', {})
        raw_filter = board.get('filter')

        return KanbanBoard(
            board_id=board.get('id'),
            board_name=board.get('name', ''),
            board_type=board.get('type', 'kanban'),
            project_key=loc.get('projectKey') if loc else None,
            columns=columns,
            filter_id=raw_filter.get('id') if raw_filter else None,
            location=None  # Simplified to avoid serialization issues
        )
    except Exception as e:
        logger.error(f"Error fetching Kanban board {board_id}: {str(e)}")
        raise
|
| 601 |
+
|
| 602 |
+
def get_board_configuration(self, board_id: int) -> BoardConfiguration:
    """Get detailed board configuration (name, estimation, ranking, columns).

    Raises:
        Any HTTP/network error from the underlying request.
    """
    try:
        resp = requests.get(
            f"{self.base_url}/rest/agile/latest/board/{board_id}/configuration",
            headers=self.headers,
            auth=self.auth,
        )
        resp.raise_for_status()
        payload = resp.json()

        return BoardConfiguration(
            board_id=board_id,
            name=payload.get('name', ''),
            self_link=payload.get('self', ''),
            estimation=payload.get('estimation'),
            ranking=payload.get('ranking'),
            column_config=payload.get('columnConfig')
        )
    except Exception as e:
        logger.error(f"Error fetching board configuration: {str(e)}")
        raise
|
| 622 |
+
|
| 623 |
+
def get_kanban_issues_by_column(self, board_id: int) -> List[KanbanIssuesByColumn]:
    """Get issues grouped by Kanban board columns.

    Issues whose status maps to no configured column are collected
    into a trailing synthetic "Other" column so none are dropped.
    """
    try:
        # Column layout first; issues are matched against its statuses
        columns = self._get_board_config_details(board_id).get('columns', [])

        resp = requests.get(
            f"{self.base_url}/rest/agile/latest/board/{board_id}/issue",
            headers=self.headers,
            auth=self.auth,
            params={
                'maxResults': 500,
                'fields': 'summary,status,priority,assignee,reporter,created,updated,resolutiondate,customfield_10016,description,labels,components,issuetype,timespent,timeestimate'
            },
        )
        resp.raise_for_status()

        parsed_issues = [self._parse_issue_dict(i) for i in resp.json().get('issues', [])]
        logger.info(f"Found {len(parsed_issues)} total issues for board {board_id}")

        # Recover from boards whose column config lacks status mappings
        if not any(col.statuses for col in columns) and parsed_issues:
            logger.warning(f"No statuses configured in columns, inferring from {len(parsed_issues)} issues")
            columns = self._infer_columns_from_issues(parsed_issues, columns)

        grouped: List[KanbanIssuesByColumn] = []
        matched_keys = set()

        for column in columns:
            # Case-insensitive status matching
            wanted = [s.lower() for s in column.statuses]
            members = [i for i in parsed_issues if i.status.lower() in wanted]
            matched_keys.update(i.issue_key for i in members)

            logger.info(f"Column '{column.name}' has {len(members)} issues (statuses: {column.statuses})")

            grouped.append(KanbanIssuesByColumn(
                column_name=column.name,
                statuses=column.statuses,
                issues=members,
                wip_limit_min=column.min,
                wip_limit_max=column.max,
                issue_count=len(members)
            ))

        # Anything left over lands in a synthetic "Other" column
        leftovers = [i for i in parsed_issues if i.issue_key not in matched_keys]
        if leftovers:
            logger.warning(f"Found {len(leftovers)} unmatched issues with statuses: {set([i.status for i in leftovers])}")
            grouped.append(KanbanIssuesByColumn(
                column_name="Other",
                statuses=list(set([i.status for i in leftovers])),  # de-duplicated
                issues=leftovers,
                wip_limit_min=None,
                wip_limit_max=None,
                issue_count=len(leftovers)
            ))

        return grouped
    except Exception as e:
        logger.error(f"Error fetching Kanban issues by column: {str(e)}")
        raise
|
| 699 |
+
|
| 700 |
+
def _infer_columns_from_issues(self, issues: List[JiraIssue], existing_columns: List[KanbanColumn]) -> List[KanbanColumn]:
    """Infer column configurations from actual issue statuses.

    Used when the board configuration reports columns without any
    mapped statuses. Statuses observed on real issues are assigned to
    columns first by exact/substring name comparison, then by common
    workflow keywords (backlog / progress / done / ready, etc.).
    """
    # Tally how many issues carry each distinct status
    unique_statuses: Dict[str, int] = {}
    for issue in issues:
        unique_statuses[issue.status] = unique_statuses.get(issue.status, 0) + 1

    logger.info(f"Found unique statuses: {list(unique_statuses.keys())}")

    inferred_columns: List[KanbanColumn] = []

    for col in existing_columns:
        name_lc = col.name.lower()
        matched: List[str] = []

        # Pass 1: exact match, then substring match in either direction
        # (e.g. "In Progress" matches a "Progress" column)
        for status in unique_statuses.keys():
            status_lc = status.lower()
            if status_lc == name_lc:
                matched.append(status)
            elif status_lc in name_lc or name_lc in status_lc:
                matched.append(status)

        # Pass 2: keyword heuristics for common workflow column names
        if not matched:
            if 'backlog' in name_lc:
                matched = [s for s in unique_statuses.keys() if 'backlog' in s.lower()]
            elif 'progress' in name_lc or 'doing' in name_lc or 'development' in name_lc:
                matched = [s for s in unique_statuses.keys() if 'progress' in s.lower() or 'development' in s.lower()]
            elif 'done' in name_lc or 'complete' in name_lc:
                matched = [s for s in unique_statuses.keys() if 'done' in s.lower() or 'complete' in s.lower()]
            elif 'selected' in name_lc or 'ready' in name_lc or 'todo' in name_lc:
                matched = [s for s in unique_statuses.keys() if 'selected' in s.lower() or 'ready' in s.lower() or 'todo' in s.lower() or 'to do' in s.lower()]

        logger.info(f"Inferred column '{col.name}': {matched}")

        inferred_columns.append(KanbanColumn(
            name=col.name,
            statuses=matched,
            min=col.min,
            max=col.max,
            issue_count=0
        ))

    # Statuses still unmapped will end up in the caller's "Other" bucket
    mapped_statuses = set()
    for col in inferred_columns:
        mapped_statuses.update(col.statuses)

    unmapped = set(unique_statuses.keys()) - mapped_statuses
    if unmapped:
        logger.warning(f"Unmapped statuses after inference: {unmapped}")

    return inferred_columns
|
| 760 |
+
|
| 761 |
+
def get_board_backlog(self, board_id: int, max_results: int = 100) -> List[JiraIssue]:
    """Get backlog issues for a board.

    Scrum boards expose a dedicated /backlog endpoint; Kanban boards do
    not, so for those we fetch all board issues and keep the ones whose
    status name contains 'backlog'.
    """
    # Same field set either way; hoisted to avoid duplication
    issue_fields = 'summary,status,priority,assignee,reporter,created,updated,resolutiondate,customfield_10016,description,labels,components,issuetype,timespent,timeestimate'
    try:
        # The board type decides which retrieval strategy applies
        meta_resp = requests.get(
            f"{self.base_url}/rest/agile/latest/board/{board_id}",
            headers=self.headers,
            auth=self.auth,
        )
        meta_resp.raise_for_status()
        board_type = meta_resp.json().get('type', '').lower()

        if board_type == 'scrum':
            # Scrum: dedicated backlog endpoint
            resp = requests.get(
                f"{self.base_url}/rest/agile/latest/board/{board_id}/backlog",
                headers=self.headers,
                auth=self.auth,
                params={'maxResults': max_results, 'fields': issue_fields},
            )
            resp.raise_for_status()
            return [self._parse_issue_dict(i) for i in resp.json().get('issues', [])]

        # Kanban: filter all board issues down to Backlog-status ones
        logger.info(f"Board {board_id} is Kanban, filtering for Backlog column issues")

        resp = requests.get(
            f"{self.base_url}/rest/agile/latest/board/{board_id}/issue",
            headers=self.headers,
            auth=self.auth,
            params={'maxResults': max_results, 'fields': issue_fields},
        )
        resp.raise_for_status()
        all_issues = resp.json().get('issues', [])

        backlog_issues = [
            self._parse_issue_dict(issue)
            for issue in all_issues
            if 'backlog' in issue.get('fields', {}).get('status', {}).get('name', '').lower()
        ]

        logger.info(f"Found {len(backlog_issues)} backlog issues out of {len(all_issues)} total")
        return backlog_issues

    except Exception as e:
        logger.error(f"Error fetching board backlog: {str(e)}")
        raise
|
| 819 |
+
|
| 820 |
+
def move_issue_to_status(self, issue_key: str, transition_name: str) -> bool:
    """Move an issue to a different status (column) via a workflow transition.

    Looks up the issue's available transitions, matches the requested
    name case-insensitively, and executes it.

    Returns:
        True on success; False when no matching transition exists.
    Raises:
        Any HTTP/network error from the underlying requests.
    """
    try:
        transitions_url = f"{self.base_url}/rest/api/3/issue/{issue_key}/transitions"

        listing = requests.get(transitions_url, headers=self.headers, auth=self.auth)
        listing.raise_for_status()

        # Resolve the human-readable transition name to its numeric id
        wanted = transition_name.lower()
        transition_id = next(
            (t.get('id') for t in listing.json().get('transitions', [])
             if t.get('name', '').lower() == wanted),
            None,
        )

        if not transition_id:
            logger.error(f"Transition '{transition_name}' not found for issue {issue_key}")
            return False

        # POSTing to the same endpoint performs the transition
        result = requests.post(
            transitions_url,
            json={'transition': {'id': transition_id}},
            headers=self.headers,
            auth=self.auth
        )
        result.raise_for_status()

        logger.info(f"Successfully moved issue {issue_key} to {transition_name}")
        return True

    except Exception as e:
        logger.error(f"Error moving issue {issue_key}: {str(e)}")
        raise
|
| 860 |
+
|
| 861 |
+
def get_issue_transitions(self, issue_key: str) -> List[Dict[str, Any]]:
    """Get available workflow transitions for an issue.

    Returns:
        A list of dicts with the transition id, its name, the status it
        leads to, and whether a transition screen is shown.
    """
    try:
        resp = requests.get(
            f"{self.base_url}/rest/api/3/issue/{issue_key}/transitions",
            headers=self.headers,
            auth=self.auth,
        )
        resp.raise_for_status()

        simplified: List[Dict[str, Any]] = []
        for transition in resp.json().get('transitions', []):
            simplified.append({
                'id': transition.get('id'),
                'name': transition.get('name'),
                'to_status': transition.get('to', {}).get('name'),
                'has_screen': transition.get('hasScreen', False),
            })
        return simplified
    except Exception as e:
        logger.error(f"Error fetching transitions for {issue_key}: {str(e)}")
        raise
|
| 882 |
+
|
| 883 |
+
def _get_board_config_details(self, board_id: int) -> Dict[str, Any]:
    """Helper method to get board configuration details.

    Fetches /board/{id}/configuration and normalizes its column
    definitions into KanbanColumn objects. Deliberately best-effort:
    on any failure it logs and returns empty columns instead of
    raising, since callers treat the configuration as optional
    enrichment.

    Fixes over the previous version:
    - duplicated status-parsing logic extracted into one helper;
    - bare ``except:`` replaced with targeted handling;
    - ``locals()`` introspection hack for debug logging removed by
      initializing ``config_data`` before the try block.
    """

    def _status_names(raw_statuses: List[Any]) -> List[str]:
        # Statuses may be plain strings or {'name': ...} dicts; keep
        # only entries that yield a non-empty name.
        return [
            s.get('name') if isinstance(s, dict) else str(s)
            for s in raw_statuses
            if s and (isinstance(s, str) or (isinstance(s, dict) and s.get('name')))
        ]

    config_data: Dict[str, Any] = {}
    try:
        url = f"{self.base_url}/rest/agile/latest/board/{board_id}/configuration"
        response = requests.get(url, headers=self.headers, auth=self.auth)
        response.raise_for_status()

        config_data = response.json()
        columns_data = config_data.get('columnConfig', {}).get('columns', [])

        logger.info(f"Board {board_id} config: {len(columns_data)} columns found")

        columns: List[KanbanColumn] = []
        for col in columns_data:
            # Prefer 'statuses'; some Jira deployments expose the
            # mapping under 'mappedStatuses' instead.
            statuses = _status_names(col.get('statuses', []))
            if not statuses:
                statuses = _status_names(col.get('mappedStatuses', []))

            column_name = col.get('name', '')
            logger.info(f"  Column '{column_name}': {len(statuses)} statuses = {statuses}")

            # Only add columns that have at least a name
            if column_name:
                columns.append(KanbanColumn(
                    name=column_name,
                    statuses=statuses,
                    min=col.get('min'),
                    max=col.get('max'),
                    issue_count=0
                ))

        return {'columns': columns, 'raw_config': config_data}

    except Exception as e:
        logger.warning(f"Could not fetch board configuration for {board_id}: {str(e)}")
        # config_data stays {} unless the response was parsed before failing
        logger.warning(f"Raw config data: {config_data or 'Not available'}")
        return {'columns': [], 'raw_config': {}}
|
| 944 |
+
|
| 945 |
+
|
| 946 |
+
# Singleton instance
|
| 947 |
+
jira_service = JiraIntegrationService()
|
main.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""FastAPI application entry point for the intelligence API."""
import logging
from datetime import datetime, timezone

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from api.jira_routes import router as jira_router
from api.intelligence_routes import router as intelligence_router
from config.settings import settings

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)

logger = logging.getLogger(__name__)

# Create FastAPI app
app = FastAPI(
    title="Enterprise Delivery & Workforce Intelligence API",
    description="AI-powered enterprise intelligence system for engineering operations",
    version="1.0.0",
    debug=settings.debug
)

# Configure CORS
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, specify actual origins
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Include routers
app.include_router(jira_router)
app.include_router(intelligence_router)


@app.get("/")
async def root():
    """Root endpoint describing the API's top-level routes."""
    return {
        "message": "Enterprise Delivery & Workforce Intelligence API",
        "version": "1.0.0",
        "status": "operational",
        "endpoints": {
            "jira": "/jira",
            "intelligence": "/intelligence",
            "docs": "/docs",
            "redoc": "/redoc"
        }
    }


@app.get("/health")
async def health_check():
    """Health check endpoint.

    Fix: the timestamp was previously hard-coded; it now reports the
    actual current UTC time in ISO-8601 'Z' form.
    """
    return {
        "status": "healthy",
        "timestamp": datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
    }


if __name__ == "__main__":
    import uvicorn

    logger.info(f"Starting server on {settings.api_host}:{settings.api_port}")
    uvicorn.run(
        "main:app",
        host=settings.api_host,
        port=settings.api_port,
        reload=settings.debug
    )
|
| 73 |
+
|
models/__init__.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .jira_models import (
|
| 2 |
+
JiraIssue,
|
| 3 |
+
Sprint,
|
| 4 |
+
TeamMember,
|
| 5 |
+
WorklogEntry,
|
| 6 |
+
ProjectInfo,
|
| 7 |
+
PriorityLevel,
|
| 8 |
+
IssueStatus
|
| 9 |
+
)
|
| 10 |
+
from .intelligence_models import (
|
| 11 |
+
DeliveryHealthMetrics,
|
| 12 |
+
ProductivityMetrics,
|
| 13 |
+
CostEfficiencyMetrics,
|
| 14 |
+
TeamCapacityMetrics,
|
| 15 |
+
RiskAlert,
|
| 16 |
+
InsightRecommendation
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
__all__ = [
|
| 20 |
+
"JiraIssue",
|
| 21 |
+
"Sprint",
|
| 22 |
+
"TeamMember",
|
| 23 |
+
"WorklogEntry",
|
| 24 |
+
"ProjectInfo",
|
| 25 |
+
"PriorityLevel",
|
| 26 |
+
"IssueStatus",
|
| 27 |
+
"DeliveryHealthMetrics",
|
| 28 |
+
"ProductivityMetrics",
|
| 29 |
+
"CostEfficiencyMetrics",
|
| 30 |
+
"TeamCapacityMetrics",
|
| 31 |
+
"RiskAlert",
|
| 32 |
+
"InsightRecommendation"
|
| 33 |
+
]
|
models/intelligence_models.py
ADDED
|
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field
|
| 2 |
+
from typing import List, Optional, Dict
|
| 3 |
+
from datetime import datetime, date
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class DeliveryHealthMetrics(BaseModel):
    """Metrics for delivery health analysis over a sprint or time period."""
    # Sprint identification (None when metrics cover an arbitrary period)
    sprint_id: Optional[str] = None
    sprint_name: Optional[str] = None
    period_start: date
    period_end: date

    # Velocity Metrics
    planned_story_points: float = 0
    completed_story_points: float = 0
    velocity: float = 0
    velocity_trend: float = 0  # percentage change from previous period

    # Completion Metrics
    total_issues: int = 0
    completed_issues: int = 0
    completion_rate: float = 0

    # Time Metrics
    avg_cycle_time_hours: float = 0
    avg_lead_time_hours: float = 0

    # Quality Indicators
    blocked_issues_count: int = 0
    overdue_issues_count: int = 0
    reopened_issues_count: int = 0

    # Risk Indicators
    at_risk_issues: int = 0
    health_score: float = 0  # 0-100
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class ProductivityMetrics(BaseModel):
    """Workforce productivity metrics for one team member over a period."""
    team_member_id: str
    team_member_name: str
    period_start: date
    period_end: date

    # Activity Metrics
    issues_completed: int = 0
    story_points_completed: float = 0
    code_commits: int = 0  # from GitHub
    pull_requests: int = 0  # from GitHub

    # Time Metrics
    total_hours_logged: float = 0
    avg_hours_per_day: float = 0

    # Efficiency Metrics
    avg_issue_completion_time_hours: float = 0
    productivity_score: float = 0  # 0-100

    # Workload Metrics (snapshot of current assignments)
    current_assigned_issues: int = 0
    current_story_points: float = 0
    utilization_rate: float = 0  # percentage
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
class CostEfficiencyMetrics(BaseModel):
    """Cost and efficiency analysis for a project (or all projects) over a period."""
    period_start: date
    period_end: date
    project_key: Optional[str] = None  # None = aggregated across projects

    # Resource Metrics
    total_team_members: int = 0
    total_hours_logged: float = 0
    estimated_cost: float = 0  # based on hours

    # Output Metrics
    features_delivered: int = 0
    story_points_delivered: float = 0

    # Efficiency Ratios
    cost_per_feature: float = 0
    cost_per_story_point: float = 0
    hours_per_story_point: float = 0

    # Waste Indicators
    blocked_time_hours: float = 0
    rework_hours: float = 0
    waste_percentage: float = 0
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class TeamCapacityMetrics(BaseModel):
    """Team capacity and utilization over a period."""
    team_id: Optional[str] = None
    team_name: str
    period_start: date
    period_end: date

    # Capacity Metrics
    total_capacity_hours: float = 0
    allocated_hours: float = 0
    available_hours: float = 0
    utilization_rate: float = 0

    # Workload Distribution (per-member dicts; schema defined by producer)
    team_members: List[Dict] = Field(default_factory=list)
    overloaded_members: int = 0
    underutilized_members: int = 0

    # Sprint Metrics
    current_sprint_load: float = 0
    forecasted_capacity: float = 0
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
class RiskAlert(BaseModel):
    """Risk and alert model raised by the intelligence analysis."""
    alert_id: str
    alert_type: str  # delivery_delay, cost_overrun, resource_shortage, quality_issue
    severity: str  # critical, high, medium, low
    title: str
    description: str
    affected_entity: str  # sprint, project, team_member
    entity_id: str
    detected_at: datetime
    suggested_action: Optional[str] = None
    metrics: Dict = Field(default_factory=dict)  # supporting numbers for the alert
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
class InsightRecommendation(BaseModel):
    """AI-generated insights and recommendations."""
    insight_id: str
    category: str  # delivery, productivity, cost, resource
    title: str
    description: str
    confidence_score: float = 0  # 0-1
    impact_level: str  # high, medium, low
    recommendations: List[str] = Field(default_factory=list)
    supporting_data: Dict = Field(default_factory=dict)  # evidence behind the insight
    generated_at: datetime
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class KanbanFlowMetrics(BaseModel):
    """Kanban flow efficiency metrics for one board over a period."""
    board_id: int
    board_name: str
    period_start: date
    period_end: date

    # Flow Metrics
    throughput: int = 0  # Issues completed in period
    avg_cycle_time_days: float = 0
    avg_lead_time_days: float = 0
    flow_efficiency: float = 0  # 0-100

    # WIP Metrics
    current_wip: int = 0
    avg_wip: float = 0
    wip_violations: int = 0  # Number of times WIP limits were exceeded

    # Column Metrics
    bottleneck_column: Optional[str] = None
    bottleneck_score: float = 0

    # Predictability
    throughput_variance: float = 0
    cycle_time_variance: float = 0

    # Health Score
    flow_health_score: float = 0  # 0-100
|
| 169 |
+
|
| 170 |
+
|
| 171 |
+
class KanbanColumnAnalysis(BaseModel):
    """Analysis for a specific Kanban column."""
    column_name: str
    statuses: List[str]  # Jira statuses mapped to this column

    # Current State
    current_issue_count: int = 0
    wip_limit_min: Optional[int] = None
    wip_limit_max: Optional[int] = None
    is_over_wip_limit: bool = False

    # Flow Metrics
    avg_time_in_column_days: float = 0
    throughput: int = 0  # Issues exited this column

    # Efficiency
    utilization_rate: float = 0  # current / max limit
    is_bottleneck: bool = False
    bottleneck_score: float = 0  # Higher = more of a bottleneck
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
class KanbanCumulativeFlow(BaseModel):
    """Cumulative flow diagram data for one board."""
    board_id: int
    period_start: date
    period_end: date

    # Daily snapshots
    data_points: List[Dict] = Field(default_factory=list)
    # Each data point: {date, column_name, issue_count}
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
class WIPLimitRecommendation(BaseModel):
    """WIP limit optimization recommendation for one column."""
    column_name: str
    current_limit: Optional[int]  # None when the column has no limit today
    recommended_min: int
    recommended_max: int
    reasoning: str  # human-readable justification
    confidence_score: float = 0  # 0-1
|
| 211 |
+
|
models/jira_models.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from pydantic import BaseModel, Field
|
| 2 |
+
from typing import List, Optional, Dict, Any
|
| 3 |
+
from datetime import datetime
|
| 4 |
+
from enum import Enum
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class PriorityLevel(str, Enum):
    """Standard Jira priority names, ordered from most to least urgent."""
    HIGHEST = "Highest"
    HIGH = "High"
    MEDIUM = "Medium"
    LOW = "Low"
    LOWEST = "Lowest"
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class IssueStatus(str, Enum):
    """Common Jira workflow status names used by the default templates."""
    TODO = "To Do"
    IN_PROGRESS = "In Progress"
    IN_REVIEW = "In Review"
    DONE = "Done"
    BLOCKED = "Blocked"
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class JiraIssue(BaseModel):
    """Normalized representation of a Jira issue used by the analytics services."""
    issue_key: str  # e.g. "PROJ-123"
    summary: str
    description: Optional[str] = None
    status: str
    priority: Optional[str] = None
    # NOTE(review): assignee/reporter appear to hold display names (they are
    # compared against worklog author display names downstream) — confirm.
    assignee: Optional[str] = None
    reporter: Optional[str] = None
    created: datetime
    updated: datetime
    resolved: Optional[datetime] = None  # None while the issue is unresolved
    story_points: Optional[float] = None
    sprint: Optional[str] = None
    labels: List[str] = Field(default_factory=list)
    components: List[str] = Field(default_factory=list)
    project_key: str
    issue_type: str
    time_spent: Optional[int] = None  # in seconds
    time_estimate: Optional[int] = None  # in seconds
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class Sprint(BaseModel):
    """A Jira Agile sprint."""
    sprint_id: int
    sprint_name: str
    state: str  # active, closed, future
    start_date: Optional[datetime] = None
    end_date: Optional[datetime] = None
    complete_date: Optional[datetime] = None  # set once the sprint is completed
    goal: Optional[str] = None
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class TeamMember(BaseModel):
    """A Jira user that can be assigned work."""
    account_id: str  # Atlassian account id (stable identifier)
    display_name: str
    email: Optional[str] = None  # may be hidden by Jira privacy settings
    active: bool = True
    avatar_url: Optional[str] = None
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
class WorklogEntry(BaseModel):
    """A single Jira worklog (time-tracking) entry attached to an issue."""
    id: str
    issue_key: str
    author: str  # display name of the user who logged the time
    time_spent_seconds: int
    started: datetime
    comment: Optional[str] = None

    @classmethod
    def from_jira_worklog(cls, worklog_data: dict, issue_key: str) -> "WorklogEntry":
        """Create WorklogEntry from a Jira API worklog payload.

        Handles the ADF (Atlassian Document Format) comment shape used by
        Jira Cloud by flattening it to plain text, and coerces the ``id``
        to ``str`` so a missing or numeric id cannot fail validation
        (the previous code passed ``None``/``int`` straight through).
        """
        comment = worklog_data.get('comment')

        # Convert ADF (Atlassian Document Format) to plain text
        if isinstance(comment, dict):
            comment = cls._extract_text_from_adf(comment)

        raw_id = worklog_data.get('id')
        return cls(
            id=str(raw_id) if raw_id is not None else '',
            issue_key=issue_key,
            author=worklog_data.get('author', {}).get('displayName', ''),
            time_spent_seconds=worklog_data.get('timeSpentSeconds', 0),
            started=worklog_data.get('started'),
            comment=comment
        )

    @staticmethod
    def _extract_text_from_adf(adf: dict) -> str:
        """Extract plain text from an Atlassian Document Format tree.

        Walks the nested ``content`` arrays and concatenates every
        ``type == 'text'`` node's text, space-separated.
        """
        if not adf or not isinstance(adf, dict):
            return ""

        text_parts = []

        def extract_content(node):
            if isinstance(node, dict):
                # Extract text from text nodes
                if node.get('type') == 'text':
                    text_parts.append(node.get('text', ''))
                # Recursively process content array
                if 'content' in node:
                    for child in node['content']:
                        extract_content(child)
            elif isinstance(node, list):
                for item in node:
                    extract_content(item)

        extract_content(adf)
        return ' '.join(text_parts).strip()
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class ProjectInfo(BaseModel):
    """Summary metadata for a Jira project."""
    project_key: str
    project_name: str
    project_type: str  # e.g. "software", "business" — TODO confirm values
    lead: Optional[str] = None
    description: Optional[str] = None
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
class KanbanColumn(BaseModel):
    """Represents a column in a Kanban board."""
    name: str
    statuses: List[str] = Field(default_factory=list)  # statuses mapped to this column
    min: Optional[int] = None  # WIP limit min
    max: Optional[int] = None  # WIP limit max
    issue_count: int = 0
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class KanbanBoard(BaseModel):
    """Represents a Kanban board with its configuration."""
    board_id: int
    board_name: str
    board_type: str  # Jira board type, e.g. "kanban" or "scrum" — TODO confirm
    project_key: Optional[str] = None
    columns: List[KanbanColumn] = Field(default_factory=list)
    filter_id: Optional[int] = None  # backing JQL filter, when known
    location: Optional[Dict[str, Any]] = None  # raw board location payload
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class KanbanIssuesByColumn(BaseModel):
    """Issues grouped by Kanban columns."""
    column_name: str
    statuses: List[str]
    issues: List[JiraIssue] = Field(default_factory=list)
    wip_limit_min: Optional[int] = None
    wip_limit_max: Optional[int] = None
    issue_count: int = 0  # kept separately from len(issues) by the producer
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
class BoardConfiguration(BaseModel):
    """Board configuration details as returned by the Jira Agile API."""
    board_id: int
    name: str
    self_link: str  # API URL of this configuration resource
    estimation: Optional[Dict[str, Any]] = None  # raw estimation config payload
    ranking: Optional[Dict[str, Any]] = None  # raw ranking config payload
    column_config: Optional[Dict[str, Any]] = None  # raw columnConfig payload
|
requirements.txt
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
fastapi==0.109.0
|
| 2 |
+
uvicorn==0.27.0
|
| 3 |
+
pydantic==2.5.3
|
| 4 |
+
pydantic-settings==2.1.0
|
| 5 |
+
jira==3.8.0
|
| 6 |
+
httpx==0.26.0
|
| 7 |
+
python-dotenv==1.0.0
|
| 8 |
+
pandas==2.1.4
|
| 9 |
+
numpy==1.26.3
|
| 10 |
+
sqlalchemy==2.0.25
|
| 11 |
+
redis==5.0.1
|
| 12 |
+
celery==5.3.6
|
| 13 |
+
python-jose[cryptography]==3.3.0
|
| 14 |
+
passlib[bcrypt]==1.7.4
|
| 15 |
+
python-multipart==0.0.6
|
| 16 |
+
aiohttp==3.9.1
|
services/__init__.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .intelligence_service import intelligence_service, IntelligenceService
|
| 2 |
+
|
| 3 |
+
__all__ = ["intelligence_service", "IntelligenceService"]
|
services/intelligence_service.py
ADDED
|
@@ -0,0 +1,760 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import List, Dict, Optional
|
| 2 |
+
from datetime import datetime, timedelta, date
|
| 3 |
+
from collections import defaultdict
|
| 4 |
+
import numpy as np
|
| 5 |
+
from models.jira_models import JiraIssue, Sprint, WorklogEntry
|
| 6 |
+
from models.intelligence_models import (
|
| 7 |
+
DeliveryHealthMetrics,
|
| 8 |
+
ProductivityMetrics,
|
| 9 |
+
CostEfficiencyMetrics,
|
| 10 |
+
TeamCapacityMetrics,
|
| 11 |
+
RiskAlert,
|
| 12 |
+
InsightRecommendation,
|
| 13 |
+
KanbanFlowMetrics,
|
| 14 |
+
KanbanColumnAnalysis,
|
| 15 |
+
KanbanCumulativeFlow,
|
| 16 |
+
WIPLimitRecommendation
|
| 17 |
+
)
|
| 18 |
+
import logging
|
| 19 |
+
import uuid
|
| 20 |
+
|
| 21 |
+
logger = logging.getLogger(__name__)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class IntelligenceService:
|
| 25 |
+
"""Service for generating business intelligence from engineering data"""
|
| 26 |
+
|
| 27 |
+
def calculate_delivery_health(
|
| 28 |
+
self,
|
| 29 |
+
issues: List[JiraIssue],
|
| 30 |
+
sprint: Optional[Sprint] = None,
|
| 31 |
+
period_start: Optional[date] = None,
|
| 32 |
+
period_end: Optional[date] = None
|
| 33 |
+
) -> DeliveryHealthMetrics:
|
| 34 |
+
"""Calculate delivery health metrics"""
|
| 35 |
+
|
| 36 |
+
if not period_start and sprint:
|
| 37 |
+
period_start = sprint.start_date.date() if sprint.start_date else date.today()
|
| 38 |
+
if not period_end and sprint:
|
| 39 |
+
period_end = sprint.end_date.date() if sprint.end_date else date.today()
|
| 40 |
+
|
| 41 |
+
if not period_start:
|
| 42 |
+
period_start = date.today() - timedelta(days=14)
|
| 43 |
+
if not period_end:
|
| 44 |
+
period_end = date.today()
|
| 45 |
+
|
| 46 |
+
# Filter issues within the period
|
| 47 |
+
period_issues = [
|
| 48 |
+
issue for issue in issues
|
| 49 |
+
if period_start <= issue.created.date() <= period_end
|
| 50 |
+
]
|
| 51 |
+
|
| 52 |
+
# Calculate metrics
|
| 53 |
+
total_issues = len(period_issues)
|
| 54 |
+
completed_issues = len([i for i in period_issues if i.status.lower() in ['done', 'closed']])
|
| 55 |
+
blocked_issues = len([i for i in period_issues if i.status.lower() == 'blocked'])
|
| 56 |
+
|
| 57 |
+
# Story points
|
| 58 |
+
planned_points = sum([i.story_points or 0 for i in period_issues])
|
| 59 |
+
completed_points = sum([
|
| 60 |
+
i.story_points or 0 for i in period_issues
|
| 61 |
+
if i.status.lower() in ['done', 'closed']
|
| 62 |
+
])
|
| 63 |
+
|
| 64 |
+
# Cycle time calculation
|
| 65 |
+
cycle_times = []
|
| 66 |
+
for issue in period_issues:
|
| 67 |
+
if issue.resolved and issue.created:
|
| 68 |
+
cycle_time = (issue.resolved - issue.created).total_seconds() / 3600
|
| 69 |
+
cycle_times.append(cycle_time)
|
| 70 |
+
|
| 71 |
+
avg_cycle_time = np.mean(cycle_times) if cycle_times else 0
|
| 72 |
+
|
| 73 |
+
# Calculate completion rate
|
| 74 |
+
completion_rate = (completed_issues / total_issues * 100) if total_issues > 0 else 0
|
| 75 |
+
|
| 76 |
+
# Calculate health score (0-100)
|
| 77 |
+
health_score = self._calculate_health_score(
|
| 78 |
+
completion_rate=completion_rate,
|
| 79 |
+
blocked_ratio=blocked_issues / total_issues if total_issues > 0 else 0,
|
| 80 |
+
velocity_ratio=completed_points / planned_points if planned_points > 0 else 0
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
return DeliveryHealthMetrics(
|
| 84 |
+
sprint_id=str(sprint.sprint_id) if sprint else None,
|
| 85 |
+
sprint_name=sprint.sprint_name if sprint else None,
|
| 86 |
+
period_start=period_start,
|
| 87 |
+
period_end=period_end,
|
| 88 |
+
planned_story_points=planned_points,
|
| 89 |
+
completed_story_points=completed_points,
|
| 90 |
+
velocity=completed_points,
|
| 91 |
+
velocity_trend=0, # TODO: Calculate from historical data
|
| 92 |
+
total_issues=total_issues,
|
| 93 |
+
completed_issues=completed_issues,
|
| 94 |
+
completion_rate=completion_rate,
|
| 95 |
+
avg_cycle_time_hours=avg_cycle_time,
|
| 96 |
+
avg_lead_time_hours=avg_cycle_time, # Simplified
|
| 97 |
+
blocked_issues_count=blocked_issues,
|
| 98 |
+
overdue_issues_count=0, # TODO: Calculate based on due dates
|
| 99 |
+
reopened_issues_count=0, # TODO: Track issue history
|
| 100 |
+
at_risk_issues=blocked_issues,
|
| 101 |
+
health_score=health_score
|
| 102 |
+
)
|
| 103 |
+
|
| 104 |
+
def calculate_productivity_metrics(
|
| 105 |
+
self,
|
| 106 |
+
issues: List[JiraIssue],
|
| 107 |
+
worklogs: List[WorklogEntry],
|
| 108 |
+
team_member_id: str,
|
| 109 |
+
team_member_name: str,
|
| 110 |
+
period_start: date,
|
| 111 |
+
period_end: date
|
| 112 |
+
) -> ProductivityMetrics:
|
| 113 |
+
"""Calculate productivity metrics for a team member"""
|
| 114 |
+
|
| 115 |
+
# Filter issues assigned to this member
|
| 116 |
+
member_issues = [
|
| 117 |
+
i for i in issues
|
| 118 |
+
if i.assignee == team_member_name
|
| 119 |
+
and period_start <= i.created.date() <= period_end
|
| 120 |
+
]
|
| 121 |
+
|
| 122 |
+
# Filter worklogs for this member
|
| 123 |
+
member_worklogs = [
|
| 124 |
+
w for w in worklogs
|
| 125 |
+
if w.author == team_member_name
|
| 126 |
+
and period_start <= w.started.date() <= period_end
|
| 127 |
+
]
|
| 128 |
+
|
| 129 |
+
# Calculate metrics
|
| 130 |
+
completed_issues = len([i for i in member_issues if i.status.lower() in ['done', 'closed']])
|
| 131 |
+
story_points_completed = sum([
|
| 132 |
+
i.story_points or 0 for i in member_issues
|
| 133 |
+
if i.status.lower() in ['done', 'closed']
|
| 134 |
+
])
|
| 135 |
+
|
| 136 |
+
# Time metrics
|
| 137 |
+
total_hours = sum([w.time_spent_seconds / 3600 for w in member_worklogs])
|
| 138 |
+
days_in_period = (period_end - period_start).days + 1
|
| 139 |
+
avg_hours_per_day = total_hours / days_in_period if days_in_period > 0 else 0
|
| 140 |
+
|
| 141 |
+
# Completion time
|
| 142 |
+
completion_times = []
|
| 143 |
+
for issue in member_issues:
|
| 144 |
+
if issue.resolved and issue.created:
|
| 145 |
+
completion_time = (issue.resolved - issue.created).total_seconds() / 3600
|
| 146 |
+
completion_times.append(completion_time)
|
| 147 |
+
|
| 148 |
+
avg_completion_time = np.mean(completion_times) if completion_times else 0
|
| 149 |
+
|
| 150 |
+
# Productivity score
|
| 151 |
+
productivity_score = self._calculate_productivity_score(
|
| 152 |
+
completed_issues=completed_issues,
|
| 153 |
+
story_points_completed=story_points_completed,
|
| 154 |
+
avg_completion_time=avg_completion_time,
|
| 155 |
+
hours_logged=total_hours
|
| 156 |
+
)
|
| 157 |
+
|
| 158 |
+
# Current workload
|
| 159 |
+
current_assigned = len([i for i in issues if i.assignee == team_member_name and i.status.lower() not in ['done', 'closed']])
|
| 160 |
+
current_points = sum([i.story_points or 0 for i in issues if i.assignee == team_member_name and i.status.lower() not in ['done', 'closed']])
|
| 161 |
+
|
| 162 |
+
return ProductivityMetrics(
|
| 163 |
+
team_member_id=team_member_id,
|
| 164 |
+
team_member_name=team_member_name,
|
| 165 |
+
period_start=period_start,
|
| 166 |
+
period_end=period_end,
|
| 167 |
+
issues_completed=completed_issues,
|
| 168 |
+
story_points_completed=story_points_completed,
|
| 169 |
+
code_commits=0, # TODO: Integrate with GitHub
|
| 170 |
+
pull_requests=0, # TODO: Integrate with GitHub
|
| 171 |
+
total_hours_logged=total_hours,
|
| 172 |
+
avg_hours_per_day=avg_hours_per_day,
|
| 173 |
+
avg_issue_completion_time_hours=avg_completion_time,
|
| 174 |
+
productivity_score=productivity_score,
|
| 175 |
+
current_assigned_issues=current_assigned,
|
| 176 |
+
current_story_points=current_points,
|
| 177 |
+
utilization_rate=min(100, (total_hours / (days_in_period * 8)) * 100) if days_in_period > 0 else 0
|
| 178 |
+
)
|
| 179 |
+
|
| 180 |
+
def calculate_cost_efficiency(
|
| 181 |
+
self,
|
| 182 |
+
issues: List[JiraIssue],
|
| 183 |
+
worklogs: List[WorklogEntry],
|
| 184 |
+
period_start: date,
|
| 185 |
+
period_end: date,
|
| 186 |
+
avg_hourly_rate: float = 75.0 # Default rate
|
| 187 |
+
) -> CostEfficiencyMetrics:
|
| 188 |
+
"""Calculate cost efficiency metrics"""
|
| 189 |
+
|
| 190 |
+
# Filter data for period
|
| 191 |
+
period_issues = [
|
| 192 |
+
i for i in issues
|
| 193 |
+
if period_start <= i.created.date() <= period_end
|
| 194 |
+
]
|
| 195 |
+
|
| 196 |
+
period_worklogs = [
|
| 197 |
+
w for w in worklogs
|
| 198 |
+
if period_start <= w.started.date() <= period_end
|
| 199 |
+
]
|
| 200 |
+
|
| 201 |
+
# Get unique team members
|
| 202 |
+
team_members = set([w.author for w in period_worklogs])
|
| 203 |
+
total_team_members = len(team_members)
|
| 204 |
+
|
| 205 |
+
# Calculate hours and cost
|
| 206 |
+
total_hours = sum([w.time_spent_seconds / 3600 for w in period_worklogs])
|
| 207 |
+
estimated_cost = total_hours * avg_hourly_rate
|
| 208 |
+
|
| 209 |
+
# Output metrics
|
| 210 |
+
features_delivered = len([i for i in period_issues if i.status.lower() in ['done', 'closed'] and i.issue_type.lower() in ['story', 'feature']])
|
| 211 |
+
story_points_delivered = sum([
|
| 212 |
+
i.story_points or 0 for i in period_issues
|
| 213 |
+
if i.status.lower() in ['done', 'closed']
|
| 214 |
+
])
|
| 215 |
+
|
| 216 |
+
# Efficiency ratios
|
| 217 |
+
cost_per_feature = estimated_cost / features_delivered if features_delivered > 0 else 0
|
| 218 |
+
cost_per_story_point = estimated_cost / story_points_delivered if story_points_delivered > 0 else 0
|
| 219 |
+
hours_per_story_point = total_hours / story_points_delivered if story_points_delivered > 0 else 0
|
| 220 |
+
|
| 221 |
+
# Waste calculation (blocked time)
|
| 222 |
+
blocked_hours = sum([
|
| 223 |
+
(w.time_spent_seconds / 3600) for w in period_worklogs
|
| 224 |
+
if any(i.issue_key == w.issue_key and i.status.lower() == 'blocked' for i in period_issues)
|
| 225 |
+
])
|
| 226 |
+
|
| 227 |
+
waste_percentage = (blocked_hours / total_hours * 100) if total_hours > 0 else 0
|
| 228 |
+
|
| 229 |
+
return CostEfficiencyMetrics(
|
| 230 |
+
period_start=period_start,
|
| 231 |
+
period_end=period_end,
|
| 232 |
+
total_team_members=total_team_members,
|
| 233 |
+
total_hours_logged=total_hours,
|
| 234 |
+
estimated_cost=estimated_cost,
|
| 235 |
+
features_delivered=features_delivered,
|
| 236 |
+
story_points_delivered=story_points_delivered,
|
| 237 |
+
cost_per_feature=cost_per_feature,
|
| 238 |
+
cost_per_story_point=cost_per_story_point,
|
| 239 |
+
hours_per_story_point=hours_per_story_point,
|
| 240 |
+
blocked_time_hours=blocked_hours,
|
| 241 |
+
rework_hours=0, # TODO: Track rework
|
| 242 |
+
waste_percentage=waste_percentage
|
| 243 |
+
)
|
| 244 |
+
|
| 245 |
+
def generate_risk_alerts(
|
| 246 |
+
self,
|
| 247 |
+
delivery_health: DeliveryHealthMetrics,
|
| 248 |
+
productivity_metrics: List[ProductivityMetrics],
|
| 249 |
+
cost_metrics: CostEfficiencyMetrics
|
| 250 |
+
) -> List[RiskAlert]:
|
| 251 |
+
"""Generate risk alerts based on metrics"""
|
| 252 |
+
|
| 253 |
+
alerts = []
|
| 254 |
+
|
| 255 |
+
# Delivery health alerts
|
| 256 |
+
if delivery_health.health_score < 50:
|
| 257 |
+
alerts.append(RiskAlert(
|
| 258 |
+
alert_id=str(uuid.uuid4()),
|
| 259 |
+
alert_type="delivery_delay",
|
| 260 |
+
severity="critical" if delivery_health.health_score < 30 else "high",
|
| 261 |
+
title="Low Delivery Health Score",
|
| 262 |
+
description=f"Sprint health score is {delivery_health.health_score:.1f}/100, indicating significant delivery risks.",
|
| 263 |
+
affected_entity="sprint",
|
| 264 |
+
entity_id=delivery_health.sprint_id or "unknown",
|
| 265 |
+
detected_at=datetime.now(),
|
| 266 |
+
suggested_action="Review blocked issues, reassign workload, and identify bottlenecks.",
|
| 267 |
+
metrics={"health_score": delivery_health.health_score}
|
| 268 |
+
))
|
| 269 |
+
|
| 270 |
+
if delivery_health.completion_rate < 60:
|
| 271 |
+
alerts.append(RiskAlert(
|
| 272 |
+
alert_id=str(uuid.uuid4()),
|
| 273 |
+
alert_type="delivery_delay",
|
| 274 |
+
severity="high",
|
| 275 |
+
title="Low Completion Rate",
|
| 276 |
+
description=f"Only {delivery_health.completion_rate:.1f}% of planned work is completed.",
|
| 277 |
+
affected_entity="sprint",
|
| 278 |
+
entity_id=delivery_health.sprint_id or "unknown",
|
| 279 |
+
detected_at=datetime.now(),
|
| 280 |
+
suggested_action="Reduce scope or extend timeline to meet commitments.",
|
| 281 |
+
metrics={"completion_rate": delivery_health.completion_rate}
|
| 282 |
+
))
|
| 283 |
+
|
| 284 |
+
# Productivity alerts
|
| 285 |
+
overworked = [p for p in productivity_metrics if p.avg_hours_per_day > 10]
|
| 286 |
+
if overworked:
|
| 287 |
+
for member in overworked:
|
| 288 |
+
alerts.append(RiskAlert(
|
| 289 |
+
alert_id=str(uuid.uuid4()),
|
| 290 |
+
alert_type="resource_shortage",
|
| 291 |
+
severity="medium",
|
| 292 |
+
title="Team Member Overworked",
|
| 293 |
+
description=f"{member.team_member_name} is logging {member.avg_hours_per_day:.1f} hours/day.",
|
| 294 |
+
affected_entity="team_member",
|
| 295 |
+
entity_id=member.team_member_id,
|
| 296 |
+
detected_at=datetime.now(),
|
| 297 |
+
suggested_action="Redistribute workload to prevent burnout.",
|
| 298 |
+
metrics={"avg_hours_per_day": member.avg_hours_per_day}
|
| 299 |
+
))
|
| 300 |
+
|
| 301 |
+
# Cost alerts
|
| 302 |
+
if cost_metrics.waste_percentage > 20:
|
| 303 |
+
alerts.append(RiskAlert(
|
| 304 |
+
alert_id=str(uuid.uuid4()),
|
| 305 |
+
alert_type="cost_overrun",
|
| 306 |
+
severity="high",
|
| 307 |
+
title="High Waste Percentage",
|
| 308 |
+
description=f"{cost_metrics.waste_percentage:.1f}% of time is wasted on blocked work.",
|
| 309 |
+
affected_entity="project",
|
| 310 |
+
entity_id="project",
|
| 311 |
+
detected_at=datetime.now(),
|
| 312 |
+
suggested_action="Identify and remove blockers urgently.",
|
| 313 |
+
metrics={"waste_percentage": cost_metrics.waste_percentage}
|
| 314 |
+
))
|
| 315 |
+
|
| 316 |
+
return alerts
|
| 317 |
+
|
| 318 |
+
def generate_insights(
|
| 319 |
+
self,
|
| 320 |
+
delivery_health: DeliveryHealthMetrics,
|
| 321 |
+
productivity_metrics: List[ProductivityMetrics],
|
| 322 |
+
cost_metrics: CostEfficiencyMetrics
|
| 323 |
+
) -> List[InsightRecommendation]:
|
| 324 |
+
"""Generate AI-powered insights and recommendations"""
|
| 325 |
+
|
| 326 |
+
insights = []
|
| 327 |
+
|
| 328 |
+
# Velocity trend insight
|
| 329 |
+
if delivery_health.velocity > 0:
|
| 330 |
+
insights.append(InsightRecommendation(
|
| 331 |
+
insight_id=str(uuid.uuid4()),
|
| 332 |
+
category="delivery",
|
| 333 |
+
title="Velocity Analysis",
|
| 334 |
+
description=f"Team completed {delivery_health.completed_story_points:.1f} story points with {delivery_health.completion_rate:.1f}% completion rate.",
|
| 335 |
+
confidence_score=0.85,
|
| 336 |
+
impact_level="medium",
|
| 337 |
+
recommendations=[
|
| 338 |
+
"Maintain current sprint planning strategy",
|
| 339 |
+
"Consider increasing capacity for higher throughput"
|
| 340 |
+
],
|
| 341 |
+
supporting_data={
|
| 342 |
+
"completed_points": delivery_health.completed_story_points,
|
| 343 |
+
"completion_rate": delivery_health.completion_rate
|
| 344 |
+
},
|
| 345 |
+
generated_at=datetime.now()
|
| 346 |
+
))
|
| 347 |
+
|
| 348 |
+
# Team efficiency insight
|
| 349 |
+
if productivity_metrics:
|
| 350 |
+
avg_productivity = np.mean([p.productivity_score for p in productivity_metrics])
|
| 351 |
+
insights.append(InsightRecommendation(
|
| 352 |
+
insight_id=str(uuid.uuid4()),
|
| 353 |
+
category="productivity",
|
| 354 |
+
title="Team Productivity Overview",
|
| 355 |
+
description=f"Average team productivity score is {avg_productivity:.1f}/100.",
|
| 356 |
+
confidence_score=0.80,
|
| 357 |
+
impact_level="high" if avg_productivity < 60 else "low",
|
| 358 |
+
recommendations=[
|
| 359 |
+
"Identify top performers and share best practices",
|
| 360 |
+
"Provide additional support to team members scoring below 50",
|
| 361 |
+
"Review tool and process efficiency"
|
| 362 |
+
] if avg_productivity < 70 else [
|
| 363 |
+
"Team is performing well",
|
| 364 |
+
"Focus on maintaining current practices"
|
| 365 |
+
],
|
| 366 |
+
supporting_data={"avg_productivity_score": avg_productivity},
|
| 367 |
+
generated_at=datetime.now()
|
| 368 |
+
))
|
| 369 |
+
|
| 370 |
+
# Cost optimization insight
|
| 371 |
+
if cost_metrics.cost_per_story_point > 0:
|
| 372 |
+
insights.append(InsightRecommendation(
|
| 373 |
+
insight_id=str(uuid.uuid4()),
|
| 374 |
+
category="cost",
|
| 375 |
+
title="Cost Efficiency Analysis",
|
| 376 |
+
description=f"Current cost per story point is ${cost_metrics.cost_per_story_point:.2f}.",
|
| 377 |
+
confidence_score=0.75,
|
| 378 |
+
impact_level="medium",
|
| 379 |
+
recommendations=[
|
| 380 |
+
"Track this metric over time to identify trends",
|
| 381 |
+
"Compare with industry benchmarks",
|
| 382 |
+
"Focus on reducing waste to improve efficiency"
|
| 383 |
+
],
|
| 384 |
+
supporting_data={
|
| 385 |
+
"cost_per_story_point": cost_metrics.cost_per_story_point,
|
| 386 |
+
"waste_percentage": cost_metrics.waste_percentage
|
| 387 |
+
},
|
| 388 |
+
generated_at=datetime.now()
|
| 389 |
+
))
|
| 390 |
+
|
| 391 |
+
return insights
|
| 392 |
+
|
| 393 |
+
def _calculate_health_score(
|
| 394 |
+
self,
|
| 395 |
+
completion_rate: float,
|
| 396 |
+
blocked_ratio: float,
|
| 397 |
+
velocity_ratio: float
|
| 398 |
+
) -> float:
|
| 399 |
+
"""Calculate overall health score (0-100)"""
|
| 400 |
+
|
| 401 |
+
# Weighted scoring
|
| 402 |
+
completion_weight = 0.4
|
| 403 |
+
blocked_weight = 0.3
|
| 404 |
+
velocity_weight = 0.3
|
| 405 |
+
|
| 406 |
+
completion_score = completion_rate
|
| 407 |
+
blocked_score = max(0, 100 - (blocked_ratio * 200)) # Penalize blocked items
|
| 408 |
+
velocity_score = min(100, velocity_ratio * 100)
|
| 409 |
+
|
| 410 |
+
health_score = (
|
| 411 |
+
completion_score * completion_weight +
|
| 412 |
+
blocked_score * blocked_weight +
|
| 413 |
+
velocity_score * velocity_weight
|
| 414 |
+
)
|
| 415 |
+
|
| 416 |
+
return max(0, min(100, health_score))
|
| 417 |
+
|
| 418 |
+
def _calculate_productivity_score(
|
| 419 |
+
self,
|
| 420 |
+
completed_issues: int,
|
| 421 |
+
story_points_completed: float,
|
| 422 |
+
avg_completion_time: float,
|
| 423 |
+
hours_logged: float
|
| 424 |
+
) -> float:
|
| 425 |
+
"""Calculate productivity score (0-100)"""
|
| 426 |
+
|
| 427 |
+
# Simple scoring based on output
|
| 428 |
+
output_score = min(100, (completed_issues * 10) + (story_points_completed * 5))
|
| 429 |
+
|
| 430 |
+
# Efficiency score (inverse of completion time)
|
| 431 |
+
efficiency_score = min(100, 100 - (avg_completion_time / 10)) if avg_completion_time > 0 else 50
|
| 432 |
+
|
| 433 |
+
# Average the scores
|
| 434 |
+
productivity_score = (output_score * 0.6) + (efficiency_score * 0.4)
|
| 435 |
+
|
| 436 |
+
return max(0, min(100, productivity_score))
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
# ===== KANBAN INTELLIGENCE METHODS =====
|
| 440 |
+
|
| 441 |
+
def calculate_kanban_flow_metrics(
|
| 442 |
+
self,
|
| 443 |
+
board_id: int,
|
| 444 |
+
board_name: str,
|
| 445 |
+
issues: List[JiraIssue],
|
| 446 |
+
columns: List, # List of KanbanIssuesByColumn
|
| 447 |
+
period_start: date,
|
| 448 |
+
period_end: date
|
| 449 |
+
) -> KanbanFlowMetrics:
|
| 450 |
+
"""Calculate Kanban flow efficiency metrics"""
|
| 451 |
+
|
| 452 |
+
# Filter issues within period
|
| 453 |
+
period_issues = [
|
| 454 |
+
issue for issue in issues
|
| 455 |
+
if period_start <= issue.created.date() <= period_end
|
| 456 |
+
]
|
| 457 |
+
|
| 458 |
+
# Calculate throughput (completed in period)
|
| 459 |
+
completed_issues = [
|
| 460 |
+
i for i in period_issues
|
| 461 |
+
if i.status.lower() in ['done', 'closed'] and i.resolved
|
| 462 |
+
and period_start <= i.resolved.date() <= period_end
|
| 463 |
+
]
|
| 464 |
+
throughput = len(completed_issues)
|
| 465 |
+
|
| 466 |
+
# Calculate cycle time for completed issues
|
| 467 |
+
cycle_times = []
|
| 468 |
+
for issue in completed_issues:
|
| 469 |
+
if issue.resolved and issue.created:
|
| 470 |
+
cycle_time = (issue.resolved.date() - issue.created.date()).days
|
| 471 |
+
cycle_times.append(cycle_time)
|
| 472 |
+
|
| 473 |
+
avg_cycle_time = np.mean(cycle_times) if cycle_times else 0
|
| 474 |
+
cycle_time_variance = np.std(cycle_times) if len(cycle_times) > 1 else 0
|
| 475 |
+
|
| 476 |
+
# Calculate WIP
|
| 477 |
+
current_wip = sum([col.issue_count for col in columns if col.column_name.lower() != 'done'])
|
| 478 |
+
|
| 479 |
+
# Calculate WIP violations
|
| 480 |
+
wip_violations = sum([
|
| 481 |
+
1 for col in columns
|
| 482 |
+
if col.wip_limit_max and col.issue_count > col.wip_limit_max
|
| 483 |
+
])
|
| 484 |
+
|
| 485 |
+
# Identify bottleneck
|
| 486 |
+
bottleneck_column = None
|
| 487 |
+
bottleneck_score = 0
|
| 488 |
+
|
| 489 |
+
for col in columns:
|
| 490 |
+
if col.column_name.lower() not in ['backlog', 'done']:
|
| 491 |
+
# Bottleneck score based on utilization and throughput
|
| 492 |
+
utilization = col.issue_count / col.wip_limit_max if col.wip_limit_max else 0
|
| 493 |
+
score = utilization * col.issue_count
|
| 494 |
+
if score > bottleneck_score:
|
| 495 |
+
bottleneck_score = score
|
| 496 |
+
bottleneck_column = col.column_name
|
| 497 |
+
|
| 498 |
+
# Calculate flow efficiency (simplified)
|
| 499 |
+
flow_efficiency = min(100, (throughput / max(1, current_wip)) * 100)
|
| 500 |
+
|
| 501 |
+
# Calculate flow health score
|
| 502 |
+
flow_health_score = self._calculate_flow_health_score(
|
| 503 |
+
throughput=throughput,
|
| 504 |
+
avg_cycle_time=avg_cycle_time,
|
| 505 |
+
wip_violations=wip_violations,
|
| 506 |
+
current_wip=current_wip
|
| 507 |
+
)
|
| 508 |
+
|
| 509 |
+
return KanbanFlowMetrics(
|
| 510 |
+
board_id=board_id,
|
| 511 |
+
board_name=board_name,
|
| 512 |
+
period_start=period_start,
|
| 513 |
+
period_end=period_end,
|
| 514 |
+
throughput=throughput,
|
| 515 |
+
avg_cycle_time_days=avg_cycle_time,
|
| 516 |
+
avg_lead_time_days=avg_cycle_time, # Simplified
|
| 517 |
+
flow_efficiency=flow_efficiency,
|
| 518 |
+
current_wip=current_wip,
|
| 519 |
+
avg_wip=current_wip, # TODO: Calculate historical average
|
| 520 |
+
wip_violations=wip_violations,
|
| 521 |
+
bottleneck_column=bottleneck_column,
|
| 522 |
+
bottleneck_score=bottleneck_score,
|
| 523 |
+
throughput_variance=0, # TODO: Calculate from historical data
|
| 524 |
+
cycle_time_variance=cycle_time_variance,
|
| 525 |
+
flow_health_score=flow_health_score
|
| 526 |
+
)
|
| 527 |
+
|
| 528 |
+
def analyze_kanban_columns(
|
| 529 |
+
self,
|
| 530 |
+
columns: List, # List of KanbanIssuesByColumn
|
| 531 |
+
issues: List[JiraIssue]
|
| 532 |
+
) -> List[KanbanColumnAnalysis]:
|
| 533 |
+
"""Analyze each Kanban column for bottlenecks and efficiency"""
|
| 534 |
+
|
| 535 |
+
analyses = []
|
| 536 |
+
max_issue_count = max([col.issue_count for col in columns]) if columns else 1
|
| 537 |
+
|
| 538 |
+
for col in columns:
|
| 539 |
+
# Calculate average time in column (simplified)
|
| 540 |
+
column_issues = col.issues
|
| 541 |
+
times_in_column = []
|
| 542 |
+
|
| 543 |
+
for issue in column_issues:
|
| 544 |
+
if issue.resolved and issue.created:
|
| 545 |
+
time_in_col = (issue.resolved - issue.created).total_seconds() / (3600 * 24)
|
| 546 |
+
times_in_column.append(time_in_col)
|
| 547 |
+
|
| 548 |
+
avg_time = np.mean(times_in_column) if times_in_column else 0
|
| 549 |
+
|
| 550 |
+
# Calculate utilization
|
| 551 |
+
utilization = 0
|
| 552 |
+
if col.wip_limit_max and col.wip_limit_max > 0:
|
| 553 |
+
utilization = (col.issue_count / col.wip_limit_max) * 100
|
| 554 |
+
|
| 555 |
+
# Determine if bottleneck (high issue count relative to others)
|
| 556 |
+
is_bottleneck = col.issue_count >= (max_issue_count * 0.7) and col.column_name.lower() not in ['backlog', 'done']
|
| 557 |
+
bottleneck_score = (col.issue_count / max_issue_count) * 100
|
| 558 |
+
|
| 559 |
+
analyses.append(KanbanColumnAnalysis(
|
| 560 |
+
column_name=col.column_name,
|
| 561 |
+
statuses=col.statuses,
|
| 562 |
+
current_issue_count=col.issue_count,
|
| 563 |
+
wip_limit_min=col.wip_limit_min,
|
| 564 |
+
wip_limit_max=col.wip_limit_max,
|
| 565 |
+
is_over_wip_limit=bool(col.wip_limit_max and col.issue_count > col.wip_limit_max),
|
| 566 |
+
avg_time_in_column_days=avg_time,
|
| 567 |
+
throughput=len(column_issues),
|
| 568 |
+
utilization_rate=utilization,
|
| 569 |
+
is_bottleneck=is_bottleneck,
|
| 570 |
+
bottleneck_score=bottleneck_score
|
| 571 |
+
))
|
| 572 |
+
|
| 573 |
+
return analyses
|
| 574 |
+
|
| 575 |
+
def generate_wip_recommendations(
|
| 576 |
+
self,
|
| 577 |
+
column_analyses: List[KanbanColumnAnalysis],
|
| 578 |
+
flow_metrics: KanbanFlowMetrics
|
| 579 |
+
) -> List[WIPLimitRecommendation]:
|
| 580 |
+
"""Generate WIP limit recommendations for columns"""
|
| 581 |
+
|
| 582 |
+
recommendations = []
|
| 583 |
+
|
| 584 |
+
for analysis in column_analyses:
|
| 585 |
+
if analysis.column_name.lower() in ['backlog', 'done']:
|
| 586 |
+
continue # Skip backlog and done columns
|
| 587 |
+
|
| 588 |
+
current_limit = analysis.wip_limit_max
|
| 589 |
+
current_count = analysis.current_issue_count
|
| 590 |
+
|
| 591 |
+
# Calculate recommended limits
|
| 592 |
+
if analysis.is_bottleneck:
|
| 593 |
+
# Bottleneck: recommend lower WIP to force upstream to slow down
|
| 594 |
+
recommended_max = max(2, int(current_count * 0.7))
|
| 595 |
+
recommended_min = max(1, int(recommended_max * 0.5))
|
| 596 |
+
reasoning = f"Column is a bottleneck. Reducing WIP limit will help identify and resolve issues faster."
|
| 597 |
+
confidence = 0.8
|
| 598 |
+
elif analysis.is_over_wip_limit:
|
| 599 |
+
# Over limit: recommend current count or slightly higher
|
| 600 |
+
recommended_max = max(current_limit or 5, current_count)
|
| 601 |
+
recommended_min = max(1, int(recommended_max * 0.5))
|
| 602 |
+
reasoning = f"Currently over WIP limit. Recommend increasing limit to {recommended_max} or focusing on moving work out of this column."
|
| 603 |
+
confidence = 0.7
|
| 604 |
+
elif current_limit and analysis.utilization_rate < 50:
|
| 605 |
+
# Underutilized: recommend lower limit
|
| 606 |
+
recommended_max = max(2, int(current_limit * 0.7))
|
| 607 |
+
recommended_min = max(1, int(recommended_max * 0.5))
|
| 608 |
+
reasoning = f"Column is underutilized ({analysis.utilization_rate:.1f}%). Consider reducing WIP limit to improve focus."
|
| 609 |
+
confidence = 0.6
|
| 610 |
+
else:
|
| 611 |
+
# Optimal: keep current or suggest based on team size
|
| 612 |
+
recommended_max = current_limit or max(3, int(current_count * 1.2))
|
| 613 |
+
recommended_min = max(1, int(recommended_max * 0.5))
|
| 614 |
+
reasoning = f"Current WIP appears optimal. Maintain current limits and monitor flow."
|
| 615 |
+
confidence = 0.5
|
| 616 |
+
|
| 617 |
+
recommendations.append(WIPLimitRecommendation(
|
| 618 |
+
column_name=analysis.column_name,
|
| 619 |
+
current_limit=current_limit,
|
| 620 |
+
recommended_min=recommended_min,
|
| 621 |
+
recommended_max=recommended_max,
|
| 622 |
+
reasoning=reasoning,
|
| 623 |
+
confidence_score=confidence
|
| 624 |
+
))
|
| 625 |
+
|
| 626 |
+
return recommendations
|
| 627 |
+
|
| 628 |
+
def generate_kanban_insights(
|
| 629 |
+
self,
|
| 630 |
+
flow_metrics: KanbanFlowMetrics,
|
| 631 |
+
column_analyses: List[KanbanColumnAnalysis],
|
| 632 |
+
wip_recommendations: List[WIPLimitRecommendation]
|
| 633 |
+
) -> List[InsightRecommendation]:
|
| 634 |
+
"""Generate Kanban-specific insights and recommendations"""
|
| 635 |
+
|
| 636 |
+
insights = []
|
| 637 |
+
|
| 638 |
+
# Flow efficiency insight
|
| 639 |
+
insights.append(InsightRecommendation(
|
| 640 |
+
insight_id=str(uuid.uuid4()),
|
| 641 |
+
category="kanban_flow",
|
| 642 |
+
title="Kanban Flow Efficiency",
|
| 643 |
+
description=f"Board throughput is {flow_metrics.throughput} items with average cycle time of {flow_metrics.avg_cycle_time_days:.1f} days.",
|
| 644 |
+
confidence_score=0.85,
|
| 645 |
+
impact_level="high" if flow_metrics.flow_health_score < 60 else "medium",
|
| 646 |
+
recommendations=[
|
| 647 |
+
f"Current flow health score: {flow_metrics.flow_health_score:.1f}/100",
|
| 648 |
+
"Focus on reducing WIP to improve flow" if flow_metrics.current_wip > 10 else "WIP levels are healthy",
|
| 649 |
+
f"Address bottleneck in '{flow_metrics.bottleneck_column}' column" if flow_metrics.bottleneck_column else "No major bottlenecks detected"
|
| 650 |
+
],
|
| 651 |
+
supporting_data={
|
| 652 |
+
"throughput": flow_metrics.throughput,
|
| 653 |
+
"cycle_time": flow_metrics.avg_cycle_time_days,
|
| 654 |
+
"wip": flow_metrics.current_wip,
|
| 655 |
+
"health_score": flow_metrics.flow_health_score
|
| 656 |
+
},
|
| 657 |
+
generated_at=datetime.now()
|
| 658 |
+
))
|
| 659 |
+
|
| 660 |
+
# WIP violations insight
|
| 661 |
+
if flow_metrics.wip_violations > 0:
|
| 662 |
+
insights.append(InsightRecommendation(
|
| 663 |
+
insight_id=str(uuid.uuid4()),
|
| 664 |
+
category="kanban_wip",
|
| 665 |
+
title="WIP Limit Violations Detected",
|
| 666 |
+
description=f"{flow_metrics.wip_violations} column(s) are exceeding their WIP limits.",
|
| 667 |
+
confidence_score=0.95,
|
| 668 |
+
impact_level="high",
|
| 669 |
+
recommendations=[
|
| 670 |
+
"Review and enforce WIP limits to maintain flow",
|
| 671 |
+
"Investigate why work is accumulating in these columns",
|
| 672 |
+
"Consider if WIP limits need adjustment or if there are blockers"
|
| 673 |
+
],
|
| 674 |
+
supporting_data={"wip_violations": flow_metrics.wip_violations},
|
| 675 |
+
generated_at=datetime.now()
|
| 676 |
+
))
|
| 677 |
+
|
| 678 |
+
# Bottleneck insight
|
| 679 |
+
if flow_metrics.bottleneck_column:
|
| 680 |
+
bottleneck_analysis = next(
|
| 681 |
+
(a for a in column_analyses if a.column_name == flow_metrics.bottleneck_column),
|
| 682 |
+
None
|
| 683 |
+
)
|
| 684 |
+
|
| 685 |
+
if bottleneck_analysis:
|
| 686 |
+
insights.append(InsightRecommendation(
|
| 687 |
+
insight_id=str(uuid.uuid4()),
|
| 688 |
+
category="kanban_bottleneck",
|
| 689 |
+
title=f"Bottleneck Identified: {flow_metrics.bottleneck_column}",
|
| 690 |
+
description=f"The '{flow_metrics.bottleneck_column}' column has {bottleneck_analysis.current_issue_count} items with {bottleneck_analysis.avg_time_in_column_days:.1f} days average dwell time.",
|
| 691 |
+
confidence_score=0.80,
|
| 692 |
+
impact_level="high",
|
| 693 |
+
recommendations=[
|
| 694 |
+
"Add more resources to this stage of the workflow",
|
| 695 |
+
"Review and optimize processes in this column",
|
| 696 |
+
"Consider splitting this column into smaller steps",
|
| 697 |
+
"Implement swarming: have team members help clear this column"
|
| 698 |
+
],
|
| 699 |
+
supporting_data={
|
| 700 |
+
"column": flow_metrics.bottleneck_column,
|
| 701 |
+
"issue_count": bottleneck_analysis.current_issue_count,
|
| 702 |
+
"avg_time_days": bottleneck_analysis.avg_time_in_column_days
|
| 703 |
+
},
|
| 704 |
+
generated_at=datetime.now()
|
| 705 |
+
))
|
| 706 |
+
|
| 707 |
+
# Cycle time insight
|
| 708 |
+
if flow_metrics.cycle_time_variance > flow_metrics.avg_cycle_time_days * 0.5:
|
| 709 |
+
insights.append(InsightRecommendation(
|
| 710 |
+
insight_id=str(uuid.uuid4()),
|
| 711 |
+
category="kanban_predictability",
|
| 712 |
+
title="High Cycle Time Variability",
|
| 713 |
+
description=f"Cycle times are inconsistent (variance: {flow_metrics.cycle_time_variance:.1f} days), making delivery predictions difficult.",
|
| 714 |
+
confidence_score=0.75,
|
| 715 |
+
impact_level="medium",
|
| 716 |
+
recommendations=[
|
| 717 |
+
"Standardize work item sizes for better predictability",
|
| 718 |
+
"Identify and address sources of variation",
|
| 719 |
+
"Consider using work item types or classes of service",
|
| 720 |
+
"Break down larger items into smaller, more predictable pieces"
|
| 721 |
+
],
|
| 722 |
+
supporting_data={
|
| 723 |
+
"avg_cycle_time": flow_metrics.avg_cycle_time_days,
|
| 724 |
+
"variance": flow_metrics.cycle_time_variance
|
| 725 |
+
},
|
| 726 |
+
generated_at=datetime.now()
|
| 727 |
+
))
|
| 728 |
+
|
| 729 |
+
return insights
|
| 730 |
+
|
| 731 |
+
def _calculate_flow_health_score(
|
| 732 |
+
self,
|
| 733 |
+
throughput: int,
|
| 734 |
+
avg_cycle_time: float,
|
| 735 |
+
wip_violations: int,
|
| 736 |
+
current_wip: int
|
| 737 |
+
) -> float:
|
| 738 |
+
"""Calculate Kanban flow health score (0-100)"""
|
| 739 |
+
|
| 740 |
+
# Throughput score (higher is better)
|
| 741 |
+
throughput_score = min(100, throughput * 10)
|
| 742 |
+
|
| 743 |
+
# Cycle time score (lower is better, assuming < 7 days is good)
|
| 744 |
+
cycle_time_score = max(0, 100 - (avg_cycle_time * 10))
|
| 745 |
+
|
| 746 |
+
# WIP score (penalize violations and high WIP)
|
| 747 |
+
wip_score = max(0, 100 - (wip_violations * 30) - (current_wip * 2))
|
| 748 |
+
|
| 749 |
+
# Weighted average
|
| 750 |
+
health_score = (
|
| 751 |
+
throughput_score * 0.4 +
|
| 752 |
+
cycle_time_score * 0.4 +
|
| 753 |
+
wip_score * 0.2
|
| 754 |
+
)
|
| 755 |
+
|
| 756 |
+
return max(0, min(100, health_score))
|
| 757 |
+
|
| 758 |
+
|
| 759 |
+
# Singleton instance: a shared module-level IntelligenceService, constructed
# once at import time for consumers of this module.
intelligence_service = IntelligenceService()
|
utils/__init__.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .helpers import (
|
| 2 |
+
parse_date_string,
|
| 3 |
+
parse_datetime_string,
|
| 4 |
+
get_date_range,
|
| 5 |
+
calculate_percentage,
|
| 6 |
+
safe_divide,
|
| 7 |
+
format_hours,
|
| 8 |
+
format_days,
|
| 9 |
+
extract_unique_values,
|
| 10 |
+
group_by_key,
|
| 11 |
+
serialize_datetime,
|
| 12 |
+
to_json,
|
| 13 |
+
PerformanceTimer
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
__all__ = [
|
| 17 |
+
"parse_date_string",
|
| 18 |
+
"parse_datetime_string",
|
| 19 |
+
"get_date_range",
|
| 20 |
+
"calculate_percentage",
|
| 21 |
+
"safe_divide",
|
| 22 |
+
"format_hours",
|
| 23 |
+
"format_days",
|
| 24 |
+
"extract_unique_values",
|
| 25 |
+
"group_by_key",
|
| 26 |
+
"serialize_datetime",
|
| 27 |
+
"to_json",
|
| 28 |
+
"PerformanceTimer"
|
| 29 |
+
]
|
utils/helpers.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Optional, List, Dict, Any
|
| 2 |
+
from datetime import datetime, date, timedelta
|
| 3 |
+
import json
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def parse_date_string(date_str: Optional[str], default: Optional[date] = None) -> Optional[date]:
    """Parse an ISO-8601 date string; return *default* on empty or invalid input."""
    if date_str:
        try:
            return date.fromisoformat(date_str)
        except ValueError:
            pass  # fall through to the default
    return default
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def parse_datetime_string(datetime_str: Optional[str], default: Optional[datetime] = None) -> Optional[datetime]:
    """Parse an ISO-8601 datetime string (accepting a trailing 'Z' for UTC);
    return *default* on empty or invalid input."""
    if datetime_str:
        # datetime.fromisoformat rejects 'Z' on older Pythons; normalise to '+00:00'.
        normalised = datetime_str.replace('Z', '+00:00')
        try:
            return datetime.fromisoformat(normalised)
        except ValueError:
            pass  # fall through to the default
    return default
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def get_date_range(days: int = 14) -> tuple[date, date]:
    """Return a (start, end) pair covering the last *days* days, ending today."""
    today = date.today()
    return today - timedelta(days=days), today
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def calculate_percentage(part: float, whole: float, decimals: int = 2) -> float:
    """Return *part* as a percentage of *whole*, rounded; 0.0 when *whole* is zero."""
    return round(part / whole * 100, decimals) if whole != 0 else 0.0
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def safe_divide(numerator: float, denominator: float, default: float = 0.0) -> float:
    """Return numerator/denominator, or *default* when the denominator is zero."""
    return default if denominator == 0 else numerator / denominator
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def format_hours(seconds: int) -> float:
    """Convert seconds to hours, rounded to 2 decimals (0.0 for falsy input)."""
    return round(seconds / 3600, 2) if seconds else 0.0
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def format_days(hours: float) -> float:
    """Convert hours to workdays (8-hour day), rounded to 2 decimals (0.0 for falsy input)."""
    return round(hours / 8, 2) if hours else 0.0
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def extract_unique_values(items: List[Dict[str, Any]], key: str) -> List[Any]:
    """Return the unique truthy values of *key* across *items*.

    Bugfix: the previous ``list(set(...))`` returned a hash-dependent,
    nondeterministic ordering.  ``dict.fromkeys`` keeps the result unique
    while preserving first-seen order, making output deterministic.
    Falsy values (None, 0, "") are still skipped, as before.
    """
    return list(dict.fromkeys(item.get(key) for item in items if item.get(key)))
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def group_by_key(items: List[Dict[str, Any]], key: str) -> Dict[Any, List[Dict[str, Any]]]:
    """Group dictionaries by the truthy value of *key*.

    Items whose *key* is missing or falsy are silently dropped.
    """
    groups: Dict[Any, List[Dict[str, Any]]] = {}
    for item in items:
        value = item.get(key)
        if value:
            groups.setdefault(value, []).append(item)
    return groups
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def serialize_datetime(obj):
    """JSON ``default=`` hook: ISO-format datetime/date values, reject anything else."""
    if not isinstance(obj, (datetime, date)):
        raise TypeError(f"Type {type(obj)} not serializable")
    return obj.isoformat()
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
def to_json(data: Any) -> str:
    """Serialize *data* to pretty-printed JSON, ISO-formatting any datetime/date values."""
    return json.dumps(data, indent=2, default=serialize_datetime)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
class PerformanceTimer:
    """Context manager that measures and prints wall-clock duration.

    Example:
        with PerformanceTimer("load"):
            do_work()
    """

    def __init__(self, name: str = "Operation"):
        # Human-readable label used in the printed summary.
        self.name = name
        self.start_time: Optional[datetime] = None
        self.end_time: Optional[datetime] = None

    def __enter__(self):
        self.start_time = datetime.now()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end_time = datetime.now()
        print(f"{self.name} took {self.duration:.2f} seconds")

    @property
    def duration(self) -> float:
        """Elapsed seconds between enter and exit; 0.0 if the timer hasn't finished."""
        if self.start_time is None or self.end_time is None:
            return 0.0
        return (self.end_time - self.start_time).total_seconds()
|