Spaces:
Running
Running
Commit
·
84c328d
1
Parent(s):
8327a4f
feat: sync backend changes from SDDRI-Hackathon-2
Browse filesSyncing Phase 8 advanced features implementation:
- Due dates with datetime picker
- Reminders with browser notifications
- Recurring tasks support
- Priority enum fixes
- Database migrations 008 and 009
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
- 001_add_user_id_index.sql +2 -0
- 002_add_conversation_and_message_tables.sql +67 -0
- 003_add_due_date_and_priority_to_tasks.sql +10 -0
- 004_add_performance_indexes.sql +75 -0
- 005_add_tags_to_tasks.sql +13 -0
- 008_add_advanced_features.sql +88 -0
- 009_fix_priority_case.sql +16 -0
- CLAUDE.md +1 -147
- __init__.py +0 -0
- api/CLAUDE.md +1 -40
- audit.py +267 -0
- auth.py +384 -0
- chat.py +478 -0
- config.py +54 -0
- conversation.py +31 -0
- database.py +92 -0
- deps.py +89 -0
- logging.py +125 -0
- mcp_server/tools/CLAUDE.md +1 -6
- message.py +46 -0
- middleware.py +95 -0
- migrations/CLAUDE.md +1 -11
- models/CLAUDE.md +1 -21
- nlp_service.py +122 -0
- rate_limiter.py +181 -0
- recurrence.py +68 -0
- recurrence_service.py +219 -0
- run_migration.py +84 -0
- security.py +276 -0
- server.py +58 -0
- services/CLAUDE.md +1 -11
- task.py +237 -0
- tasks.py +532 -0
- tools/CLAUDE.md +12 -0
- tools/__init__.py +51 -0
- tools/add_task.py +320 -0
- tools/complete_all_tasks.py +160 -0
- tools/complete_task.py +144 -0
- tools/delete_all_tasks.py +168 -0
- tools/delete_task.py +129 -0
- tools/list_tasks.py +242 -0
- tools/update_task.py +303 -0
- user.py +53 -0
- validators.py +144 -0
- verify_schema.py +134 -0
001_add_user_id_index.sql
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- Add index on tasks.user_id for improved query performance
|
| 2 |
+
CREATE INDEX IF NOT EXISTS idx_tasks_user_id ON tasks(user_id);
|
002_add_conversation_and_message_tables.sql
ADDED
|
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- Migration: Add conversation and message tables for AI Chatbot (Phase III)
|
| 2 |
+
-- [Task]: T007
|
| 3 |
+
-- [From]: specs/004-ai-chatbot/plan.md
|
| 4 |
+
|
| 5 |
+
-- Enable UUID extension if not exists
|
| 6 |
+
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
|
| 7 |
+
|
| 8 |
+
-- Create conversation table
|
| 9 |
+
CREATE TABLE IF NOT EXISTS conversation (
|
| 10 |
+
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
| 11 |
+
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
| 12 |
+
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW(),
|
| 13 |
+
updated_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
|
| 14 |
+
);
|
| 15 |
+
|
| 16 |
+
-- Create index on user_id for conversation lookup
|
| 17 |
+
CREATE INDEX IF NOT EXISTS idx_conversation_user_id ON conversation(user_id);
|
| 18 |
+
CREATE INDEX IF NOT EXISTS idx_conversation_updated_at ON conversation(updated_at DESC);
|
| 19 |
+
|
| 20 |
+
-- Create composite index for user's conversations ordered by update time
|
| 21 |
+
CREATE INDEX IF NOT EXISTS idx_conversation_user_updated ON conversation(user_id, updated_at DESC);
|
| 22 |
+
|
| 23 |
+
-- Create message table
|
| 24 |
+
CREATE TABLE IF NOT EXISTS message (
|
| 25 |
+
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
|
| 26 |
+
conversation_id UUID NOT NULL REFERENCES conversation(id) ON DELETE CASCADE,
|
| 27 |
+
user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
|
| 28 |
+
role VARCHAR(10) NOT NULL CHECK (role IN ('user', 'assistant')),
|
| 29 |
+
content TEXT NOT NULL,
|
| 30 |
+
created_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT NOW()
|
| 31 |
+
);
|
| 32 |
+
|
| 33 |
+
-- Create indexes for message queries
|
| 34 |
+
CREATE INDEX IF NOT EXISTS idx_message_conversation_id ON message(conversation_id);
|
| 35 |
+
CREATE INDEX IF NOT EXISTS idx_message_user_id ON message(user_id);
|
| 36 |
+
CREATE INDEX IF NOT EXISTS idx_message_role ON message(role);
|
| 37 |
+
CREATE INDEX IF NOT EXISTS idx_message_created_at ON message(created_at DESC);
|
| 38 |
+
|
| 39 |
+
-- Create composite index for conversation messages (optimization for loading conversation history)
|
| 40 |
+
CREATE INDEX IF NOT EXISTS idx_message_conversation_created ON message(conversation_id, created_at ASC);
|
| 41 |
+
|
| 42 |
+
-- Add trigger to update conversation.updated_at when new message is added
|
| 43 |
+
-- This requires PL/pgSQL
|
| 44 |
+
CREATE OR REPLACE FUNCTION update_conversation_updated_at()
|
| 45 |
+
RETURNS TRIGGER AS $$
|
| 46 |
+
BEGIN
|
| 47 |
+
UPDATE conversation
|
| 48 |
+
SET updated_at = NOW()
|
| 49 |
+
WHERE id = NEW.conversation_id;
|
| 50 |
+
RETURN NEW;
|
| 51 |
+
END;
|
| 52 |
+
$$ LANGUAGE plpgsql;
|
| 53 |
+
|
| 54 |
+
-- Drop trigger if exists to avoid errors
|
| 55 |
+
DROP TRIGGER IF EXISTS trigger_update_conversation_updated_at ON message;
|
| 56 |
+
|
| 57 |
+
-- Create trigger
|
| 58 |
+
CREATE TRIGGER trigger_update_conversation_updated_at
|
| 59 |
+
AFTER INSERT ON message
|
| 60 |
+
FOR EACH ROW
|
| 61 |
+
EXECUTE FUNCTION update_conversation_updated_at();
|
| 62 |
+
|
| 63 |
+
-- Add comment for documentation
|
| 64 |
+
COMMENT ON TABLE conversation IS 'Stores chat sessions between users and AI assistant';
|
| 65 |
+
COMMENT ON TABLE message IS 'Stores individual messages in conversations';
|
| 66 |
+
COMMENT ON COLUMN message.role IS 'Either "user" or "assistant" - who sent the message';
|
| 67 |
+
COMMENT ON COLUMN message.content IS 'Message content with max length of 10,000 characters';
|
003_add_due_date_and_priority_to_tasks.sql
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- Add due_date and priority columns to tasks table
|
| 2 |
+
-- Migration: 003
|
| 3 |
+
-- [From]: specs/004-ai-chatbot/plan.md - Task Model Extensions
|
| 4 |
+
|
| 5 |
+
-- Add due_date column (nullable, with index for filtering)
|
| 6 |
+
ALTER TABLE tasks ADD COLUMN IF NOT EXISTS due_date TIMESTAMP WITH TIME ZONE;
|
| 7 |
+
CREATE INDEX IF NOT EXISTS idx_tasks_due_date ON tasks(due_date);
|
| 8 |
+
|
| 9 |
+
-- Add priority column with default value
|
| 10 |
+
ALTER TABLE tasks ADD COLUMN IF NOT EXISTS priority VARCHAR(10) DEFAULT 'medium';
|
004_add_performance_indexes.sql
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- Database indexes for conversation and message queries
|
| 2 |
+
--
|
| 3 |
+
-- [Task]: T059
|
| 4 |
+
-- [From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
--
|
| 6 |
+
-- These indexes optimize common queries for:
|
| 7 |
+
-- - Conversation lookup by user_id
|
| 8 |
+
-- - Message lookup by conversation_id
|
| 9 |
+
-- - Message ordering by created_at
|
| 10 |
+
-- - Composite indexes for filtering
|
| 11 |
+
|
| 12 |
+
-- Index on conversations for user lookup
|
| 13 |
+
-- Optimizes: SELECT * FROM conversations WHERE user_id = ?
|
| 14 |
+
CREATE INDEX IF NOT EXISTS idx_conversations_user_id
|
| 15 |
+
ON conversations(user_id);
|
| 16 |
+
|
| 17 |
+
-- Index on conversations for updated_at sorting (cleanup)
|
| 18 |
+
-- Optimizes: SELECT * FROM conversations WHERE updated_at < ? (90-day cleanup)
|
| 19 |
+
CREATE INDEX IF NOT EXISTS idx_conversations_updated_at
|
| 20 |
+
ON conversations(updated_at);
|
| 21 |
+
|
| 22 |
+
-- Composite index for user conversations ordered by activity
|
| 23 |
+
-- Optimizes: SELECT * FROM conversations WHERE user_id = ? ORDER BY updated_at DESC
|
| 24 |
+
CREATE INDEX IF NOT EXISTS idx_conversations_user_updated
|
| 25 |
+
ON conversations(user_id, updated_at DESC);
|
| 26 |
+
|
| 27 |
+
-- Index on messages for conversation lookup
|
| 28 |
+
-- Optimizes: SELECT * FROM messages WHERE conversation_id = ?
|
| 29 |
+
CREATE INDEX IF NOT EXISTS idx_messages_conversation_id
|
| 30 |
+
ON messages(conversation_id);
|
| 31 |
+
|
| 32 |
+
-- Index on messages for user lookup
|
| 33 |
+
-- Optimizes: SELECT * FROM messages WHERE user_id = ?
|
| 34 |
+
CREATE INDEX IF NOT EXISTS idx_messages_user_id
|
| 35 |
+
ON messages(user_id);
|
| 36 |
+
|
| 37 |
+
-- Index on messages for timestamp ordering
|
| 38 |
+
-- Optimizes: SELECT * FROM messages WHERE conversation_id = ? ORDER BY created_at ASC
|
| 39 |
+
CREATE INDEX IF NOT EXISTS idx_messages_created_at
|
| 40 |
+
ON messages(created_at);
|
| 41 |
+
|
| 42 |
+
-- Composite index for conversation message retrieval
|
| 43 |
+
-- Optimizes: SELECT * FROM messages WHERE conversation_id = ? ORDER BY created_at ASC
|
| 44 |
+
CREATE INDEX IF NOT EXISTS idx_messages_conversation_created
|
| 45 |
+
ON messages(conversation_id, created_at ASC);
|
| 46 |
+
|
| 47 |
+
-- Index on messages for role filtering
|
| 48 |
+
-- Optimizes: SELECT * FROM messages WHERE conversation_id = ? AND role = ?
|
| 49 |
+
CREATE INDEX IF NOT EXISTS idx_messages_conversation_role
|
| 50 |
+
ON messages(conversation_id, role);
|
| 51 |
+
|
| 52 |
+
-- Index on tasks for user lookup (if not exists)
|
| 53 |
+
-- Optimizes: SELECT * FROM tasks WHERE user_id = ?
|
| 54 |
+
CREATE INDEX IF NOT EXISTS idx_tasks_user_id
|
| 55 |
+
ON tasks(user_id);
|
| 56 |
+
|
| 57 |
+
-- Index on tasks for completion status filtering
|
| 58 |
+
-- Optimizes: SELECT * FROM tasks WHERE user_id = ? AND completed = ?
|
| 59 |
+
CREATE INDEX IF NOT EXISTS idx_tasks_user_completed
|
| 60 |
+
ON tasks(user_id, completed);
|
| 61 |
+
|
| 62 |
+
-- Index on tasks for due date filtering
|
| 63 |
+
-- Optimizes: SELECT * FROM tasks WHERE user_id = ? AND due_date IS NOT NULL AND due_date < ?
|
| 64 |
+
CREATE INDEX IF NOT EXISTS idx_tasks_due_date
|
| 65 |
+
ON tasks(due_date) WHERE due_date IS NOT NULL;
|
| 66 |
+
|
| 67 |
+
-- Composite index for task priority filtering
|
| 68 |
+
-- Optimizes: SELECT * FROM tasks WHERE user_id = ? AND priority = ?
|
| 69 |
+
CREATE INDEX IF NOT EXISTS idx_tasks_user_priority
|
| 70 |
+
ON tasks(user_id, priority);
|
| 71 |
+
|
| 72 |
+
-- Index on tasks for created_at sorting
|
| 73 |
+
-- Optimizes: SELECT * FROM tasks WHERE user_id = ? ORDER BY created_at DESC
|
| 74 |
+
CREATE INDEX IF NOT EXISTS idx_tasks_user_created
|
| 75 |
+
ON tasks(user_id, created_at DESC);
|
005_add_tags_to_tasks.sql
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- Add tags column to tasks table
|
| 2 |
+
-- Migration: 005_add_tags_to_tasks.sql
|
| 3 |
+
-- [Task]: T036, T037
|
| 4 |
+
-- [From]: specs/007-intermediate-todo-features/tasks.md
|
| 5 |
+
|
| 6 |
+
-- Add tags column as TEXT array (default: empty array)
|
| 7 |
+
ALTER TABLE tasks ADD COLUMN IF NOT EXISTS tags TEXT[] NOT NULL DEFAULT '{}';
|
| 8 |
+
|
| 9 |
+
-- Add index on tags for faster tag-based queries
|
| 10 |
+
CREATE INDEX IF NOT EXISTS idx_tasks_tags ON tasks USING GIN (tags);
|
| 11 |
+
|
| 12 |
+
-- Add comment for documentation
|
| 13 |
+
COMMENT ON COLUMN tasks.tags IS 'Array of tag strings associated with the task';
|
008_add_advanced_features.sql
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- Migration: Add advanced features to tasks table
|
| 2 |
+
-- Version: 008_advanced_features
|
| 3 |
+
-- Date: 2026-02-04
|
| 4 |
+
-- [Task]: T001
|
| 5 |
+
|
| 6 |
+
-- Step 1: Add new columns for reminders
|
| 7 |
+
ALTER TABLE tasks
|
| 8 |
+
ADD COLUMN IF NOT EXISTS reminder_offset INTEGER,
|
| 9 |
+
ADD COLUMN IF NOT EXISTS reminder_sent BOOLEAN DEFAULT FALSE;
|
| 10 |
+
|
| 11 |
+
-- Step 2: Add new columns for recurrence
|
| 12 |
+
ALTER TABLE tasks
|
| 13 |
+
ADD COLUMN IF NOT EXISTS recurrence JSONB,
|
| 14 |
+
ADD COLUMN IF NOT EXISTS parent_task_id UUID REFERENCES tasks(id) ON DELETE SET NULL;
|
| 15 |
+
|
| 16 |
+
-- Step 3: Create indexes for performance
|
| 17 |
+
CREATE INDEX IF NOT EXISTS idx_tasks_parent_task_id ON tasks(parent_task_id);
|
| 18 |
+
CREATE INDEX IF NOT EXISTS idx_tasks_reminder_sent ON tasks(reminder_sent) WHERE reminder_sent = FALSE;
|
| 19 |
+
|
| 20 |
+
-- Step 4: Add constraints (without IF NOT EXISTS - use DO blocks instead)
|
| 21 |
+
DO $$
|
| 22 |
+
BEGIN
|
| 23 |
+
-- Add reminder offset positive constraint
|
| 24 |
+
IF NOT EXISTS (
|
| 25 |
+
SELECT 1 FROM pg_constraint
|
| 26 |
+
WHERE conname = 'chk_reminder_offset_positive'
|
| 27 |
+
) THEN
|
| 28 |
+
ALTER TABLE tasks
|
| 29 |
+
ADD CONSTRAINT chk_reminder_offset_positive
|
| 30 |
+
CHECK (reminder_offset IS NULL OR reminder_offset >= 0);
|
| 31 |
+
END IF;
|
| 32 |
+
|
| 33 |
+
-- Add recurrence no self-reference constraint
|
| 34 |
+
IF NOT EXISTS (
|
| 35 |
+
SELECT 1 FROM pg_constraint
|
| 36 |
+
WHERE conname = 'chk_recurrence_no_self_reference'
|
| 37 |
+
) THEN
|
| 38 |
+
ALTER TABLE tasks
|
| 39 |
+
ADD CONSTRAINT chk_recurrence_no_self_reference
|
| 40 |
+
CHECK (parent_task_id IS NULL OR id != parent_task_id);
|
| 41 |
+
END IF;
|
| 42 |
+
END $$;
|
| 43 |
+
|
| 44 |
+
-- Step 5: Add comments for documentation
|
| 45 |
+
COMMENT ON COLUMN tasks.reminder_offset IS 'Minutes before due_date to send notification (0 = at due time)';
|
| 46 |
+
COMMENT ON COLUMN tasks.reminder_sent IS 'Whether notification has been sent for this task';
|
| 47 |
+
COMMENT ON COLUMN tasks.recurrence IS 'Recurrence rule as JSONB (frequency, interval, count, end_date)';
|
| 48 |
+
COMMENT ON COLUMN tasks.parent_task_id IS 'For recurring task instances, links to the original task';
|
| 49 |
+
|
| 50 |
+
-- Step 6: Create validation function for recurrence JSONB
|
| 51 |
+
CREATE OR REPLACE FUNCTION validate_recurrence(rule jsonb)
|
| 52 |
+
RETURNS boolean AS $$
|
| 53 |
+
BEGIN
|
| 54 |
+
-- Check frequency is present and valid
|
| 55 |
+
IF rule->>'frequency' NOT IN ('daily', 'weekly', 'monthly') THEN
|
| 56 |
+
RETURN false;
|
| 57 |
+
END IF;
|
| 58 |
+
|
| 59 |
+
-- Check interval is valid if present
|
| 60 |
+
IF (rule->>'interval') IS NOT NULL THEN
|
| 61 |
+
IF (rule->>'interval')::integer < 1 OR (rule->>'interval')::integer > 365 THEN
|
| 62 |
+
RETURN false;
|
| 63 |
+
END IF;
|
| 64 |
+
END IF;
|
| 65 |
+
|
| 66 |
+
-- Check count is valid if present
|
| 67 |
+
IF (rule->>'count') IS NOT NULL THEN
|
| 68 |
+
IF (rule->>'count')::integer < 1 OR (rule->>'count')::integer > 100 THEN
|
| 69 |
+
RETURN false;
|
| 70 |
+
END IF;
|
| 71 |
+
END IF;
|
| 72 |
+
|
| 73 |
+
RETURN true;
|
| 74 |
+
END;
|
| 75 |
+
$$ LANGUAGE plpgsql;
|
| 76 |
+
|
| 77 |
+
-- Step 7: Add recurrence valid constraint
|
| 78 |
+
DO $$
|
| 79 |
+
BEGIN
|
| 80 |
+
IF NOT EXISTS (
|
| 81 |
+
SELECT 1 FROM pg_constraint
|
| 82 |
+
WHERE conname = 'chk_recurrence_valid'
|
| 83 |
+
) THEN
|
| 84 |
+
ALTER TABLE tasks
|
| 85 |
+
ADD CONSTRAINT chk_recurrence_valid
|
| 86 |
+
CHECK (recurrence IS NULL OR validate_recurrence(recurrence));
|
| 87 |
+
END IF;
|
| 88 |
+
END $$;
|
009_fix_priority_case.sql
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
-- Migration 009: Fix priority values to match enum (uppercase)
|
| 2 |
+
-- [Task]: Data fix for existing tasks
|
| 3 |
+
-- [From]: Issue with existing lowercase priority values
|
| 4 |
+
|
| 5 |
+
-- Update all priority values to uppercase to match PriorityLevel enum
|
| 6 |
+
UPDATE tasks
|
| 7 |
+
SET priority = CASE
|
| 8 |
+
WHEN priority = 'low' THEN 'LOW'
|
| 9 |
+
WHEN priority = 'medium' THEN 'MEDIUM'
|
| 10 |
+
WHEN priority = 'high' THEN 'HIGH'
|
| 11 |
+
ELSE priority
|
| 12 |
+
END
|
| 13 |
+
WHERE priority IN ('low', 'medium', 'high');
|
| 14 |
+
|
| 15 |
+
-- Verify the update
|
| 16 |
+
SELECT COUNT(*) as tasks_updated FROM tasks;
|
CLAUDE.md
CHANGED
|
@@ -1,153 +1,7 @@
|
|
| 1 |
-
# Backend Development Guidelines
|
| 2 |
-
|
| 3 |
-
## Project Overview
|
| 4 |
-
|
| 5 |
-
This directory contains the backend API for the Todo List application, built with Python FastAPI and SQLModel.
|
| 6 |
-
|
| 7 |
-
## Technology Stack
|
| 8 |
-
|
| 9 |
-
- **Language**: Python 3.13+
|
| 10 |
-
- **Web Framework**: FastAPI (modern, high-performance web framework for building APIs)
|
| 11 |
-
- **ORM**: SQLModel (combines SQLAlchemy and Pydantic for database interactions and validation)
|
| 12 |
-
- **Database**: Neon Serverless PostgreSQL
|
| 13 |
-
- **Package Manager**: UV
|
| 14 |
-
|
| 15 |
-
## Project Structure
|
| 16 |
-
|
| 17 |
-
```
|
| 18 |
-
backend/
|
| 19 |
-
├── src/ # Application source code
|
| 20 |
-
│ ├── models/ # SQLModel database models
|
| 21 |
-
│ ├── api/ # API route handlers
|
| 22 |
-
│ ├── services/ # Business logic layer
|
| 23 |
-
│ ├── database.py # Database connection and session management
|
| 24 |
-
│ └── main.py # FastAPI application entry point
|
| 25 |
-
├── tests/ # Test suite
|
| 26 |
-
├── pyproject.toml # UV project configuration
|
| 27 |
-
└── CLAUDE.md # This file
|
| 28 |
-
```
|
| 29 |
-
|
| 30 |
-
## Development Commands
|
| 31 |
-
|
| 32 |
-
```bash
|
| 33 |
-
cd backend
|
| 34 |
-
|
| 35 |
-
# Install dependencies
|
| 36 |
-
uv sync
|
| 37 |
-
|
| 38 |
-
# Run development server
|
| 39 |
-
uv run python src/main.py
|
| 40 |
-
|
| 41 |
-
# Run tests
|
| 42 |
-
uv run pytest tests/
|
| 43 |
-
|
| 44 |
-
# Run with auto-reload during development
|
| 45 |
-
uv run uvicorn src.main:app --reload
|
| 46 |
-
|
| 47 |
-
# Check code quality
|
| 48 |
-
uv run ruff check .
|
| 49 |
-
```
|
| 50 |
-
|
| 51 |
-
## API Endpoints
|
| 52 |
-
|
| 53 |
-
The following REST API endpoints are implemented:
|
| 54 |
-
|
| 55 |
-
| Method | Endpoint | Description |
|
| 56 |
-
|--------|----------|-------------|
|
| 57 |
-
| GET | `/api/{user_id}/tasks` | List all tasks for a user |
|
| 58 |
-
| POST | `/api/{user_id}/tasks` | Create a new task |
|
| 59 |
-
| GET | `/api/{user_id}/tasks/{id}` | Get task details |
|
| 60 |
-
| PUT | `/api/{user_id}/tasks/{id}` | Update a task |
|
| 61 |
-
| DELETE | `/api/{user_id}/tasks/{id}` | Delete a task |
|
| 62 |
-
| PATCH | `/api/{user_id}/tasks/{id}/complete` | Toggle completion status |
|
| 63 |
-
|
| 64 |
-
## Database Models
|
| 65 |
-
|
| 66 |
-
### Task Model
|
| 67 |
-
- `id`: Unique identifier (auto-generated)
|
| 68 |
-
- `user_id`: Foreign key to user (for data segregation)
|
| 69 |
-
- `title`: Task title (required, max 255 characters)
|
| 70 |
-
- `description`: Task description (optional, max 2000 characters)
|
| 71 |
-
- `completed`: Boolean status (default: false)
|
| 72 |
-
- `created_at`: Timestamp of creation
|
| 73 |
-
- `updated_at`: Timestamp of last update
|
| 74 |
-
|
| 75 |
-
## Key Features
|
| 76 |
-
|
| 77 |
-
- **FastAPI Auto-Documentation**: Interactive API docs available at `/docs` and `/redoc`
|
| 78 |
-
- **Validation**: Automatic request/response validation via Pydantic
|
| 79 |
-
- **Async Support**: Built-in async/await for high-performance I/O
|
| 80 |
-
- **Type Safety**: Full type hints with SQLModel and Pydantic
|
| 81 |
-
- **Database Migrations**: SQLModel schema management with Alembic (if needed)
|
| 82 |
-
|
| 83 |
-
## Development Notes
|
| 84 |
-
|
| 85 |
-
- Authentication is NOT enforced in this phase (user_id is passed as path parameter)
|
| 86 |
-
- Database connection string should be provided via `DATABASE_URL` environment variable
|
| 87 |
-
- Default pagination: 50 tasks per request, maximum 100
|
| 88 |
-
- All timestamps are in UTC
|
| 89 |
-
- Use dependency injection for database sessions
|
| 90 |
-
|
| 91 |
-
## Environment Variables
|
| 92 |
-
|
| 93 |
-
```bash
|
| 94 |
-
DATABASE_URL=postgresql://user:password@host/database
|
| 95 |
-
```
|
| 96 |
-
|
| 97 |
-
## Testing Strategy
|
| 98 |
-
|
| 99 |
-
- Unit tests for business logic
|
| 100 |
-
- Integration tests for API endpoints
|
| 101 |
-
- Database tests with test fixtures
|
| 102 |
-
- Use pytest for test runner
|
| 103 |
-
- Mock external dependencies where appropriate
|
| 104 |
-
|
| 105 |
-
## Code Style
|
| 106 |
-
|
| 107 |
-
- Follow Python 3.13+ standard conventions
|
| 108 |
-
- Use type hints for all function signatures
|
| 109 |
-
- Docstrings for all public functions and classes
|
| 110 |
-
- Ruff for linting and formatting
|
| 111 |
-
|
| 112 |
-
## Performance Considerations
|
| 113 |
-
|
| 114 |
-
- Use database indexing on frequently queried fields (user_id, created_at)
|
| 115 |
-
- Implement pagination for list endpoints to prevent large result sets
|
| 116 |
-
- Use async database operations for better concurrency
|
| 117 |
-
- Connection pooling for database connections
|
| 118 |
-
|
| 119 |
-
## Documentation Resources
|
| 120 |
-
|
| 121 |
-
- [FastAPI Documentation](https://fastapi.tiangolo.com/)
|
| 122 |
-
- [SQLModel Documentation](https://sqlmodel.tiangolo.com/)
|
| 123 |
-
- [Pydantic Documentation](https://docs.pydantic.dev/)
|
| 124 |
-
|
| 125 |
-
## Related Specs
|
| 126 |
-
|
| 127 |
-
- Feature Specification: [specs/001-backend-task-api/spec.md](../specs/001-backend-task-api/spec.md)
|
| 128 |
-
- Project Constitution: [constitution.md](../.memory/constitution.md)
|
| 129 |
-
|
| 130 |
-
|
| 131 |
<claude-mem-context>
|
| 132 |
# Recent Activity
|
| 133 |
|
| 134 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 135 |
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
| ID | Time | T | Title | Read |
|
| 139 |
-
|----|------|---|-------|------|
|
| 140 |
-
| #58 | 3:17 PM | ✅ | Installed httpx-ws package for WebSocket testing support | ~187 |
|
| 141 |
-
|
| 142 |
-
### Jan 28, 2026
|
| 143 |
-
|
| 144 |
-
| ID | Time | T | Title | Read |
|
| 145 |
-
|----|------|---|-------|------|
|
| 146 |
-
| #587 | 8:43 PM | 🔵 | Backend pyproject.toml Defines All Python Dependencies | ~191 |
|
| 147 |
-
|
| 148 |
-
### Jan 30, 2026
|
| 149 |
-
|
| 150 |
-
| ID | Time | T | Title | Read |
|
| 151 |
-
|----|------|---|-------|------|
|
| 152 |
-
| #920 | 12:06 PM | 🔵 | Reviewed main.py to update logging configuration call | ~200 |
|
| 153 |
</claude-mem-context>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
<claude-mem-context>
|
| 2 |
# Recent Activity
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
+
*No recent activity*
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
</claude-mem-context>
|
__init__.py
ADDED
|
File without changes
|
api/CLAUDE.md
CHANGED
|
@@ -3,44 +3,5 @@
|
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
| ID | Time | T | Title | Read |
|
| 9 |
-
|----|------|---|-------|------|
|
| 10 |
-
| #63 | 3:50 PM | 🔴 | Fixed import error in chat.py by moving decode_access_token to core.security | ~209 |
|
| 11 |
-
| #60 | 3:46 PM | 🔴 | Fixed import path for WebSocket manager from websockets to ws_manager | ~198 |
|
| 12 |
-
| #56 | 3:04 PM | 🟣 | Completed Phase 11 WebSocket real-time streaming implementation with 14 tasks | ~677 |
|
| 13 |
-
| #42 | 2:58 PM | 🟣 | Implemented complete WebSocket backend infrastructure for real-time progress streaming | ~395 |
|
| 14 |
-
| #40 | 2:57 PM | 🟣 | Added WebSocket endpoint to chat API for real-time progress streaming | ~483 |
|
| 15 |
-
| #39 | " | 🟣 | Added WebSocket imports to chat API for real-time progress streaming | ~303 |
|
| 16 |
-
| #10 | 1:51 PM | 🟣 | Implemented Phase 10 security, audit logging, database indexes, and documentation for AI chatbot | ~448 |
|
| 17 |
-
|
| 18 |
-
### Jan 28, 2026
|
| 19 |
-
|
| 20 |
-
| ID | Time | T | Title | Read |
|
| 21 |
-
|----|------|---|-------|------|
|
| 22 |
-
| #693 | 11:02 PM | 🟣 | List Tasks Endpoint Extended with Priority Query Parameter | ~303 |
|
| 23 |
-
| #664 | 10:50 PM | 🟣 | Task Creation Updated to Support Priority, Tags, and Due Date Fields | ~232 |
|
| 24 |
-
| #663 | " | 🔵 | Task API Endpoints Implement JWT-Authenticated CRUD Operations | ~439 |
|
| 25 |
-
|
| 26 |
-
### Jan 29, 2026
|
| 27 |
-
|
| 28 |
-
| ID | Time | T | Title | Read |
|
| 29 |
-
|----|------|---|-------|------|
|
| 30 |
-
| #876 | 7:40 PM | 🔴 | Priority enum value mismatch causing database query failure | ~238 |
|
| 31 |
-
| #868 | 7:34 PM | 🔴 | Backend database schema missing tags column in tasks table | ~258 |
|
| 32 |
-
|
| 33 |
-
### Jan 30, 2026
|
| 34 |
-
|
| 35 |
-
| ID | Time | T | Title | Read |
|
| 36 |
-
|----|------|---|-------|------|
|
| 37 |
-
| #946 | 1:01 PM | 🔵 | Reviewed chat API error handling for AI service configuration | ~228 |
|
| 38 |
-
| #945 | 1:00 PM | 🔵 | Reviewed chat endpoint implementation for AI service integration | ~261 |
|
| 39 |
-
| #944 | " | 🔵 | Reviewed chat.py API endpoint error handling for AI agent streaming | ~238 |
|
| 40 |
-
| #943 | 12:59 PM | 🔵 | Located AI agent integration in chat API endpoint | ~185 |
|
| 41 |
-
| #922 | 12:32 PM | 🔴 | Identified SQLModel Session.exec() parameter error in list_tags endpoint | ~290 |
|
| 42 |
-
| #921 | 12:31 PM | 🔵 | Verified correct route ordering in tasks.py after refactor | ~213 |
|
| 43 |
-
| #916 | 12:05 PM | 🔴 | Identified duplicate route definitions in tasks.py after route reordering | ~258 |
|
| 44 |
-
| #914 | 11:13 AM | 🔴 | Identified route definition order in tasks.py requiring reorganization | ~296 |
|
| 45 |
-
| #909 | 10:37 AM | 🔴 | Identified FastAPI route ordering issue causing UUID validation error | ~262 |
|
| 46 |
</claude-mem-context>
|
|
|
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
+
*No recent activity*
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
</claude-mem-context>
|
audit.py
ADDED
|
@@ -0,0 +1,267 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Audit logging service for MCP tool invocations.
|
| 2 |
+
|
| 3 |
+
[Task]: T058
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
|
| 6 |
+
This module provides audit logging for all MCP tool invocations to track
|
| 7 |
+
usage patterns, detect abuse, and maintain compliance records.
|
| 8 |
+
"""
|
| 9 |
+
import logging
|
| 10 |
+
import json
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
from typing import Any, Optional
|
| 13 |
+
from uuid import UUID
|
| 14 |
+
|
| 15 |
+
from sqlmodel import Session
|
| 16 |
+
|
| 17 |
+
from core.database import engine
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
# Configure audit logger
|
| 21 |
+
audit_logger = logging.getLogger("audit")
|
| 22 |
+
audit_logger.setLevel(logging.INFO)
|
| 23 |
+
|
| 24 |
+
# Audit log handler (separate from main logs)
|
| 25 |
+
audit_handler = logging.FileHandler("logs/audit.log")
|
| 26 |
+
audit_handler.setFormatter(logging.Formatter(
|
| 27 |
+
'%(asctime)s | %(levelname)s | %(message)s'
|
| 28 |
+
))
|
| 29 |
+
audit_logger.addHandler(audit_handler)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def log_tool_invocation(
|
| 33 |
+
tool_name: str,
|
| 34 |
+
user_id: str | UUID,
|
| 35 |
+
args: dict[str, Any],
|
| 36 |
+
result: dict[str, Any],
|
| 37 |
+
conversation_id: Optional[str | UUID] = None,
|
| 38 |
+
execution_time_ms: Optional[float] = None,
|
| 39 |
+
error: Optional[str] = None
|
| 40 |
+
) -> None:
|
| 41 |
+
"""Log an MCP tool invocation for audit purposes.
|
| 42 |
+
|
| 43 |
+
[From]: specs/004-ai-chatbot/spec.md - NFR-018
|
| 44 |
+
|
| 45 |
+
Args:
|
| 46 |
+
tool_name: Name of the tool that was invoked
|
| 47 |
+
user_id: ID of the user who invoked the tool
|
| 48 |
+
args: Arguments passed to the tool
|
| 49 |
+
result: Result returned by the tool
|
| 50 |
+
conversation_id: Optional conversation context
|
| 51 |
+
execution_time_ms: Optional execution time in milliseconds
|
| 52 |
+
error: Optional error message if invocation failed
|
| 53 |
+
"""
|
| 54 |
+
log_entry = {
|
| 55 |
+
"timestamp": datetime.utcnow().isoformat(),
|
| 56 |
+
"tool_name": tool_name,
|
| 57 |
+
"user_id": str(user_id),
|
| 58 |
+
"conversation_id": str(conversation_id) if conversation_id else None,
|
| 59 |
+
"success": error is None,
|
| 60 |
+
"error": error,
|
| 61 |
+
"execution_time_ms": execution_time_ms,
|
| 62 |
+
"args_summary": _summarize_args(tool_name, args),
|
| 63 |
+
"result_summary": _summarize_result(result)
|
| 64 |
+
}
|
| 65 |
+
|
| 66 |
+
# Log to file
|
| 67 |
+
audit_logger.info(json.dumps(log_entry))
|
| 68 |
+
|
| 69 |
+
# Also log to database for querying (if needed)
|
| 70 |
+
_persist_audit_log(log_entry)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def _summarize_args(tool_name: str, args: dict[str, Any]) -> dict[str, Any]:
|
| 74 |
+
"""Create a summary of tool arguments for logging.
|
| 75 |
+
|
| 76 |
+
[From]: T058 - Add audit logging for all MCP tool invocations
|
| 77 |
+
|
| 78 |
+
Args:
|
| 79 |
+
tool_name: Name of the tool
|
| 80 |
+
args: Full arguments dict
|
| 81 |
+
|
| 82 |
+
Returns:
|
| 83 |
+
Summarized arguments (sanitized for sensitive data)
|
| 84 |
+
"""
|
| 85 |
+
# Don't log full user content for privacy
|
| 86 |
+
if "message" in args:
|
| 87 |
+
return {"message_length": len(str(args.get("message", "")))}
|
| 88 |
+
|
| 89 |
+
# For task operations, log relevant info
|
| 90 |
+
if tool_name in ["add_task", "update_task", "complete_task", "delete_task"]:
|
| 91 |
+
summary = {}
|
| 92 |
+
if "task_id" in args:
|
| 93 |
+
summary["task_id"] = str(args["task_id"])
|
| 94 |
+
if "title" in args:
|
| 95 |
+
summary["title"] = args["title"][:50] # Truncate long titles
|
| 96 |
+
if "completed" in args:
|
| 97 |
+
summary["completed"] = args["completed"]
|
| 98 |
+
if "priority" in args:
|
| 99 |
+
summary["priority"] = args["priority"]
|
| 100 |
+
return summary
|
| 101 |
+
|
| 102 |
+
# For list_tasks, log filters
|
| 103 |
+
if tool_name == "list_tasks":
|
| 104 |
+
summary = {}
|
| 105 |
+
if "status" in args:
|
| 106 |
+
summary["status"] = args["status"]
|
| 107 |
+
if "limit" in args:
|
| 108 |
+
summary["limit"] = args["limit"]
|
| 109 |
+
return summary
|
| 110 |
+
|
| 111 |
+
# Default: return all args (tool-specific sanitization could be added)
|
| 112 |
+
return args
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def _summarize_result(result: dict[str, Any]) -> dict[str, Any]:
|
| 116 |
+
"""Create a summary of tool result for logging.
|
| 117 |
+
|
| 118 |
+
[From]: T058 - Add audit logging for all MCP tool invocations
|
| 119 |
+
|
| 120 |
+
Args:
|
| 121 |
+
result: Full result dict from tool
|
| 122 |
+
|
| 123 |
+
Returns:
|
| 124 |
+
Summarized result
|
| 125 |
+
"""
|
| 126 |
+
if not isinstance(result, dict):
|
| 127 |
+
return {"result_type": type(result).__name__}
|
| 128 |
+
|
| 129 |
+
summary = {}
|
| 130 |
+
|
| 131 |
+
# Extract key fields
|
| 132 |
+
if "success" in result:
|
| 133 |
+
summary["success"] = result["success"]
|
| 134 |
+
|
| 135 |
+
if "error" in result:
|
| 136 |
+
summary["error"] = result["error"]
|
| 137 |
+
|
| 138 |
+
if "task" in result:
|
| 139 |
+
task = result["task"]
|
| 140 |
+
summary["task_id"] = task.get("id")
|
| 141 |
+
summary["task_title"] = task.get("title", "")[:50] if task.get("title") else None
|
| 142 |
+
|
| 143 |
+
if "tasks" in result:
|
| 144 |
+
tasks = result.get("tasks", [])
|
| 145 |
+
summary["task_count"] = len(tasks) if isinstance(tasks, list) else 0
|
| 146 |
+
|
| 147 |
+
if "updated_count" in result:
|
| 148 |
+
summary["updated_count"] = result["updated_count"]
|
| 149 |
+
|
| 150 |
+
if "deleted_count" in result:
|
| 151 |
+
summary["deleted_count"] = result["deleted_count"]
|
| 152 |
+
|
| 153 |
+
if "message" in result:
|
| 154 |
+
# Truncate long messages
|
| 155 |
+
msg = result["message"]
|
| 156 |
+
summary["message"] = msg[:100] + "..." if len(msg) > 100 else msg
|
| 157 |
+
|
| 158 |
+
return summary
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def _persist_audit_log(log_entry: dict) -> None:
|
| 162 |
+
"""Persist audit log to database for querying.
|
| 163 |
+
|
| 164 |
+
[From]: T058 - Add audit logging for all MCP tool invocations
|
| 165 |
+
|
| 166 |
+
Args:
|
| 167 |
+
log_entry: The audit log entry to persist
|
| 168 |
+
"""
|
| 169 |
+
# Note: This could be extended to write to an audit_logs table
|
| 170 |
+
# For now, file-based logging is sufficient
|
| 171 |
+
pass
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def get_user_activity_summary(
|
| 175 |
+
user_id: str | UUID,
|
| 176 |
+
limit: int = 100
|
| 177 |
+
) -> list[dict[str, Any]]:
|
| 178 |
+
"""Get a summary of user activity from audit logs.
|
| 179 |
+
|
| 180 |
+
[From]: T058 - Add audit logging for all MCP tool invocations
|
| 181 |
+
|
| 182 |
+
Args:
|
| 183 |
+
user_id: User ID to get activity for
|
| 184 |
+
limit: Maximum number of entries to return
|
| 185 |
+
|
| 186 |
+
Returns:
|
| 187 |
+
List of audit log entries for the user
|
| 188 |
+
"""
|
| 189 |
+
# Read audit log file and filter by user_id
|
| 190 |
+
try:
|
| 191 |
+
with open("logs/audit.log", "r") as f:
|
| 192 |
+
user_entries = []
|
| 193 |
+
for line in f:
|
| 194 |
+
try:
|
| 195 |
+
entry = json.loads(line.split(" | ", 2)[-1])
|
| 196 |
+
if entry.get("user_id") == str(user_id):
|
| 197 |
+
user_entries.append(entry)
|
| 198 |
+
if len(user_entries) >= limit:
|
| 199 |
+
break
|
| 200 |
+
except (json.JSONDecodeError, IndexError):
|
| 201 |
+
continue
|
| 202 |
+
return user_entries
|
| 203 |
+
except FileNotFoundError:
|
| 204 |
+
return []
|
| 205 |
+
|
| 206 |
+
|
| 207 |
+
# Decorator for automatic audit logging of MCP tools
|
| 208 |
+
def audit_log(tool_name: Optional[str] = None):
    """Decorator that audit-logs every call to an async MCP tool.

    [From]: T058 - Add audit logging for all MCP tool invocations

    Wraps an async function so each invocation (success or failure) is
    recorded via log_tool_invocation, including wall-clock duration. On
    failure the exception is logged and then re-raised.

    Args:
        tool_name: Optional override for the logged tool name; defaults to
            the wrapped function's __name__.

    Usage:
        @audit_log("add_task")
        async def add_task(user_id: str, title: str, ...):
            ...
    """
    import functools
    import time

    def decorator(func):
        @functools.wraps(func)
        async def wrapper(*args, **kwargs):
            resolved_name = tool_name or func.__name__
            started = time.time()

            # Best-effort user extraction: keyword arg first, then the first
            # positional argument (tools conventionally take user_id first).
            uid = kwargs.get("user_id") or (args[0] if args else None)

            try:
                outcome = await func(*args, **kwargs)
            except Exception as exc:
                log_tool_invocation(
                    tool_name=resolved_name,
                    user_id=uid or "unknown",
                    args=kwargs,
                    result={},
                    execution_time_ms=(time.time() - started) * 1000,
                    error=str(exc)
                )
                raise

            log_tool_invocation(
                tool_name=resolved_name,
                user_id=uid or "unknown",
                args=kwargs,
                result=outcome,
                execution_time_ms=(time.time() - started) * 1000
            )
            return outcome

        return wrapper
    return decorator
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
# Explicit public surface of this module; "_"-prefixed helpers stay private.
__all__ = [
    "log_tool_invocation",
    "get_user_activity_summary",
    "audit_log",
]
|
auth.py
ADDED
|
@@ -0,0 +1,384 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Authentication API endpoints.
|
| 2 |
+
|
| 3 |
+
[Task]: T017
|
| 4 |
+
[From]: specs/001-user-auth/contracts/openapi.yaml, specs/001-user-auth/plan.md
|
| 5 |
+
"""
|
| 6 |
+
import re
from datetime import datetime, timedelta, timezone
from typing import Optional

from fastapi import APIRouter, Cookie, Depends, Header, HTTPException, status
from fastapi.responses import JSONResponse, Response
from sqlmodel import Session, select

from api.deps import get_current_user
from core.config import get_settings
from core.database import get_session
from core.security import create_access_token, decode_access_token, get_password_hash, verify_password
from models.user import User, UserCreate, UserRead, UserLogin
|
| 19 |
+
|
| 20 |
+
settings = get_settings()
|
| 21 |
+
|
| 22 |
+
router = APIRouter(prefix="/api/auth", tags=["Authentication"])
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def validate_email_format(email: str) -> bool:
    """Check that an email address has a plausible shape.

    Requires at least one non-"@" character before the "@", and a domain
    part containing a dot.

    [Task]: T019
    [From]: specs/001-user-auth/spec.md

    Args:
        email: Email address to validate.

    Returns:
        True when the format looks valid, False otherwise (including for
        empty/None input).
    """
    # Empty or missing input can never be a valid address.
    if not email:
        return False

    # local@domain.tld — deliberately loose; real validation happens on use.
    return re.match(r'^[^@]+@[^@]+\.[^@]+$', email) is not None
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def validate_password(password: str, min_length: int = 8) -> bool:
    """Validate password length.

    Generalized so callers can tighten the policy; the default of 8 keeps
    the original behavior for all existing call sites.

    [Task]: T020
    [From]: specs/001-user-auth/spec.md

    Args:
        password: Password to validate.
        min_length: Minimum accepted length (defaults to 8).

    Returns:
        True if the password meets the length requirement, False otherwise.
    """
    return len(password) >= min_length
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
@router.post("/sign-up", response_model=dict, status_code=status.HTTP_200_OK)
async def sign_up(
    user_data: UserCreate,
    session: Session = Depends(get_session)
):
    """Register a new user account.

    Validates email format and password length, checks email uniqueness,
    hashes the password with bcrypt, and creates the user row.

    Fix: database failures now chain the original exception (`from e`) so
    the root cause is preserved in server tracebacks instead of being
    silently discarded.

    [Task]: T018
    [From]: specs/001-user-auth/contracts/openapi.yaml

    Args:
        user_data: User registration data (email, password).
        session: Database session.

    Returns:
        Success response with message and user data (password excluded).

    Raises:
        HTTPException 400: Invalid email format or password too short.
        HTTPException 409: Email already registered.
        HTTPException 500: Database error while persisting the user.
    """
    # Validate email format before touching the database.
    if not validate_email_format(user_data.email):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Invalid email format"
        )

    # Validate password length (policy: minimum 8 characters).
    if not validate_password(user_data.password):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Password must be at least 8 characters"
        )

    # Reject duplicate registrations up front.
    existing_user = session.exec(
        select(User).where(User.email == user_data.email)
    ).first()
    if existing_user:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail="Email already registered"
        )

    # Only the bcrypt hash is ever stored.
    user = User(
        email=user_data.email,
        hashed_password=get_password_hash(user_data.password)
    )

    try:
        session.add(user)
        session.commit()
        session.refresh(user)
    except Exception as e:
        session.rollback()
        # Chain the cause so the underlying DB error appears in logs.
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to create user account"
        ) from e

    # Serialize through UserRead so the password hash is never returned.
    return {
        "success": True,
        "message": "Account created successfully",
        "user": UserRead.model_validate(user).model_dump(mode='json')
    }
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
def get_user_by_email(email: str, session: Session) -> Optional[User]:
    """Fetch the user row matching an email address.

    [Task]: T030
    [From]: specs/001-user-auth/plan.md

    Args:
        email: User email address.
        session: Database session.

    Returns:
        The matching User, or None when no account uses that email.
    """
    statement = select(User).where(User.email == email)
    return session.exec(statement).first()
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
@router.post("/sign-in", response_model=dict, status_code=status.HTTP_200_OK)
async def sign_in(
    user_data: UserLogin,
    session: Session = Depends(get_session)
):
    """Authenticate a user and issue a JWT access token.

    Looks up the account by email, checks the bcrypt hash, then returns the
    token both in the JSON body and as an httpOnly cookie.

    [Task]: T027
    [From]: specs/001-user-auth/contracts/openapi.yaml

    Args:
        user_data: Login credentials (email, password).
        session: Database session.

    Returns:
        JSONResponse with token, user payload and expiry, plus the
        `auth_token` httpOnly cookie.

    Raises:
        HTTPException 401: Invalid credentials (deliberately generic so
            callers cannot probe which emails are registered).
        HTTPException 500: Database or JWT generation error.
    """
    user = get_user_by_email(user_data.email, session)

    # One generic failure path for both unknown email and wrong password.
    if user is None or not verify_password(user_data.password, user.hashed_password):
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid email or password"
        )

    # Token and expiry share the same lifetime.
    token_lifetime = timedelta(days=settings.jwt_expiration_days)
    access_token = create_access_token(
        data={"sub": str(user.id)},
        expires_delta=token_lifetime
    )
    expires_at = datetime.utcnow() + token_lifetime

    body = {
        "success": True,
        "token": access_token,
        "user": UserRead.model_validate(user).model_dump(mode='json'),
        "expires_at": expires_at.isoformat() + "Z"
    }

    response = JSONResponse(content=body)

    # httpOnly keeps the token out of JS reach; secure only in production so
    # local HTTP development still works; samesite=lax mitigates CSRF.
    response.set_cookie(
        key="auth_token",
        value=access_token,
        httponly=True,
        secure=settings.environment == "production",
        samesite="lax",
        max_age=settings.jwt_expiration_days * 24 * 60 * 60,
        path="/"
    )

    return response
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
@router.get("/session", response_model=dict, status_code=status.HTTP_200_OK)
async def get_session(
    response: Response,
    Authorization: Optional[str] = Header(None),
    auth_token: Optional[str] = Cookie(None),
    session: Session = Depends(get_session)
):
    """Verify the JWT token and return user session data.

    Reads the token from the Authorization header (Bearer scheme) first,
    falling back to the `auth_token` httpOnly cookie.

    Fixes:
    - `Authorization` is now declared with Header(None). As a bare ``str``
      parameter FastAPI treated it as a *query* parameter, so the
      Authorization header was never actually read.
    - The JWT ``exp`` claim is a UTC epoch timestamp; it is now converted
      with an explicit UTC timezone. Plain ``datetime.fromtimestamp()``
      applied the server's local timezone and then appended "Z", reporting
      a wrong expiry on any non-UTC host.

    NOTE(review): this endpoint function shadows the module-level
    ``get_session`` imported from core.database; the Depends() default was
    evaluated before the shadowing so it still resolves correctly, but a
    rename would be safer.

    [Task]: T026
    [From]: specs/001-user-auth/contracts/openapi.yaml

    Args:
        response: FastAPI response object (unused; kept for signature
            compatibility).
        Authorization: Bearer token from the Authorization header.
        auth_token: JWT token from the httpOnly cookie.
        session: Database session.

    Returns:
        Session response with authentication status, user data, and expiry.

    Raises:
        HTTPException 401: Invalid, expired, or missing token.
    """
    token = None

    # Prefer the Authorization header: "Bearer <token>".
    if Authorization:
        try:
            scheme, header_token = Authorization.split()
            if scheme.lower() == "bearer":
                token = header_token
        except ValueError:
            pass  # Malformed header — fall through to the cookie.

    # Cookie fallback for browser clients.
    if not token and auth_token:
        token = auth_token

    if not token:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Not authenticated"
        )

    try:
        # Decode and verify the token signature/expiry.
        payload = decode_access_token(token)
        user_id = payload.get("sub")

        if not user_id:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Invalid token: user_id missing"
            )

        # The user must still exist — tokens for deleted accounts are rejected.
        user = session.get(User, user_id)
        if not user:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="User not found"
            )

        # exp is seconds since the epoch in UTC (standard JWT claim).
        exp = payload.get("exp")
        expires_at = None
        if exp:
            expires_at = (
                datetime.fromtimestamp(exp, tz=timezone.utc)
                .replace(tzinfo=None)
                .isoformat() + "Z"
            )

        return {
            "authenticated": True,
            "user": UserRead.model_validate(user).model_dump(mode='json'),
            "expires_at": expires_at
        }

    except HTTPException:
        raise
    except Exception as e:
        # Any decode/lookup failure maps to a generic 401, cause chained.
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Could not validate credentials"
        ) from e
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
@router.get("/users/me")
async def get_users_me(
    current_user: User = Depends(get_current_user)
):
    """Return the authenticated user's profile.

    Example protected endpoint: authentication is enforced entirely by the
    get_current_user dependency.

    [Task]: T038
    [From]: specs/001-user-auth/plan.md

    Args:
        current_user: Authenticated user resolved from the JWT.

    Returns:
        Serialized user data (password hash excluded via UserRead).

    Raises:
        HTTPException 401: If not authenticated.
    """
    profile = UserRead.model_validate(current_user)
    return profile.model_dump(mode='json')
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
@router.post("/sign-out", status_code=status.HTTP_200_OK)
async def sign_out(
    response: Response,
    current_user: User = Depends(get_current_user)
):
    """Log the current user out by clearing the auth cookie.

    JWTs are stateless, so nothing is invalidated server-side — the client
    simply loses its `auth_token` cookie.

    [Task]: T043
    [From]: specs/001-user-auth/contracts/openapi.yaml

    Args:
        response: FastAPI response object (unused; a fresh JSONResponse is
            returned instead).
        current_user: Authenticated user; required only so logout demands a
            valid session.

    Returns:
        Success message with the cookie cleared.

    Raises:
        HTTPException 401: If not authenticated.
    """
    result = JSONResponse(content={
        "success": True,
        "message": "Logged out successfully"
    })

    # Expire the httpOnly cookie that sign-in set.
    result.delete_cookie(
        key="auth_token",
        path="/",
        samesite="lax"
    )

    return result
|
chat.py
ADDED
|
@@ -0,0 +1,478 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Chat API endpoint for AI-powered task management.
|
| 2 |
+
|
| 3 |
+
[Task]: T015, T071
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
|
| 6 |
+
This endpoint provides a conversational interface for task management.
|
| 7 |
+
Users can create, list, update, complete, and delete tasks through natural language.
|
| 8 |
+
|
| 9 |
+
Also includes WebSocket endpoint for real-time progress streaming.
|
| 10 |
+
"""
|
| 11 |
+
import uuid
|
| 12 |
+
import logging
|
| 13 |
+
import asyncio
|
| 14 |
+
from datetime import datetime
|
| 15 |
+
from typing import Annotated, Optional
|
| 16 |
+
from fastapi import APIRouter, HTTPException, status, Depends, WebSocket, WebSocketDisconnect, BackgroundTasks
|
| 17 |
+
from pydantic import BaseModel, Field, field_validator, ValidationError
|
| 18 |
+
from sqlmodel import Session
|
| 19 |
+
from sqlalchemy.exc import SQLAlchemyError
|
| 20 |
+
|
| 21 |
+
from core.database import get_db
|
| 22 |
+
from core.validators import validate_message_length
|
| 23 |
+
from core.security import decode_access_token
|
| 24 |
+
from models.message import Message, MessageRole
|
| 25 |
+
from services.security import sanitize_message
|
| 26 |
+
from models.conversation import Conversation
|
| 27 |
+
from ai_agent import run_agent_with_streaming, is_gemini_configured
|
| 28 |
+
from services.conversation import (
|
| 29 |
+
get_or_create_conversation,
|
| 30 |
+
load_conversation_history,
|
| 31 |
+
update_conversation_timestamp
|
| 32 |
+
)
|
| 33 |
+
from services.rate_limiter import check_rate_limit
|
| 34 |
+
from ws_manager.manager import manager
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# Dedicated logger for chat endpoint failures, kept separate from access
# logs so errors can be routed/filtered independently.
error_logger = logging.getLogger("api.errors")
error_logger.setLevel(logging.ERROR)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# Request/Response models
|
| 43 |
+
class ChatRequest(BaseModel):
    """Request model for the chat endpoint.

    Carries the user's message plus an optional conversation to continue.

    [From]: specs/004-ai-chatbot/plan.md - API Contract
    """
    # Raw user message; Field enforces 1..10,000 characters (FR-042).
    message: str = Field(
        ...,
        description="User message content",
        min_length=1,
        max_length=10000  # FR-042
    )
    # When omitted, the endpoint starts a new conversation.
    conversation_id: Optional[str] = Field(
        None,
        description="Optional conversation ID to continue existing conversation"
    )

    @field_validator('message')
    @classmethod
    def validate_message(cls, v: str) -> str:
        """Reject empty/whitespace-only messages and return the stripped text.

        The explicit length check duplicates the Field max_length constraint;
        kept as defense in depth.
        """
        if not v or not v.strip():
            raise ValueError("Message content cannot be empty")
        if len(v) > 10000:
            raise ValueError("Message content exceeds maximum length of 10,000 characters")
        return v.strip()
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class TaskReference(BaseModel):
    """Reference to a task created or modified by the AI in this turn."""
    # Task identifier (serialized as a string).
    id: str
    # Task title as stored.
    title: str
    # Optional free-form details.
    description: Optional[str] = None
    # Due date as a string — presumably ISO-8601; confirm against task model.
    due_date: Optional[str] = None
    # Priority label — presumably matches the task priority enum; verify.
    priority: Optional[str] = None
    # Completion flag; defaults to not completed.
    completed: bool = False
|
| 79 |
+
|
| 80 |
+
class ChatResponse(BaseModel):
    """Response model for the chat endpoint.

    [From]: specs/004-ai-chatbot/plan.md - API Contract
    """
    # AI assistant's natural-language reply.
    response: str = Field(
        ...,
        description="AI assistant's text response"
    )
    # ID of the conversation this exchange belongs to (new or existing).
    conversation_id: str = Field(
        ...,
        description="Conversation ID (new or existing)"
    )
    # Tasks the agent created or changed while handling this message.
    tasks: list[TaskReference] = Field(
        default_factory=list,
        description="List of tasks created or modified in this interaction"
    )
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
# Router for the chat endpoints; all paths are rooted at /api and grouped
# under the "chat" tag in the OpenAPI docs.
router = APIRouter(prefix="/api", tags=["chat"])
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
@router.post("/{user_id}/chat", response_model=ChatResponse, status_code=status.HTTP_200_OK)
async def chat(
    user_id: str,
    request: ChatRequest,
    background_tasks: BackgroundTasks,
    db: Session = Depends(get_db)
):
    """Process user message through AI agent and return response.

    [From]: specs/004-ai-chatbot/spec.md - US1

    This endpoint:
    1. Validates user input and rate limits
    2. Gets or creates conversation
    3. Runs AI agent with WebSocket progress streaming
    4. Returns AI response immediately
    5. Saves messages to DB in background (non-blocking)

    Args:
        user_id: User ID (UUID string from path)
        request: Chat request with message and optional conversation_id
        background_tasks: FastAPI background tasks for non-blocking DB saves
        db: Database session

    Returns:
        ChatResponse with AI response, conversation_id, and task references

    Raises:
        HTTPException 400: Invalid message content or malformed UUIDs
        HTTPException 429: Daily message limit reached
        HTTPException 503: AI service unavailable or misconfigured
        HTTPException 504: AI service timed out
    """
    # Check if Gemini API is configured
    # [From]: specs/004-ai-chatbot/tasks.md - T022
    # [From]: T060 - Add comprehensive error messages for edge cases
    if not is_gemini_configured():
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail={
                "error": "AI service unavailable",
                "message": "The AI service is currently not configured. Please ensure GEMINI_API_KEY is set in the environment.",
                "suggestion": "Contact your administrator or check your API key configuration."
            }
        )

    # Validate user_id format
    # [From]: T060 - Add comprehensive error messages for edge cases
    try:
        user_uuid = uuid.UUID(user_id)
    except ValueError:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "Invalid user ID",
                "message": f"User ID '{user_id}' is not a valid UUID format.",
                "expected_format": "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx",
                "suggestion": "Ensure you are using a valid UUID for the user_id path parameter."
            }
        )

    # Validate message content
    # [From]: T060 - Add comprehensive error messages for edge cases
    try:
        validated_message = validate_message_length(request.message)
    except ValueError as e:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "Message validation failed",
                "message": str(e),
                "max_length": 10000,
                "suggestion": "Keep your message under 10,000 characters and ensure it contains meaningful content."
            }
        )

    # Sanitize message to prevent prompt injection
    # [From]: T057 - Implement prompt injection sanitization
    # [From]: T060 - Add comprehensive error messages for edge cases
    try:
        sanitized_message = sanitize_message(validated_message)
    except ValueError as e:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail={
                "error": "Message content blocked",
                "message": str(e),
                "suggestion": "Please rephrase your message without attempting to manipulate system instructions."
            }
        )

    # Check rate limit
    # [From]: specs/004-ai-chatbot/spec.md - NFR-011
    # [From]: T021 - Implement daily message limit enforcement (100/day)
    # [From]: T060 - Add comprehensive error messages for edge cases
    try:
        # `remaining` is currently unused; kept for potential response headers.
        allowed, remaining, reset_time = check_rate_limit(db, user_uuid)

        if not allowed:
            raise HTTPException(
                status_code=status.HTTP_429_TOO_MANY_REQUESTS,
                detail={
                    "error": "Rate limit exceeded",
                    "message": "You have reached the daily message limit. Please try again later.",
                    "limit": 100,
                    "resets_at": reset_time.isoformat() if reset_time else None,
                    "suggestion": "Free tier accounts are limited to 100 messages per day. Upgrade for unlimited access."
                }
            )
    except HTTPException:
        # Re-raise HTTP exceptions (rate limit errors)
        raise
    except Exception as e:
        # Log unexpected errors but don't block the request
        error_logger.error(f"Rate limit check failed for user {user_id}: {e}")
        # Continue processing - fail open for rate limit errors

    # Get or create conversation
    # [From]: T016 - Implement conversation history loading
    # [From]: T035 - Handle auto-deleted conversations gracefully
    # [From]: T060 - Add comprehensive error messages for edge cases
    conversation_id: uuid.UUID

    if request.conversation_id:
        # Load existing conversation using service
        try:
            conv_uuid = uuid.UUID(request.conversation_id)
        except ValueError:
            # Invalid conversation_id format
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail={
                    "error": "Invalid conversation ID",
                    "message": f"Conversation ID '{request.conversation_id}' is not a valid UUID format.",
                    "suggestion": "Provide a valid UUID or omit the conversation_id to start a new conversation."
                }
            )

        try:
            conversation = get_or_create_conversation(
                db=db,
                user_id=user_uuid,
                conversation_id=conv_uuid
            )
            conversation_id = conversation.id
        except (KeyError, ValueError) as e:
            # Conversation may have been auto-deleted (90-day policy) or otherwise not found
            # [From]: T035 - Handle auto-deleted conversations gracefully
            # Create a new conversation instead of failing
            conversation = get_or_create_conversation(
                db=db,
                user_id=user_uuid
            )
            conversation_id = conversation.id
    else:
        # Create new conversation using service
        conversation = get_or_create_conversation(
            db=db,
            user_id=user_uuid
        )
        conversation_id = conversation.id

    # Load conversation history using service
    # [From]: T016 - Implement conversation history loading
    # [From]: T060 - Add comprehensive error messages for edge cases
    try:
        conversation_history = load_conversation_history(
            db=db,
            conversation_id=conversation_id
        )
    except SQLAlchemyError as e:
        error_logger.error(f"Database error loading conversation history for {conversation_id}: {e}")
        # Continue with empty history if load fails
        conversation_history = []

    # Prepare user message for background save
    # NOTE(review): datetime.utcnow() is naive; the message table columns look
    # timezone-aware — confirm the driver stores these as UTC.
    user_message_id = uuid.uuid4()
    user_message_data = {
        "id": user_message_id,
        "conversation_id": conversation_id,
        "user_id": user_uuid,
        "role": MessageRole.USER,
        "content": sanitized_message,
        "created_at": datetime.utcnow()
    }

    # Add current user message to conversation history for AI processing
    # This is critical - the agent needs the user's current message in context
    messages_for_agent = conversation_history + [
        {"role": "user", "content": sanitized_message}
    ]

    # Run AI agent with streaming (broadcasts WebSocket events)
    # [From]: T014 - Initialize OpenAI Agents SDK with Gemini
    # [From]: T072 - Use streaming agent for real-time progress
    # [From]: T060 - Add comprehensive error messages for edge cases
    try:
        ai_response_text = await run_agent_with_streaming(
            messages=messages_for_agent,
            user_id=user_id
        )
    except ValueError as e:
        # Configuration errors (missing API key, invalid model)
        # [From]: T022 - Add error handling for Gemini API unavailability
        error_logger.error(f"AI configuration error for user {user_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail={
                "error": "AI service configuration error",
                "message": str(e),
                "suggestion": "Verify GEMINI_API_KEY and GEMINI_MODEL are correctly configured."
            }
        )
    except ConnectionError as e:
        # Network/connection issues
        # [From]: T022 - Add error handling for Gemini API unavailability
        error_logger.error(f"AI connection error for user {user_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail={
                "error": "AI service unreachable",
                "message": "Could not connect to the AI service. Please check your network connection.",
                "suggestion": "If the problem persists, the AI service may be temporarily down."
            }
        )
    except TimeoutError as e:
        # Timeout errors
        # [From]: T022 - Add error handling for Gemini API unavailability
        error_logger.error(f"AI timeout error for user {user_id}: {e}")
        raise HTTPException(
            status_code=status.HTTP_504_GATEWAY_TIMEOUT,
            detail={
                "error": "AI service timeout",
                "message": "The AI service took too long to respond. Please try again.",
                "suggestion": "Your message may be too complex. Try breaking it into smaller requests."
            }
        )
    except Exception as e:
        # Other errors (rate limits, authentication, context, etc.)
        # [From]: T022 - Add error handling for Gemini API unavailability
        error_logger.error(f"Unexpected AI error for user {user_id}: {type(e).__name__}: {e}")
        raise HTTPException(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail={
                "error": "AI service error",
                "message": f"An unexpected error occurred: {str(e)}",
                "suggestion": "Please try again later or contact support if the problem persists."
            }
        )

    # Prepare AI response for background save
    ai_message_data = {
        "id": uuid.uuid4(),
        "conversation_id": conversation_id,
        "user_id": user_uuid,
        "role": MessageRole.ASSISTANT,
        "content": ai_response_text,
        "created_at": datetime.utcnow()
    }

    # Save messages to DB in background (non-blocking)
    # This significantly improves response time
    def save_messages_to_db():
        """Background task to save messages to database.

        Uses its own Session (not the request-scoped `db`) because the
        request session may already be closed by the time this runs.
        """
        try:
            from core.database import engine
            from sqlmodel import Session

            # Create a new session for background task
            bg_db = Session(engine)

            try:
                # Save user message
                user_msg = Message(**user_message_data)
                bg_db.add(user_msg)

                # Save AI response
                ai_msg = Message(**ai_message_data)
                bg_db.add(ai_msg)

                bg_db.commit()

                # Update conversation timestamp
                try:
                    update_conversation_timestamp(db=bg_db, conversation_id=conversation_id)
                except SQLAlchemyError as e:
                    error_logger.error(f"Database error updating conversation timestamp for {conversation_id}: {e}")

            except SQLAlchemyError as e:
                error_logger.error(f"Background task: Database error saving messages for user {user_id}: {e}")
                bg_db.rollback()
            finally:
                bg_db.close()
        except Exception as e:
            error_logger.error(f"Background task: Unexpected error saving messages for user {user_id}: {e}")

    background_tasks.add_task(save_messages_to_db)

    # TODO: Parse AI response for task references
    # This will be enhanced in future tasks to extract task IDs from AI responses
    task_references: list[TaskReference] = []

    return ChatResponse(
        response=ai_response_text,
        conversation_id=str(conversation_id),
        tasks=task_references
    )
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
@router.websocket("/ws/{user_id}/chat")
async def websocket_chat(
    websocket: WebSocket,
    user_id: str,
    db: Session = Depends(get_db)
):
    """WebSocket endpoint for real-time chat progress updates.

    [From]: specs/004-ai-chatbot/research.md - Section 4
    [Task]: T071

    This endpoint provides a WebSocket connection for receiving real-time
    progress events during AI agent execution. Events include:
    - connection_established: Confirmation of successful connection
    - agent_thinking: AI agent is processing
    - tool_starting: A tool is about to execute
    - tool_progress: Tool execution progress (e.g., "Found 3 tasks")
    - tool_complete: Tool finished successfully
    - tool_error: Tool execution failed
    - agent_done: AI agent finished processing

    Note: Authentication is handled implicitly by the frontend - users must
    be logged in to access the chat page. The WebSocket only broadcasts
    progress updates (not sensitive data), so strict auth is bypassed here.

    Connection URL format:
        ws://localhost:8000/ws/{user_id}/chat

    Args:
        websocket: The WebSocket connection instance
        user_id: User ID from URL path (used to route progress events)
        db: Database session (for any future DB operations)

    The connection is kept alive and can receive messages from the client,
    though currently it's primarily used for server-to-client progress updates.
    """
    # Connect the WebSocket (manager handles accept)
    # [From]: specs/004-ai-chatbot/research.md - Section 4
    await manager.connect(user_id, websocket)

    try:
        # Keep connection alive and listen for client messages.
        # Currently we don't expect many client messages, but we maintain
        # the connection to receive any control messages.
        while True:
            # Wait for message from client
            data = await websocket.receive_text()

            # Handle client messages if needed.
            # Future: could handle ping/pong for connection health.
            if data:
                pass

    except WebSocketDisconnect:
        # Normal client disconnect — cleanup happens once in `finally` below.
        error_logger.info(f"WebSocket disconnected normally for user {user_id}")

    except Exception as e:
        # Unexpected error — log it; cleanup happens once in `finally` below.
        error_logger.error(f"WebSocket error for user {user_id}: {e}")

    finally:
        # Fix: previously manager.disconnect() was called both inside the
        # except handlers AND here, detaching the socket twice on every
        # abnormal exit. A single cleanup point in `finally` covers all paths.
        manager.disconnect(user_id, websocket)
|
config.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Application configuration and settings.
|
| 2 |
+
|
| 3 |
+
[Task]: T009
|
| 4 |
+
[From]: specs/001-user-auth/plan.md
|
| 5 |
+
|
| 6 |
+
[Task]: T003
|
| 7 |
+
[From]: specs/004-ai-chatbot/plan.md
|
| 8 |
+
"""
|
| 9 |
+
import os
|
| 10 |
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
| 11 |
+
from functools import lru_cache
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Settings(BaseSettings):
    """Application settings loaded from environment variables.

    Values come from the process environment and, for local development,
    from a ``.env`` file. Variable names are matched case-insensitively
    (e.g. DATABASE_URL -> database_url); unknown variables are ignored.
    Fields without defaults are required — construction fails if unset.
    """

    # Database
    database_url: str  # required — connection string consumed by core.database

    # JWT
    jwt_secret: str  # required — token signing secret
    jwt_algorithm: str = "HS256"
    jwt_expiration_days: int = 7

    # CORS
    frontend_url: str  # required — presumably the allowed CORS origin; confirm at app setup

    # Environment
    environment: str = "development"

    # Gemini API (Phase III: AI Chatbot)
    gemini_api_key: str | None = None  # Optional for migration/setup
    gemini_model: str = "gemini-2.0-flash-exp"

    model_config = SettingsConfigDict(
        env_file=".env",
        case_sensitive=False,
        # Support legacy Better Auth environment variables
        env_prefix="",
        extra="ignore"
    )
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
@lru_cache()
def get_settings() -> Settings:
    """Get cached settings instance.

    The lru_cache makes this a process-wide singleton: the environment is
    read and validated once on the first call, and every later call returns
    the same Settings object.

    Returns:
        Settings: Application settings

    Raises:
        ValueError: If required environment variables are not set
    """
    return Settings()
|
conversation.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Conversation model for AI chatbot.
|
| 2 |
+
|
| 3 |
+
[Task]: T005
|
| 4 |
+
[From]: specs/004-ai-chatbot/plan.md
|
| 5 |
+
"""
|
| 6 |
+
import uuid
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from typing import Optional
|
| 9 |
+
from sqlmodel import Field, SQLModel
|
| 10 |
+
from sqlalchemy import Column, DateTime
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class Conversation(SQLModel, table=True):
    """Conversation model representing a chat session.

    A conversation groups multiple messages between a user and the AI assistant.
    Conversations persist indefinitely (until 90-day auto-deletion).
    """

    __tablename__ = "conversation"

    # Surrogate primary key, generated client-side.
    id: uuid.UUID = Field(default_factory=uuid.uuid4, primary_key=True)
    # Owning user; indexed since conversations are looked up per user.
    user_id: uuid.UUID = Field(foreign_key="users.id", index=True)
    # NOTE(review): default_factory uses naive datetime.utcnow() while the
    # columns are declared timezone-aware — confirm the driver stores UTC.
    created_at: datetime = Field(
        default_factory=datetime.utcnow,
        sa_column=Column(DateTime(timezone=True), nullable=False)
    )
    # Refreshed after each exchange (presumably via a service helper).
    updated_at: datetime = Field(
        default_factory=datetime.utcnow,
        sa_column=Column(DateTime(timezone=True), nullable=False)
    )
|
database.py
ADDED
|
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Database connection and session management.
|
| 2 |
+
|
| 3 |
+
[Task]: T010
|
| 4 |
+
[From]: specs/001-user-auth/plan.md
|
| 5 |
+
|
| 6 |
+
[Task]: T004
|
| 7 |
+
[From]: specs/004-ai-chatbot/plan.md
|
| 8 |
+
"""
|
| 9 |
+
from sqlmodel import create_engine, Session
|
| 10 |
+
from typing import Generator
|
| 11 |
+
|
| 12 |
+
from core.config import get_settings
|
| 13 |
+
|
| 14 |
+
# Settings are resolved once at import time; the engine below is a
# module-level singleton shared by all sessions.
settings = get_settings()

# Create database engine with connection pooling
# Optimized for conversation/message table queries in Phase III
# SQLite doesn't support connection pooling, so we conditionally apply parameters
is_sqlite = settings.database_url.startswith("sqlite:")
is_postgresql = settings.database_url.startswith("postgresql:") or settings.database_url.startswith("postgres://")

if is_sqlite:
    # SQLite configuration (no pooling)
    engine = create_engine(
        settings.database_url,
        echo=settings.environment == "development",  # Log SQL in development
        connect_args={"check_same_thread": False}  # Allow multithreaded access
    )
elif is_postgresql:
    # PostgreSQL configuration with connection pooling
    engine = create_engine(
        settings.database_url,
        echo=settings.environment == "development",  # Log SQL in development
        pool_pre_ping=True,  # Verify connections before using
        pool_size=10,  # Number of connections to maintain
        max_overflow=20,  # Additional connections beyond pool_size
        pool_recycle=3600,  # Recycle connections after 1 hour (prevents stale connections)
        pool_timeout=30,  # Timeout for getting connection from pool
        connect_args={
            "connect_timeout": 10,  # Connection timeout
        }
    )
else:
    # Default configuration for other databases
    engine = create_engine(
        settings.database_url,
        echo=settings.environment == "development",
        pool_pre_ping=True
    )
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def get_session() -> Generator[Session, None, None]:
    """Yield a database session that is always released afterwards.

    Designed for FastAPI's ``Depends``: the framework drives the generator,
    so the session is closed when the request finishes — even on error.

    Yields:
        Session: SQLModel database session bound to the module-level engine.

    Example:
        ```python
        @app.get("/users")
        def read_users(session: Session = Depends(get_session)):
            users = session.exec(select(User)).all()
            return users
        ```
    """
    session = Session(engine)
    try:
        yield session
    finally:
        # Equivalent to the `with Session(engine)` context-manager form:
        # Session.__exit__ simply closes the session.
        session.close()
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
# Alias for compatibility with chat.py
# (both names resolve to the same generator dependency)
get_db = get_session
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def init_db():
    """Initialize database tables.

    Creates all tables defined in SQLModel models.
    Should be called on application startup.

    The model modules are imported purely for their side effect: importing
    them registers their tables on SQLModel.metadata so create_all sees them.
    """
    from sqlmodel import SQLModel
    import models.user  # Import models to register them with SQLModel
    import models.task  # Import task model

    # Phase III: Import conversation and message models
    try:
        import models.conversation
        import models.message
    except ImportError:
        # Models not yet created (Phase 2 pending)
        pass

    SQLModel.metadata.create_all(engine)
|
deps.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Authentication dependencies for protected routes.
|
| 2 |
+
|
| 3 |
+
[Task]: T036, T037
|
| 4 |
+
[From]: specs/001-user-auth/plan.md
|
| 5 |
+
"""
|
| 6 |
+
from typing import Optional
|
| 7 |
+
from fastapi import Depends, HTTPException, status, Cookie
|
| 8 |
+
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
| 9 |
+
from sqlmodel import Session, select
|
| 10 |
+
|
| 11 |
+
from models.user import User
|
| 12 |
+
from core.database import get_session
|
| 13 |
+
from core.security import decode_access_token
|
| 14 |
+
|
| 15 |
+
# Optional: HTTP Bearer scheme for Authorization header.
# auto_error=False so a missing header does not raise immediately —
# get_current_user falls back to the auth cookie instead.
security = HTTPBearer(auto_error=False)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
async def get_current_user(
    credentials: Optional[HTTPAuthorizationCredentials] = Depends(security),
    auth_token: Optional[str] = Cookie(None),
    session: Session = Depends(get_session)
) -> User:
    """Get current authenticated user from JWT token.

    Extracts JWT from Authorization header or httpOnly cookie,
    verifies signature, queries database for user.

    Fix: removed the stray first parameter ``response: Optional[str] = None``.
    It was never used, and because it had no FastAPI marker the framework
    exposed it as a spurious optional ``response`` query parameter in the
    OpenAPI schema of every protected route.

    [Task]: T036, T037
    [From]: specs/001-user-auth/plan.md

    Args:
        credentials: HTTP Bearer credentials from Authorization header
        auth_token: JWT token from httpOnly cookie
        session: Database session

    Returns:
        User: Authenticated user object

    Raises:
        HTTPException 401: If token is invalid, expired, or missing
    """
    # Extract token from Authorization header or cookie
    token = None

    # Try Authorization header first
    if credentials:
        token = credentials.credentials

    # If no token in header, try cookie
    if not token and auth_token:
        token = auth_token

    if not token:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Not authenticated",
            headers={"WWW-Authenticate": "Bearer"},
        )

    try:
        # Decode and verify token (signature + expiry)
        payload = decode_access_token(token)
        user_id = payload.get("sub")

        if not user_id:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="Invalid token: user_id missing"
            )

        # Query user from database by primary key
        user = session.get(User, user_id)
        if not user:
            raise HTTPException(
                status_code=status.HTTP_401_UNAUTHORIZED,
                detail="User not found"
            )

        return user

    except HTTPException:
        raise
    except Exception as exc:
        # Collapse any decode/DB failure into a single 401 so no internal
        # detail leaks; chain the cause for server-side debugging.
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Could not validate credentials"
        ) from exc
|
logging.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Clean logging configuration for development.
|
| 2 |
+
|
| 3 |
+
Provides simple, readable logs for development with optional JSON mode for production.
|
| 4 |
+
"""
|
| 5 |
+
import logging
|
| 6 |
+
import logging.config
|
| 7 |
+
import sys
|
| 8 |
+
from typing import Optional
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class CleanFormatter(logging.Formatter):
    """Human-friendly single-line formatter for development consoles."""

    # ANSI escape sequences per level; RESET restores the default color.
    COLORS = {
        "DEBUG": "\033[36m",  # Cyan
        "INFO": "\033[32m",  # Green
        "WARNING": "\033[33m",  # Yellow
        "ERROR": "\033[31m",  # Red
        "CRITICAL": "\033[35m",  # Magenta
        "RESET": "\033[0m",  # Reset
    }

    def __init__(self, use_colors: bool = True):
        """Create the formatter.

        Args:
            use_colors: Emit ANSI color codes (turn off when logging to files).
        """
        super().__init__()
        self.use_colors = use_colors

    def format(self, record: logging.LogRecord) -> str:
        """Render *record* as one aligned, optionally colorized line."""
        level = record.levelname
        # Only the last dotted component of the logger name is shown.
        short_name = record.name.rsplit(".", 1)[-1]
        text = record.getMessage()

        if self.use_colors:
            paint = self.COLORS.get(level, "")
            prefix = f"{paint}{level:8}{self.COLORS['RESET']}"
        else:
            prefix = f"{level:8}"

        line = f"{prefix} {short_name:20} | {text}"

        # Append the traceback when the record carries exception info.
        if record.exc_info:
            line += "\n" + self.formatException(record.exc_info)

        return line
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def setup_logging(
    level: str = "INFO",
    json_mode: bool = False,
    quiet_sql: bool = True
) -> None:
    """Configure application-wide logging.

    Args:
        level: Minimum level name (DEBUG, INFO, WARNING, ERROR, CRITICAL).
        json_mode: Emit structured JSON lines (production) instead of colored text.
        quiet_sql: Raise SQL-related loggers to WARNING to cut query noise.
    """
    resolved = getattr(logging, level.upper(), logging.INFO)

    # Reset the root logger so repeated calls don't stack handlers.
    root = logging.root
    root.setLevel(resolved)
    root.handlers.clear()

    stream = logging.StreamHandler(sys.stdout)
    stream.setLevel(resolved)

    if json_mode:
        # Structured output for log aggregators; imports kept local since
        # they are only needed on this path.
        import json
        from datetime import datetime

        class JSONFormatter(logging.Formatter):
            def format(self, record):
                payload = {
                    "timestamp": datetime.utcnow().isoformat() + "Z",
                    "level": record.levelname,
                    "logger": record.name,
                    "message": record.getMessage(),
                }
                if record.exc_info:
                    payload["exception"] = self.formatException(record.exc_info)
                return json.dumps(payload)

        stream.setFormatter(JSONFormatter())
    else:
        stream.setFormatter(CleanFormatter(use_colors=True))

    root.addHandler(stream)

    # Tame chatty third-party loggers.
    noisy = {
        "uvicorn.access": logging.WARNING,
        "uvicorn.error": logging.ERROR,
        "fastapi": logging.INFO,
    }
    if quiet_sql:
        noisy["sqlalchemy.engine"] = logging.WARNING
        noisy["sqlalchemy.pool"] = logging.WARNING
        noisy["sqlmodel"] = logging.WARNING
    for logger_name, logger_level in noisy.items():
        logging.getLogger(logger_name).setLevel(logger_level)

    # Announce configuration (skipped in JSON mode to keep output machine-clean).
    if not json_mode:
        logging.getLogger(__name__).info(f"Logging configured at {level} level")
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def get_logger(name: str) -> logging.Logger:
    """Return the shared Logger registered under *name*.

    Args:
        name: Logger name (typically ``__name__`` of the calling module)

    Returns:
        The logging.Logger instance for that name
    """
    return logging.getLogger(name)
|
mcp_server/tools/CLAUDE.md
CHANGED
|
@@ -3,10 +3,5 @@
|
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
| ID | Time | T | Title | Read |
|
| 9 |
-
|----|------|---|-------|------|
|
| 10 |
-
| #684 | 11:00 PM | 🟣 | Priority Extraction Enhanced with Comprehensive Natural Language Patterns | ~488 |
|
| 11 |
-
| #677 | 10:57 PM | 🔵 | MCP Add Task Tool Implements Natural Language Task Creation | ~362 |
|
| 12 |
</claude-mem-context>
|
|
|
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
+
*No recent activity*
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
</claude-mem-context>
|
message.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Message model for AI chatbot.
|
| 2 |
+
|
| 3 |
+
[Task]: T006
|
| 4 |
+
[From]: specs/004-ai-chatbot/plan.md
|
| 5 |
+
"""
|
| 6 |
+
import uuid
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from typing import Optional
|
| 9 |
+
from sqlmodel import Field, SQLModel
|
| 10 |
+
from sqlalchemy import Column, DateTime, Text, String as SQLString, Index
|
| 11 |
+
from enum import Enum
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class MessageRole(str, Enum):
    """Author of a chat message: the human user or the AI assistant."""

    USER = "user"
    ASSISTANT = "assistant"
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class Message(SQLModel, table=True):
    """Message model representing a single message in a conversation.

    Messages can be from the user or the AI assistant.
    All messages are persisted to enable conversation history replay.
    """

    __tablename__ = "message"

    # Surrogate primary key, generated client-side.
    id: uuid.UUID = Field(default_factory=uuid.uuid4, primary_key=True)
    # Owning conversation; indexed for history lookups.
    conversation_id: uuid.UUID = Field(foreign_key="conversation.id", index=True)
    # Author of the message; indexed for per-user rate-limit counting queries.
    user_id: uuid.UUID = Field(foreign_key="users.id", index=True)
    # Stored as a short VARCHAR(10) column holding "user" / "assistant".
    role: MessageRole = Field(default=MessageRole.USER, sa_column=Column(SQLString(10), nullable=False, index=True))
    content: str = Field(
        ...,
        sa_column=Column(Text, nullable=False),
        max_length=10000  # FR-042: Maximum message length
    )
    # NOTE(review): default_factory produces a naive datetime while the column
    # is declared timezone-aware — presumably stored as UTC; confirm against
    # the DB driver's handling of naive values.
    created_at: datetime = Field(
        default_factory=datetime.utcnow,
        sa_column=Column(DateTime(timezone=True), nullable=False, index=True)
    )

    # Composite index to speed "messages in a conversation ordered by time".
    __table_args__ = (
        Index('idx_message_conversation_created', 'conversation_id', 'created_at'),
    )
|
middleware.py
ADDED
|
@@ -0,0 +1,95 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""JWT middleware for FastAPI.
|
| 2 |
+
|
| 3 |
+
[Task]: T012
|
| 4 |
+
[From]: specs/001-user-auth/quickstart.md
|
| 5 |
+
"""
|
| 6 |
+
from typing import Callable
|
| 7 |
+
from fastapi import Request, HTTPException, status
|
| 8 |
+
from fastapi.responses import JSONResponse
|
| 9 |
+
from starlette.middleware.base import BaseHTTPMiddleware
|
| 10 |
+
|
| 11 |
+
from core.security import JWTManager
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class JWTMiddleware(BaseHTTPMiddleware):
    """JWT authentication middleware.

    Validates JWT tokens for all requests except public paths.
    Adds user_id to request.state for downstream dependency injection.
    """

    def __init__(self, app, excluded_paths: list[str] = None):
        """Initialize JWT middleware.

        Args:
            app: FastAPI application instance
            excluded_paths: Optional list of additional paths to exclude
                from JWT validation (merged with the built-in public paths)
        """
        super().__init__(app)
        self.excluded_paths = excluded_paths or []
        # Built-in public endpoints (docs, health) plus caller exclusions.
        self.public_paths = [
            "/",
            "/docs",
            "/redoc",
            "/openapi.json",
            "/health",
        ] + self.excluded_paths

    async def dispatch(self, request: Request, call_next: Callable):
        """Process each request with JWT validation.

        Token resolution order: the ``Authorization`` header is tried first,
        then the ``auth_token`` httpOnly cookie.

        Args:
            request: Incoming HTTP request
            call_next: Next middleware or route handler

        Returns:
            HTTP response; a 401 JSON response when no token is present,
            a 500 JSON response on unexpected verification errors.

        Raises:
            HTTPException: Re-raised from token verification failures.
        """
        # Skip JWT validation for public paths
        if request.url.path in self.public_paths:
            return await call_next(request)

        # Extract token from Authorization header OR httpOnly cookie
        token = None

        # Try Authorization header first
        authorization = request.headers.get("Authorization")
        if authorization:
            try:
                token = JWTManager.get_token_from_header(authorization)
            except Exception:
                # Malformed header: fall through to cookie lookup.
                # (Was a bare `except:`, which also swallowed SystemExit /
                # KeyboardInterrupt; narrowed to Exception.)
                pass

        # If no token in header, try httpOnly cookie
        if not token:
            auth_token = request.cookies.get("auth_token")
            if auth_token:
                token = auth_token

        # If still no token, return 401
        if not token:
            return JSONResponse(
                status_code=status.HTTP_401_UNAUTHORIZED,
                content={"detail": "Not authenticated"},
                headers={"WWW-Authenticate": "Bearer"},
            )

        try:
            # Verify token and extract user_id
            user_id = JWTManager.get_user_id_from_token(token)

            # Add user_id to request state for route handlers
            request.state.user_id = user_id

            return await call_next(request)

        except HTTPException:
            # Propagate auth failures (expired/invalid token) unchanged.
            raise
        except Exception:
            # Never leak internal error details from the auth layer.
            return JSONResponse(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                content={"detail": "Internal server error during authentication"},
            )
|
migrations/CLAUDE.md
CHANGED
|
@@ -3,15 +3,5 @@
|
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
| ID | Time | T | Title | Read |
|
| 9 |
-
|----|------|---|-------|------|
|
| 10 |
-
| #10 | 1:51 PM | 🟣 | Implemented Phase 10 security, audit logging, database indexes, and documentation for AI chatbot | ~448 |
|
| 11 |
-
|
| 12 |
-
### Jan 29, 2026
|
| 13 |
-
|
| 14 |
-
| ID | Time | T | Title | Read |
|
| 15 |
-
|----|------|---|-------|------|
|
| 16 |
-
| #870 | 7:34 PM | 🔵 | Backend migration runner script examined | ~199 |
|
| 17 |
</claude-mem-context>
|
|
|
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
+
*No recent activity*
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
</claude-mem-context>
|
models/CLAUDE.md
CHANGED
|
@@ -3,25 +3,5 @@
|
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
| ID | Time | T | Title | Read |
|
| 9 |
-
|----|------|---|-------|------|
|
| 10 |
-
| #648 | 9:38 PM | 🔄 | Task Model Enhanced with Priority Enum and Tags Array | ~280 |
|
| 11 |
-
| #647 | " | 🟣 | Task Model Extended with PriorityLevel Enum and Tags Array | ~296 |
|
| 12 |
-
| #646 | " | 🟣 | Added PriorityLevel Enum to Task Model | ~311 |
|
| 13 |
-
| #643 | 9:37 PM | 🔵 | Existing Task Model Already Includes Priority and Due Date Fields | ~341 |
|
| 14 |
-
| #611 | 8:45 PM | 🔵 | Task Model Already Includes Priority Field with Medium Default | ~360 |
|
| 15 |
-
|
| 16 |
-
### Jan 29, 2026
|
| 17 |
-
|
| 18 |
-
| ID | Time | T | Title | Read |
|
| 19 |
-
|----|------|---|-------|------|
|
| 20 |
-
| #877 | 7:40 PM | 🔵 | Task model defines tags field with PostgreSQL ARRAY type | ~239 |
|
| 21 |
-
|
| 22 |
-
### Jan 30, 2026
|
| 23 |
-
|
| 24 |
-
| ID | Time | T | Title | Read |
|
| 25 |
-
|----|------|---|-------|------|
|
| 26 |
-
| #934 | 12:53 PM | 🔵 | Backend uses uppercase priority values (HIGH, MEDIUM, LOW) in PriorityLevel enum | ~199 |
|
| 27 |
</claude-mem-context>
|
|
|
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
+
*No recent activity*
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
</claude-mem-context>
|
nlp_service.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""NLP service for extracting task attributes from natural language.
|
| 2 |
+
|
| 3 |
+
[Task]: T029
|
| 4 |
+
[From]: specs/007-intermediate-todo-features/tasks.md (User Story 2)
|
| 5 |
+
|
| 6 |
+
This service provides:
|
| 7 |
+
- Tag extraction from natural language ("tagged with X", "add tag Y")
|
| 8 |
+
- Priority detection patterns
|
| 9 |
+
- Due date parsing patterns
|
| 10 |
+
"""
|
| 11 |
+
from typing import List, Optional
|
| 12 |
+
import re
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def extract_tags(text: str) -> List[str]:
    """Extract tags from natural language input.

    [Task]: T029, T031 - Tag extraction from natural language

    Supports patterns:
    - "tagged with X", "tags X", "tag X"
    - "add tag X", "with tag X"
    - "labeled X"
    - Hashtags: "#tagname"

    Args:
        text: Natural language input text

    Returns:
        Alphabetically sorted list of extracted tag names
        (lowercased, deduplicated)

    Examples:
        >>> extract_tags("Add task tagged with work and urgent")
        ['urgent', 'work']
        >>> extract_tags("Buy groceries #Shopping #home")
        ['home', 'shopping']
        >>> extract_tags("Create task with label review")
        ['review']
    """
    if not text:
        return []

    tags = set()
    text_lower = text.lower()

    # Pattern 1: Hashtag extraction.
    # Match against the lowercased text so "#Shopping" and "#shopping"
    # dedupe to the same tag. (The original matched the raw text and could
    # return mixed-case tags, contradicting the documented contract.)
    hashtag_pattern = r'#(\w+)'
    hashtags = re.findall(hashtag_pattern, text_lower)
    tags.update(hashtags)

    # Pattern 2: "tagged with X and Y" or "tags X, Y"
    tagged_with_pattern = r'(?:tagged|tags?|labeled?)\s+(?:with\s+)?(?:[,\s]+)?(\w+(?:\s+(?:and|,)\s+\w+)*)'
    matches = re.findall(tagged_with_pattern, text_lower)
    for match in matches:
        # Split by common separators
        parts = re.split(r'\s+(?:and|,)\s+', match)
        tags.update(parts)

    # Pattern 3: "add tag X" or "with tag X"
    add_tag_pattern = r'(?:add|with|has)\s+tag\s+(\w+)'
    matches = re.findall(add_tag_pattern, text_lower)
    tags.update(matches)

    # Pattern 4: "label X"
    label_pattern = r'(?:label|categorize|file\s*(?:under)?)(?:ed|s+as)?\s+(\w+)'
    matches = re.findall(label_pattern, text_lower)
    tags.update(matches)

    # Filter out common non-tag words (stopwords and single characters).
    excluded_words = {
        'a', 'an', 'the', 'with', 'for', 'and', 'or', 'but', 'not',
        'this', 'that', 'to', 'of', 'in', 'on', 'at', 'by', 'as', 'is',
        'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had',
        'do', 'does', 'did', 'will', 'would', 'could', 'should', 'may',
        'might', 'must', 'can', 'need', 'want', 'like', 'such'
    }

    filtered_tags = [tag for tag in tags if tag not in excluded_words and len(tag) > 1]

    return sorted(filtered_tags)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
def normalize_tag_name(tag: str) -> str:
    """Normalize a raw tag for consistent storage and comparison.

    The result is lowercase, trimmed, stripped of punctuation (hyphens and
    underscores are kept), with internal whitespace collapsed to hyphens.

    Args:
        tag: Raw tag name from user input

    Returns:
        Normalized tag name
    """
    # Drop punctuation other than hyphen/underscore, then canonicalize case.
    cleaned = re.sub(r'[^\w\s-]', '', tag).lower().strip()
    # Multi-word tags become hyphen-separated.
    return re.sub(r'\s+', '-', cleaned)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def extract_tags_from_task_data(
    title: str,
    description: Optional[str] = None
) -> List[str]:
    """Extract normalized tags from a task's title and description.

    Convenience wrapper around ``extract_tags`` and ``normalize_tag_name``
    that scans both fields in a single pass.

    Args:
        title: Task title
        description: Optional task description

    Returns:
        List of extracted and normalized tag names
    """
    combined = f"{title} {description}" if description else title
    return [normalize_tag_name(raw) for raw in extract_tags(combined)]
|
rate_limiter.py
ADDED
|
@@ -0,0 +1,181 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Rate limiting service for chat API.
|
| 2 |
+
|
| 3 |
+
[Task]: T021
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
|
| 6 |
+
This service enforces the 100 messages/day limit per user (NFR-011).
|
| 7 |
+
"""
|
| 8 |
+
import uuid
|
| 9 |
+
from datetime import datetime, timedelta
|
| 10 |
+
from typing import Optional
|
| 11 |
+
from sqlmodel import Session, select
|
| 12 |
+
from sqlalchemy import func
|
| 13 |
+
|
| 14 |
+
from models.message import Message
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# Rate limit constants
|
| 18 |
+
DAILY_MESSAGE_LIMIT = 100 # NFR-011: Maximum messages per user per day
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def check_rate_limit(
    db: Session,
    user_id: uuid.UUID
) -> tuple[bool, int, Optional[datetime]]:
    """Check if user has exceeded their daily message limit.

    [From]: specs/004-ai-chatbot/spec.md - NFR-011

    Args:
        db: Database session (synchronous)
        user_id: User ID to check

    Returns:
        Tuple of (allowed, remaining_count, reset_time)
        - allowed: True if user can send message, False if limit exceeded
        - remaining_count: Messages the user will have left AFTER sending
          the current one (0 when the limit is hit)
        - reset_time: When the limit resets (midnight UTC), or None if allowed

    Example:
        >>> allowed, remaining, reset = check_rate_limit(db, user_id)
        >>> if not allowed:
        ...     print(f"Rate limited. Resets at {reset}")
    """
    # Calculate today's date range (UTC).
    # NOTE(review): datetime.utcnow() is naive (and deprecated in 3.12+);
    # presumably Message.created_at values are also naive UTC — confirm.
    now = datetime.utcnow()
    today_start = now.replace(hour=0, minute=0, second=0, microsecond=0)
    today_end = today_start + timedelta(days=1)

    # Count messages sent by user today
    # [From]: specs/004-ai-chatbot/spec.md - NFR-011
    # Count both user and assistant messages (all messages in conversation)
    statement = select(func.count(Message.id)).where(
        Message.user_id == user_id,
        Message.created_at >= today_start,
        Message.created_at < today_end
    )

    message_count = db.exec(statement).one() or 0

    # Calculate remaining messages
    remaining = DAILY_MESSAGE_LIMIT - message_count

    if remaining <= 0:
        # Rate limit exceeded
        return False, 0, today_end
    else:
        # User can send message; the -1 pre-accounts for the message
        # the caller is about to record.
        return True, remaining - 1, None
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def record_message(
    db: Session,
    user_id: uuid.UUID,
    conversation_id: uuid.UUID,
    role: str,
    content: str
) -> Message:
    """Persist a single chat message (used for rate-limit accounting).

    [From]: specs/004-ai-chatbot/plan.md - Message Persistence

    Note: This function is primarily for rate limit tracking. The chat API
    endpoint persists messages before AI processing (T017) and after the
    AI response (T018).

    Args:
        db: Database session
        user_id: User ID who sent the message
        conversation_id: Conversation ID
        role: Message role ("user" or "assistant")
        content: Message content

    Returns:
        The newly created, committed message row
    """
    new_row = Message(
        id=uuid.uuid4(),
        conversation_id=conversation_id,
        user_id=user_id,
        role=role,
        content=content,
        created_at=datetime.utcnow()
    )

    # Commit immediately so subsequent rate-limit counts see this message.
    db.add(new_row)
    db.commit()
    db.refresh(new_row)

    return new_row
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def get_message_count_today(
    db: Session,
    user_id: uuid.UUID
) -> int:
    """Count the messages a user has sent during the current UTC day.

    [From]: specs/004-ai-chatbot/spec.md - NFR-011

    Args:
        db: Database session
        user_id: User ID to check

    Returns:
        Number of messages recorded today (user and assistant combined)
    """
    current = datetime.utcnow()
    day_start = current.replace(hour=0, minute=0, second=0, microsecond=0)
    day_end = day_start + timedelta(days=1)

    query = select(func.count(Message.id)).where(
        Message.user_id == user_id,
        Message.created_at >= day_start,
        Message.created_at < day_end
    )

    return db.exec(query).one() or 0
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def get_rate_limit_status(
    db: Session,
    user_id: uuid.UUID
) -> dict:
    """Build a summary of a user's daily rate-limit usage.

    [From]: specs/004-ai-chatbot/spec.md - NFR-011

    Args:
        db: Database session
        user_id: User ID to check

    Returns:
        Dictionary with rate limit information:
        {
            "limit": 100,
            "used": 45,
            "remaining": 55,
            "resets_at": "2025-01-16T00:00:00Z"
        }
    """
    current = datetime.utcnow()
    day_start = current.replace(hour=0, minute=0, second=0, microsecond=0)
    day_end = day_start + timedelta(days=1)

    # Count messages sent today (user + assistant).
    query = select(func.count(Message.id)).where(
        Message.user_id == user_id,
        Message.created_at >= day_start,
        Message.created_at < day_end
    )
    used = db.exec(query).one() or 0

    return {
        "limit": DAILY_MESSAGE_LIMIT,
        "used": used,
        "remaining": max(0, DAILY_MESSAGE_LIMIT - used),
        "resets_at": day_end.isoformat() + "Z"
    }
|
recurrence.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Recurrence rule model for recurring tasks.
|
| 2 |
+
|
| 3 |
+
[Task]: T002
|
| 4 |
+
[From]: specs/008-advanced-features/tasks.md (Phase 1)
|
| 5 |
+
|
| 6 |
+
This module defines the RecurrenceRule Pydantic model used for
|
| 7 |
+
defining how tasks repeat (daily, weekly, monthly).
|
| 8 |
+
"""
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
from typing import Optional, Literal
|
| 11 |
+
from pydantic import BaseModel, Field
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class RecurrenceRule(BaseModel):
    """Defines how a task repeats.

    Used for creating recurring tasks that automatically generate
    new instances when completed.

    Attributes:
        frequency: How often the task repeats (daily, weekly, monthly)
        interval: Repeat every N periods (default: 1)
        count: Maximum number of occurrences (max 100)
        end_date: Stop recurring after this date

    Examples:
        Daily forever: {"frequency": "daily"}
        Weekly: {"frequency": "weekly"}
        Every 2 weeks: {"frequency": "weekly", "interval": 2}
        Monthly, 10 times: {"frequency": "monthly", "count": 10}
        Daily until Dec 31: {"frequency": "daily", "end_date": "2026-12-31"}
    """

    # Required; the only accepted cadences. Cron-style rules are not part
    # of this model.
    frequency: Literal['daily', 'weekly', 'monthly'] = Field(
        ...,
        description="How often the task repeats"
    )

    # Bounded to one year's worth of periods.
    interval: Optional[int] = Field(
        default=1,
        ge=1,
        le=365,
        description="Repeat every N periods (max 365, e.g., 2 = every 2 days)"
    )

    # None means "no occurrence cap" (end_date may still terminate it).
    count: Optional[int] = Field(
        default=None,
        ge=1,
        le=100,
        description="Maximum number of occurrences (max 100)"
    )

    # None means "no end date".
    end_date: Optional[datetime] = Field(
        default=None,
        description="Stop recurring after this date"
    )

    # OpenAPI examples shown in the generated schema docs.
    model_config = {
        "json_schema_extra": {
            "examples": [
                {"frequency": "daily"},
                {"frequency": "weekly"},
                {"frequency": "weekly", "interval": 2},
                {"frequency": "monthly", "count": 10},
                {"frequency": "daily", "interval": 1, "count": 30},
            ]
        }
    }
|
recurrence_service.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Recurrence service for calculating recurring task dates.
|
| 2 |
+
|
| 3 |
+
[Task]: T015-T020
|
| 4 |
+
[From]: specs/008-advanced-features/tasks.md (Phase 2)
|
| 5 |
+
|
| 6 |
+
This service handles:
|
| 7 |
+
- Calculating next occurrence dates from recurrence rules
|
| 8 |
+
- Validating recurrence rule structures
|
| 9 |
+
- Checking recurrence limits (100 instance max)
|
| 10 |
+
- Supporting daily, weekly, monthly, and cron-based patterns
|
| 11 |
+
"""
|
| 12 |
+
import uuid
|
| 13 |
+
from datetime import datetime, timedelta
|
| 14 |
+
from typing import Optional
|
| 15 |
+
from sqlalchemy import select, func
|
| 16 |
+
from sqlmodel import Session
|
| 17 |
+
|
| 18 |
+
from models.task import Task
|
| 19 |
+
from models.recurrence import RecurrenceRule
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class RecurrenceService:
|
| 23 |
+
"""Service for calculating recurring task dates."""
|
| 24 |
+
|
| 25 |
+
MAX_RECURRING_INSTANCES = 100
|
| 26 |
+
|
| 27 |
+
    def __init__(self, session: Session):
        """Initialize the recurrence service.

        Args:
            session: Database session for queries
        """
        # Session is held for the service's lifetime; the caller owns
        # commit/rollback/close.
        self.session = session
|
| 34 |
+
|
| 35 |
+
def calculate_next_occurrence(
|
| 36 |
+
self,
|
| 37 |
+
base_date: datetime,
|
| 38 |
+
recurrence_rule: dict
|
| 39 |
+
) -> Optional[datetime]:
|
| 40 |
+
"""Calculate next due date based on recurrence pattern.
|
| 41 |
+
|
| 42 |
+
[Task]: T016-T018, T077 (cron)
|
| 43 |
+
|
| 44 |
+
Args:
|
| 45 |
+
base_date: The base date to calculate from
|
| 46 |
+
recurrence_rule: Dictionary containing recurrence rule
|
| 47 |
+
|
| 48 |
+
Returns:
|
| 49 |
+
Next due date in UTC, or None if limit reached
|
| 50 |
+
|
| 51 |
+
Raises:
|
| 52 |
+
ValueError: If recurrence rule is invalid
|
| 53 |
+
"""
|
| 54 |
+
if not self.validate_recurrence_rule(recurrence_rule):
|
| 55 |
+
raise ValueError("Invalid recurrence rule")
|
| 56 |
+
|
| 57 |
+
frequency = recurrence_rule.get("frequency")
|
| 58 |
+
interval = recurrence_rule.get("interval", 1)
|
| 59 |
+
|
| 60 |
+
# For cron-based recurrence (T077)
|
| 61 |
+
if frequency == "cron" and "cron_expression" in recurrence_rule:
|
| 62 |
+
return self._calculate_from_cron(base_date, recurrence_rule["cron_expression"])
|
| 63 |
+
|
| 64 |
+
# Calculate based on frequency
|
| 65 |
+
if frequency == "daily":
|
| 66 |
+
return base_date + timedelta(days=interval)
|
| 67 |
+
elif frequency == "weekly":
|
| 68 |
+
return base_date + timedelta(weeks=interval)
|
| 69 |
+
elif frequency == "monthly":
|
| 70 |
+
return self._add_months(base_date, interval)
|
| 71 |
+
else:
|
| 72 |
+
raise ValueError(f"Unsupported frequency: {frequency}")
|
| 73 |
+
|
| 74 |
+
def _add_months(self, date: datetime, months: int) -> datetime:
|
| 75 |
+
"""Add months to a date, handling edge cases.
|
| 76 |
+
|
| 77 |
+
Args:
|
| 78 |
+
date: Base date
|
| 79 |
+
months: Number of months to add
|
| 80 |
+
|
| 81 |
+
Returns:
|
| 82 |
+
New date with months added
|
| 83 |
+
"""
|
| 84 |
+
# Simple implementation: add days (approximate)
|
| 85 |
+
# For precise month handling, would use dateutil.relativedelta
|
| 86 |
+
return date + timedelta(days=months * 30)
|
| 87 |
+
|
| 88 |
+
    def _calculate_from_cron(self, base_date: datetime, cron_expression: str) -> Optional[datetime]:
        """Calculate next occurrence from cron expression.

        [Task]: T077

        Args:
            base_date: Base date to calculate from
            cron_expression: Cron expression (5 fields)

        Returns:
            Next due date, or None if invalid

        Note:
            This is a simplified implementation. A full cron parser
            would be more complex. For MVP, we support basic patterns.
        """
        # TODO: Implement full cron parsing
        # For now, return None to indicate not implemented — callers must
        # treat None as "no next occurrence scheduled".
        return None
|
| 107 |
+
|
| 108 |
+
def validate_recurrence_rule(self, rule: dict) -> bool:
|
| 109 |
+
"""Validate recurrence rule structure.
|
| 110 |
+
|
| 111 |
+
[Task]: T019
|
| 112 |
+
|
| 113 |
+
Args:
|
| 114 |
+
rule: Dictionary to validate
|
| 115 |
+
|
| 116 |
+
Returns:
|
| 117 |
+
True if valid, False otherwise
|
| 118 |
+
"""
|
| 119 |
+
if not isinstance(rule, dict):
|
| 120 |
+
return False
|
| 121 |
+
|
| 122 |
+
# Check frequency
|
| 123 |
+
frequency = rule.get("frequency")
|
| 124 |
+
if frequency not in ("daily", "weekly", "monthly"):
|
| 125 |
+
return False
|
| 126 |
+
|
| 127 |
+
# Validate interval
|
| 128 |
+
interval = rule.get("interval", 1)
|
| 129 |
+
if not isinstance(interval, int) or interval < 1 or interval > 365:
|
| 130 |
+
return False
|
| 131 |
+
|
| 132 |
+
# Validate count
|
| 133 |
+
count = rule.get("count")
|
| 134 |
+
if count is not None:
|
| 135 |
+
if not isinstance(count, int) or count < 1 or count > self.MAX_RECURRING_INSTANCES:
|
| 136 |
+
return False
|
| 137 |
+
|
| 138 |
+
# Validate end_date
|
| 139 |
+
end_date = rule.get("end_date")
|
| 140 |
+
if end_date is not None:
|
| 141 |
+
if not isinstance(end_date, (datetime, str)):
|
| 142 |
+
return False
|
| 143 |
+
|
| 144 |
+
return True
|
| 145 |
+
|
| 146 |
+
    def check_recurrence_limit(self, parent_task_id: uuid.UUID) -> bool:
        """Ensure the recurrence instance limit has not been exceeded.

        [Task]: T020

        Counts the existing child instances of the given parent recurring
        task and raises once the cap (MAX_RECURRING_INSTANCES) is hit.

        Args:
            parent_task_id: ID of the parent recurring task

        Returns:
            False when the limit has not been reached. (Despite the bool
            return type, True is never returned: reaching the limit raises
            instead of returning.)

        Raises:
            ValueError: If the instance count is at or above the limit.
        """
        # Count existing instances with this parent
        count_result = self.session.exec(
            select(func.count(Task.id))
            .where(Task.parent_task_id == parent_task_id)
        )
        # `or 0` guards against a driver returning None for an empty count.
        instance_count = count_result.one() or 0

        if instance_count >= self.MAX_RECURRING_INSTANCES:
            raise ValueError(
                f"Recurrence limit reached: maximum {self.MAX_RECURRING_INSTANCES} instances"
            )

        return False
|
| 174 |
+
def should_create_next_instance(
|
| 175 |
+
self,
|
| 176 |
+
task: Task,
|
| 177 |
+
next_due_date: datetime
|
| 178 |
+
) -> bool:
|
| 179 |
+
"""Determine if next instance should be created.
|
| 180 |
+
|
| 181 |
+
Checks count and end_date limits from recurrence rule.
|
| 182 |
+
|
| 183 |
+
Args:
|
| 184 |
+
task: Current task being completed
|
| 185 |
+
next_due_date: Calculated next due date
|
| 186 |
+
|
| 187 |
+
Returns:
|
| 188 |
+
True if should create, False if limit reached
|
| 189 |
+
"""
|
| 190 |
+
if not task.recurrence:
|
| 191 |
+
return False
|
| 192 |
+
|
| 193 |
+
# Get parent task ID (for instances) or use current task ID (for original)
|
| 194 |
+
parent_id = task.parent_task_id or task.id
|
| 195 |
+
|
| 196 |
+
# Check instance count limit
|
| 197 |
+
count_result = self.session.exec(
|
| 198 |
+
select(func.count(Task.id))
|
| 199 |
+
.where(Task.parent_task_id == parent_id)
|
| 200 |
+
)
|
| 201 |
+
instance_count = count_result.one() or 0
|
| 202 |
+
|
| 203 |
+
# Check count limit
|
| 204 |
+
count_limit = task.recurrence.get("count")
|
| 205 |
+
if count_limit and instance_count >= count_limit:
|
| 206 |
+
return False
|
| 207 |
+
|
| 208 |
+
# Check end_date limit
|
| 209 |
+
end_date_str = task.recurrence.get("end_date")
|
| 210 |
+
if end_date_str:
|
| 211 |
+
if isinstance(end_date_str, str):
|
| 212 |
+
end_date = datetime.fromisoformat(end_date_str)
|
| 213 |
+
else:
|
| 214 |
+
end_date = end_date_str
|
| 215 |
+
|
| 216 |
+
if next_due_date > end_date:
|
| 217 |
+
return False
|
| 218 |
+
|
| 219 |
+
return True
|
run_migration.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Database migration runner.
|
| 2 |
+
|
| 3 |
+
[Task]: T022, T023
|
| 4 |
+
[From]: specs/001-user-auth/tasks.md
|
| 5 |
+
|
| 6 |
+
This script runs SQL migrations against the database.
|
| 7 |
+
|
| 8 |
+
Usage:
|
| 9 |
+
uv run python migrations/run_migration.py
|
| 10 |
+
"""
|
| 11 |
+
import os
|
| 12 |
+
import sys
|
| 13 |
+
from pathlib import Path
|
| 14 |
+
from sqlmodel import Session, text
|
| 15 |
+
|
| 16 |
+
# Add parent directory to path for imports
|
| 17 |
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 18 |
+
|
| 19 |
+
from core.database import engine
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def run_migration(migration_file: str):
    """Run a single SQL migration file.

    Reads migrations/<migration_file>, executes it in one transaction,
    and prints progress/outcome.

    Args:
        migration_file: Name of the migration file in migrations/ directory

    Returns:
        True if the migration ran and committed, False otherwise.
    """
    migration_path = Path(__file__).parent / migration_file

    if not migration_path.exists():
        print(f"❌ Migration file not found: {migration_path}")
        return False

    print(f"📜 Running migration: {migration_file}")

    # Explicit encoding: migration files contain UTF-8 text (comments,
    # emoji) and must not depend on the platform's locale default.
    sql = migration_path.read_text(encoding="utf-8")

    try:
        with Session(engine) as session:
            # Execute the migration using text()
            session.exec(text(sql))
            session.commit()

        print(f"✅ Migration completed successfully: {migration_file}")
        return True
    except Exception as e:
        print(f"❌ Migration failed: {e}")
        return False
| 51 |
+
|
| 52 |
+
def main():
    """Run all known migrations in order and report a summary.

    Returns:
        0 if every migration succeeded, 1 otherwise (for sys.exit).
    """
    # Migration files in order. Keep this list in sync with the .sql files
    # in this directory.
    migrations = [
        "001_add_user_id_index.sql",
        "002_add_conversation_and_message_tables.sql",  # Phase III: AI Chatbot
        "003_add_due_date_and_priority_to_tasks.sql",  # Phase III: UX Improvements
        "004_add_performance_indexes.sql",  # Phase III: UX Improvements
        "005_add_tags_to_tasks.sql",  # Phase VII: Intermediate Features
        "008_add_advanced_features.sql",  # Phase VIII: Advanced Features
        # BUG FIX: 009 ships with this commit (priority enum casing) but
        # was missing from the run list, so it never executed.
        "009_fix_priority_case.sql",  # Phase VIII: Priority case fix
    ]

    print("🚀 Starting database migrations...\n")

    success_count = 0
    for migration in migrations:
        if run_migration(migration):
            success_count += 1
        print()

    print(f"✅ {success_count}/{len(migrations)} migrations completed successfully")

    if success_count == len(migrations):
        print("\n🎉 All migrations completed!")
        print("\n📊 Database schema is ready for authentication.")
        return 0
    else:
        print("\n⚠️ Some migrations failed. Please check the errors above.")
        return 1
| 81 |
+
|
| 82 |
+
|
| 83 |
+
if __name__ == "__main__":
|
| 84 |
+
sys.exit(main())
|
security.py
ADDED
|
@@ -0,0 +1,276 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Security utilities for the AI chatbot.
|
| 2 |
+
|
| 3 |
+
[Task]: T057
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
|
| 6 |
+
This module provides security functions including prompt injection sanitization,
|
| 7 |
+
input validation, and content filtering.
|
| 8 |
+
"""
|
| 9 |
+
import re
|
| 10 |
+
import html
|
| 11 |
+
from typing import Optional, List
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# Known prompt injection patterns.
# Each entry is a case-insensitive regex; a raw match is further screened
# against common legitimate task-management phrasing before the message is
# flagged (see detect_prompt_injection / _check_legitimate_context).
PROMPT_INJECTION_PATTERNS = [
    # Direct instructions to ignore previous context
    r"(?i)ignore\s+(all\s+)?(previous|above|prior)",
    r"(?i)disregard\s+(all\s+)?(previous|above|prior)",
    r"(?i)forget\s+(everything|all\s+instructions|previous)",
    r"(?i)override\s+(your\s+)?programming",
    r"(?i)new\s+(instruction|direction|rule)s?",
    r"(?i)change\s+(your\s+)?(behavior|role|persona)",

    # Jailbreak attempts
    r"(?i)(jailbreak|jail\s*break)",
    r"(?i)(developer|admin|root|privileged)\s+mode",
    r"(?i)act\s+as\s+(a\s+)?(developer|admin|root)",
    r"(?i)roleplay\s+as",
    r"(?i)pretend\s+(to\s+be|you're)",
    r"(?i)simulate\s+being",

    # System prompt extraction
    r"(?i)show\s+(your\s+)?(instructions|system\s+prompt|prompt)",
    r"(?i)print\s+(your\s+)?(instructions|system\s+prompt)",
    r"(?i)reveal\s+(your\s+)?(instructions|system\s+prompt)",
    r"(?i)what\s+(are\s+)?your\s+instructions",
    r"(?i)tell\s+me\s+how\s+you\s+work",

    # DAN and similar jailbreaks
    r"(?i)do\s+anything\s+now",
    r"(?i)unrestricted\s+mode",
    r"(?i)no\s+limitations?",
    r"(?i)bypass\s+(safety|filters|restrictions)",
    r"(?i)\bDAN\b",  # Do Anything Now
]
|
| 47 |
+
|
| 48 |
+
def sanitize_message(message: str, max_length: int = 10000) -> str:
    """Sanitize a user message before it is handed to the AI.

    [From]: specs/004-ai-chatbot/spec.md - NFR-017

    Truncates overly long input, rejects high-severity prompt-injection
    attempts outright, and normalizes the remaining text.

    Args:
        message: The raw user message
        max_length: Maximum allowed message length

    Returns:
        Sanitized message safe for processing by AI

    Raises:
        ValueError: If message contains severe injection attempts
    """
    if not message:
        return ""

    # Enforce the length cap before any pattern matching.
    truncated = message[:max_length]

    # High-severity injections are refused; lower severities fall through
    # and are merely normalized.
    detection = detect_prompt_injection(truncated)
    if detection and detection["severity"] == "high":
        raise ValueError(
            "This message contains content that cannot be processed. "
            "Please rephrase your request."
        )

    return _apply_sanitization(truncated)
|
| 85 |
+
def detect_prompt_injection(message: str) -> Optional[dict]:
    """Detect potential prompt injection attempts in a message.

    [From]: specs/004-ai-chatbot/spec.md - NFR-017

    Args:
        message: The message to check

    Returns:
        Dictionary with detection info if injection detected, None otherwise:
        {
            "detected": True,
            "severity": "low" | "medium" | "high",
            "pattern": "matched pattern",
            "confidence": 0.0-1.0
        }
    """
    # Patterns already carry (?i); lowercasing keeps match.group() stable.
    haystack = message.lower()

    for candidate in PROMPT_INJECTION_PATTERNS:
        hit = re.search(candidate, haystack)
        if hit is None:
            continue

        # Skip matches that occur inside ordinary task-management phrasing.
        if _check_legitimate_context(message, hit.group()):
            continue

        return {
            "detected": True,
            "severity": _get_severity_for_pattern(candidate),
            "pattern": hit.group(),
            "confidence": 0.8,
        }

    return None
+
|
| 125 |
+
def _get_severity_for_pattern(pattern: str) -> str:
|
| 126 |
+
"""Determine severity level for a matched pattern.
|
| 127 |
+
|
| 128 |
+
Args:
|
| 129 |
+
pattern: The regex pattern that matched
|
| 130 |
+
|
| 131 |
+
Returns:
|
| 132 |
+
"low", "medium", or "high"
|
| 133 |
+
"""
|
| 134 |
+
pattern_lower = pattern.lower()
|
| 135 |
+
|
| 136 |
+
# High severity: direct jailbreak attempts
|
| 137 |
+
if any(word in pattern_lower for word in ["jailbreak", "dan", "unrestricted", "bypass"]):
|
| 138 |
+
return "high"
|
| 139 |
+
|
| 140 |
+
# High severity: system prompt extraction
|
| 141 |
+
if any(word in pattern_lower for word in ["show", "print", "reveal", "instructions"]):
|
| 142 |
+
return "high"
|
| 143 |
+
|
| 144 |
+
# Medium severity: role/persona manipulation
|
| 145 |
+
if any(word in pattern_lower for word in ["act as", "pretend", "roleplay", "override"]):
|
| 146 |
+
return "medium"
|
| 147 |
+
|
| 148 |
+
# Low severity: ignore instructions
|
| 149 |
+
if any(word in pattern_lower for word in ["ignore", "disregard", "forget"]):
|
| 150 |
+
return "low"
|
| 151 |
+
|
| 152 |
+
return "low"
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
def _check_legitimate_context(message: str, matched_text: str) -> bool:
|
| 156 |
+
"""Check if a matched pattern might be legitimate user content.
|
| 157 |
+
|
| 158 |
+
[From]: specs/004-ai-chatbot/spec.md - NFR-017
|
| 159 |
+
|
| 160 |
+
Args:
|
| 161 |
+
message: The full message
|
| 162 |
+
matched_text: The text that matched a pattern
|
| 163 |
+
|
| 164 |
+
Returns:
|
| 165 |
+
True if this appears to be legitimate context, False otherwise
|
| 166 |
+
"""
|
| 167 |
+
message_lower = message.lower()
|
| 168 |
+
matched_lower = matched_text.lower()
|
| 169 |
+
|
| 170 |
+
# Check if the matched text is part of a task description (legitimate)
|
| 171 |
+
legitimate_contexts = [
|
| 172 |
+
# Common task-related phrases
|
| 173 |
+
"task to ignore",
|
| 174 |
+
"mark as complete",
|
| 175 |
+
"disregard this",
|
| 176 |
+
"role in the project",
|
| 177 |
+
"change status",
|
| 178 |
+
"update the role",
|
| 179 |
+
"priority change",
|
| 180 |
+
]
|
| 181 |
+
|
| 182 |
+
for context in legitimate_contexts:
|
| 183 |
+
if context in message_lower:
|
| 184 |
+
return True
|
| 185 |
+
|
| 186 |
+
# Check if matched text is very short (likely false positive)
|
| 187 |
+
if len(matched_text) <= 3:
|
| 188 |
+
return True
|
| 189 |
+
|
| 190 |
+
return False
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def _apply_sanitization(message: str) -> str:
|
| 194 |
+
"""Apply sanitization transformations to a message.
|
| 195 |
+
|
| 196 |
+
[From]: specs/004-ai-chatbot/spec.md - NFR-017
|
| 197 |
+
|
| 198 |
+
Args:
|
| 199 |
+
message: The message to sanitize
|
| 200 |
+
|
| 201 |
+
Returns:
|
| 202 |
+
Sanitized message
|
| 203 |
+
"""
|
| 204 |
+
# Remove excessive whitespace
|
| 205 |
+
message = re.sub(r"\s+", " ", message)
|
| 206 |
+
|
| 207 |
+
# Remove control characters except newlines and tabs
|
| 208 |
+
message = re.sub(r"[\x00-\x08\x0b-\x0c\x0e-\x1f\x7f-\x9f]", "", message)
|
| 209 |
+
|
| 210 |
+
# Normalize line endings
|
| 211 |
+
message = message.replace("\r\n", "\n").replace("\r", "\n")
|
| 212 |
+
|
| 213 |
+
# Limit consecutive newlines to 2
|
| 214 |
+
message = re.sub(r"\n{3,}", "\n\n", message)
|
| 215 |
+
|
| 216 |
+
return message.strip()
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def validate_task_input(task_data: dict) -> tuple[bool, Optional[str]]:
    """Validate task-related input for security issues.

    [From]: specs/004-ai-chatbot/spec.md - NFR-017

    Args:
        task_data: Dictionary containing task fields

    Returns:
        Tuple of (is_valid, error_message)
    """
    if not isinstance(task_data, dict):
        return False, "Invalid task data format"

    # Heuristic SQL-injection signatures for string fields. Defense in
    # depth only — parameterized queries remain the real protection.
    sql_patterns = [
        r"(?i)(\bunion\b.*\bselect\b)",
        r"(?i)(\bselect\b.*\bfrom\b)",
        r"(?i)(\binsert\b.*\binto\b)",
        r"(?i)(\bupdate\b.*\bset\b)",
        r"(?i)(\bdelete\b.*\bfrom\b)",
        r"(?i)(\bdrop\b.*\btable\b)",
        r";\s*(union|select|insert|update|delete|drop)",
    ]

    for key, value in task_data.items():
        if not isinstance(value, str):
            continue

        for pattern in sql_patterns:
            if re.search(pattern, value):
                return False, f"Invalid characters in {key}"

        # BUG FIX: re.DOTALL added so multi-line <script> payloads are
        # caught; without it, '.' never matched a newline inside the tag.
        if re.search(r"<script[^>]*>.*?</script>", value, re.IGNORECASE | re.DOTALL):
            return False, f"Invalid content in {key}"

    return True, None
+
|
| 257 |
+
def sanitize_html_content(content: str) -> str:
    """Escape HTML special characters in *content*.

    [From]: specs/004-ai-chatbot/spec.md - NFR-017

    Args:
        content: Content that may contain HTML

    Returns:
        Escaped HTML string; quotes are intentionally left untouched
        (``quote=False``).
    """
    escaped = html.escape(content, quote=False)
    return escaped
+
|
| 271 |
+
__all__ = [
|
| 272 |
+
"sanitize_message",
|
| 273 |
+
"detect_prompt_injection",
|
| 274 |
+
"validate_task_input",
|
| 275 |
+
"sanitize_html_content",
|
| 276 |
+
]
|
server.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tool registry for AI agent.
|
| 2 |
+
|
| 3 |
+
[Task]: T009
|
| 4 |
+
[From]: specs/004-ai-chatbot/plan.md
|
| 5 |
+
|
| 6 |
+
This module provides a simple registry for tools that the AI agent can use.
|
| 7 |
+
Note: We're using OpenAI Agents SDK's built-in tool calling mechanism,
|
| 8 |
+
not the full Model Context Protocol server.
|
| 9 |
+
"""
|
| 10 |
+
from typing import Any, Callable, Dict
|
| 11 |
+
import logging
|
| 12 |
+
|
| 13 |
+
logger = logging.getLogger(__name__)
|
| 14 |
+
|
| 15 |
+
# Tool registry - maps tool names to their functions
|
| 16 |
+
tool_registry: Dict[str, Callable] = {}
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def register_tool(name: str, func: Callable) -> None:
    """Register a tool function.

    Stores *func* in the module-level ``tool_registry`` keyed by *name*;
    registering an existing name silently replaces the previous entry.

    Args:
        name: Tool name
        func: Tool function (async)
    """
    tool_registry[name] = func
    logger.info(f"Registered tool: {name}")
+
|
| 30 |
+
def get_tool(name: str) -> Callable:
    """Look up a registered tool by name.

    Args:
        name: Tool name

    Returns:
        The tool function

    Raises:
        ValueError: If tool not found
    """
    try:
        return tool_registry[name]
    except KeyError:
        # Same error text as a pre-check would produce; context suppressed
        # so callers see a single clean ValueError.
        raise ValueError(f"Tool '{name}' not found. Available tools: {list(tool_registry.keys())}") from None
| 46 |
+
|
| 47 |
+
def list_tools() -> list[str]:
    """Return the names of all registered tools.

    Returns:
        List of tool names
    """
    return [tool_name for tool_name in tool_registry]
|
| 55 |
+
|
| 56 |
+
# Note: Tools are registered in the tools/__init__.py module
|
| 57 |
+
# The OpenAI Agents SDK will call these functions directly
|
| 58 |
+
# based on the agent's instructions and user input
|
services/CLAUDE.md
CHANGED
|
@@ -3,15 +3,5 @@
|
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
| ID | Time | T | Title | Read |
|
| 9 |
-
|----|------|---|-------|------|
|
| 10 |
-
| #708 | 11:21 PM | 🟣 | NLP Service Created for Tag Extraction | ~342 |
|
| 11 |
-
|
| 12 |
-
### Jan 29, 2026
|
| 13 |
-
|
| 14 |
-
| ID | Time | T | Title | Read |
|
| 15 |
-
|----|------|---|-------|------|
|
| 16 |
-
| #832 | 5:12 PM | 🔵 | Project Continuation Context Established | ~170 |
|
| 17 |
</claude-mem-context>
|
|
|
|
| 3 |
|
| 4 |
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
|
| 6 |
+
*No recent activity*
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
</claude-mem-context>
|
task.py
ADDED
|
@@ -0,0 +1,237 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Task model and related I/O classes."""
|
| 2 |
+
import uuid
|
| 3 |
+
from datetime import datetime, timezone
|
| 4 |
+
from enum import Enum
|
| 5 |
+
from typing import Optional
|
| 6 |
+
from sqlmodel import Field, SQLModel, Column
|
| 7 |
+
from pydantic import field_validator
|
| 8 |
+
from sqlalchemy import ARRAY, String, JSON
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class PriorityLevel(str, Enum):
    """Task priority levels.

    Defines the three priority levels for tasks:
    - HIGH: Urgent tasks that need immediate attention
    - MEDIUM: Default priority for normal tasks
    - LOW: Optional tasks that can be done whenever

    Inherits from ``str`` so members serialize directly as their string
    values in JSON responses and database columns. Values are uppercase,
    matching the stored representation.
    """
    HIGH = "HIGH"
    MEDIUM = "MEDIUM"
    LOW = "LOW"
| 23 |
+
|
| 24 |
+
class Task(SQLModel, table=True):
    """Database table model for Task entity.

    Maps to the ``tasks`` table. Includes advanced-feature columns:
    reminders (reminder_offset / reminder_sent) and recurrence
    (recurrence JSON rule + parent_task_id linking instances to their
    original task).
    """

    __tablename__ = "tasks"

    # Primary key, generated client-side.
    id: uuid.UUID = Field(
        default_factory=uuid.uuid4,
        primary_key=True,
        index=True
    )
    # Owning user; indexed for per-user task queries.
    user_id: uuid.UUID = Field(
        foreign_key="users.id",
        index=True
    )
    title: str = Field(max_length=255)
    description: Optional[str] = Field(
        default=None,
        max_length=2000
    )
    priority: PriorityLevel = Field(
        default=PriorityLevel.MEDIUM,
        max_length=10
    )
    # IDIOM FIX: default_factory avoids declaring one shared mutable list
    # object as the field default (previously `default=[]`).
    tags: list[str] = Field(
        default_factory=list,
        sa_column=Column(ARRAY(String), nullable=False),  # PostgreSQL TEXT[] type
    )
    due_date: Optional[datetime] = Field(
        default=None,
        index=True
    )
    # Reminder fields (T003-T004)
    reminder_offset: Optional[int] = Field(
        default=None,
        description="Minutes before due_date to send notification (0 = at due time)"
    )
    reminder_sent: bool = Field(
        default=False,
        description="Whether notification has been sent for this task"
    )
    # Recurrence fields (T005-T006)
    recurrence: Optional[dict] = Field(
        default=None,
        sa_column=Column(JSON, nullable=True),
        description="Recurrence rule as JSONB (frequency, interval, count, end_date)"
    )
    parent_task_id: Optional[uuid.UUID] = Field(
        default=None,
        foreign_key="tasks.id",
        description="For recurring task instances, links to the original task"
    )
    completed: bool = Field(default=False)
    # NOTE(review): datetime.utcnow yields naive timestamps (and is
    # deprecated in 3.12); switching to a timezone-aware factory would
    # change stored values — confirm against the schema before changing.
    created_at: datetime = Field(
        default_factory=datetime.utcnow
    )
    updated_at: datetime = Field(
        default_factory=datetime.utcnow
    )
| 83 |
+
|
| 84 |
+
class TaskCreate(SQLModel):
    """Request model for creating a task.

    Validates input data when creating a new task.
    """
    title: str = Field(min_length=1, max_length=255)
    description: Optional[str] = Field(default=None, max_length=2000)
    priority: str = Field(default="MEDIUM")
    tags: list[str] = Field(default_factory=list)
    due_date: Optional[datetime] = None
    # Advanced features fields (T007)
    reminder_offset: Optional[int] = Field(default=None, ge=0)
    recurrence: Optional[dict] = Field(default=None)
    completed: bool = False

    @field_validator('priority')
    @classmethod
    def normalize_priority(cls, v: str) -> str:
        """Normalize priority to uppercase and validate against the enum."""
        if isinstance(v, str):
            v = v.upper()
        # Validate against enum values
        valid_values = {e.value for e in PriorityLevel}
        if v not in valid_values:
            raise ValueError(f"priority must be one of {valid_values}")
        return v

    @field_validator('tags')
    @classmethod
    def validate_tags(cls, v: list[str]) -> list[str]:
        """Validate tags: max 50 characters per tag, remove duplicates."""
        validated = []
        seen = set()
        for tag in v:
            if len(tag) > 50:
                raise ValueError(f"Tag '{tag[:20]}...' exceeds maximum length of 50 characters")
            # Normalize tag: lowercase and strip whitespace
            normalized = tag.strip().lower()
            if not normalized:
                continue
            if normalized not in seen:
                seen.add(normalized)
                validated.append(normalized)
        return validated

    @field_validator('due_date')
    @classmethod
    def validate_due_date(cls, v: Optional[datetime]) -> Optional[datetime]:
        """Validate due date is not more than 10 years in the past."""
        if v is not None:
            # Normalize to UTC timezone-aware datetime for comparison
            now = datetime.now(timezone.utc)
            if v.tzinfo is None:
                # If input is naive, assume it's UTC
                v = v.replace(tzinfo=timezone.utc)
            else:
                # Convert to UTC
                v = v.astimezone(timezone.utc)

            # Allow dates up to 10 years in the past (for historical tasks).
            # BUG FIX: replace(year=...) raises ValueError when `now` is
            # Feb 29 and the target year has no leap day; clamp to Feb 28.
            try:
                min_date = now.replace(year=now.year - 10)
            except ValueError:
                min_date = now.replace(year=now.year - 10, day=28)
            if v < min_date:
                raise ValueError("Due date cannot be more than 10 years in the past")
        return v
+
|
| 150 |
+
class TaskUpdate(SQLModel):
    """Request model for updating a task.

    All fields are optional - only provided fields will be updated.
    """
    title: Optional[str] = Field(default=None, min_length=1, max_length=255)
    description: Optional[str] = Field(default=None, max_length=2000)
    priority: Optional[str] = None
    tags: Optional[list[str]] = None
    due_date: Optional[datetime] = None
    # Advanced features fields (T008)
    reminder_offset: Optional[int] = Field(default=None, ge=0)
    recurrence: Optional[dict] = None
    completed: Optional[bool] = None

    @field_validator('priority')
    @classmethod
    def normalize_priority(cls, v: Optional[str]) -> Optional[str]:
        """Normalize priority to uppercase and validate against the enum."""
        if v is not None and isinstance(v, str):
            v = v.upper()
            # Validate against enum values
            valid_values = {e.value for e in PriorityLevel}
            if v not in valid_values:
                raise ValueError(f"priority must be one of {valid_values}")
        return v

    @field_validator('tags')
    @classmethod
    def validate_tags(cls, v: Optional[list[str]]) -> Optional[list[str]]:
        """Validate tags: max 50 characters per tag, remove duplicates."""
        if v is None:
            return v
        validated = []
        seen = set()
        for tag in v:
            if len(tag) > 50:
                raise ValueError(f"Tag '{tag[:20]}...' exceeds maximum length of 50 characters")
            # Normalize tag: lowercase and strip whitespace
            normalized = tag.strip().lower()
            if not normalized:
                continue
            if normalized not in seen:
                seen.add(normalized)
                validated.append(normalized)
        return validated

    @field_validator('due_date')
    @classmethod
    def validate_due_date(cls, v: Optional[datetime]) -> Optional[datetime]:
        """Validate due date is not more than 10 years in the past."""
        if v is not None:
            # Normalize to UTC timezone-aware datetime for comparison
            now = datetime.now(timezone.utc)
            if v.tzinfo is None:
                # If input is naive, assume it's UTC
                v = v.replace(tzinfo=timezone.utc)
            else:
                # Convert to UTC
                v = v.astimezone(timezone.utc)

            # Allow dates up to 10 years in the past (for historical tasks).
            # BUG FIX: replace(year=...) raises ValueError when `now` is
            # Feb 29 and the target year has no leap day; clamp to Feb 28.
            try:
                min_date = now.replace(year=now.year - 10)
            except ValueError:
                min_date = now.replace(year=now.year - 10, day=28)
            if v < min_date:
                raise ValueError("Due date cannot be more than 10 years in the past")
        return v
|
| 218 |
+
class TaskRead(SQLModel):
    """Response model for task data.

    Used for serializing task data in API responses. All fields are populated
    from the Task ORM object; Optional fields may serialize as null.

    Note: the original annotations read `Optional[X] | None`, which is
    redundant (`Optional[X]` is already `X | None`); they are collapsed here
    with identical runtime semantics.
    """
    id: uuid.UUID
    user_id: uuid.UUID  # owner of the task
    title: str
    description: Optional[str]
    priority: PriorityLevel
    tags: list[str]
    due_date: Optional[datetime]
    # Advanced features fields (T009)
    reminder_offset: Optional[int]  # offset before due_date to fire reminder — units not visible here, TODO confirm
    reminder_sent: bool  # True once the reminder has been delivered
    recurrence: Optional[dict]  # recurrence rule (frequency/count/end_date keys seen in tasks.py)
    parent_task_id: Optional[uuid.UUID]  # original task for recurring instances
    completed: bool
    created_at: datetime
    updated_at: datetime
|
tasks.py
ADDED
|
@@ -0,0 +1,532 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Task CRUD API endpoints with JWT authentication.
|
| 2 |
+
|
| 3 |
+
[Task]: T053-T059, T043, T065-T067
|
| 4 |
+
[From]: specs/001-user-auth/tasks.md (User Story 3), specs/007-intermediate-todo-features/tasks.md (User Story 4)
|
| 5 |
+
|
| 6 |
+
Implements all task management operations with JWT-based authentication:
|
| 7 |
+
- Create task with validation
|
| 8 |
+
- List tasks with filtering (status, priority, tags, due_date) [T043]
|
| 9 |
+
- Get task by ID
|
| 10 |
+
- Update task with validation
|
| 11 |
+
- Delete task
|
| 12 |
+
- Toggle completion status
|
| 13 |
+
- Search tasks (User Story 3)
|
| 14 |
+
- List tags
|
| 15 |
+
|
| 16 |
+
All endpoints require valid JWT token. user_id is extracted from JWT claims.
|
| 17 |
+
"""
|
| 18 |
+
import uuid
from datetime import datetime, timedelta
from typing import Annotated, List, Optional
from zoneinfo import ZoneInfo

from fastapi import APIRouter, HTTPException, Query
from pydantic import BaseModel
from sqlalchemy import func, and_, any_
from sqlmodel import Session, select

from core.deps import SessionDep, CurrentUserDep
from models.task import Task, TaskCreate, TaskUpdate, TaskRead, PriorityLevel
|
| 29 |
+
|
| 30 |
+
# Create API router (user_id removed - now from JWT)
|
| 31 |
+
router = APIRouter(prefix="/api/tasks", tags=["tasks"])
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# Response models
|
| 35 |
+
class TaskListResponse(BaseModel):
    """Response model for task list with pagination."""
    tasks: list[TaskRead]  # current page of tasks
    total: int  # total matching rows across all pages (before offset/limit)
    offset: int  # offset that produced this page
    limit: int  # page size that produced this page


class TagInfo(BaseModel):
    """Tag information with usage count."""
    name: str  # tag name as stored on tasks
    count: int  # number of the user's tasks carrying this tag


class TagsListResponse(BaseModel):
    """Response model for tags list."""
    tags: list[TagInfo]  # ordered by count desc, then name asc (see list_tags)


class TaskSearchResponse(BaseModel):
    """Response model for task search results."""
    tasks: list[TaskRead]  # current page of matches
    total: int  # total matches across all pages
    page: int  # 1-based page number that was requested
    limit: int  # page size
    query: str  # the search string that was executed
| 62 |
+
|
| 63 |
+
# Routes - IMPORTANT: Static routes MUST come before dynamic path parameters
|
| 64 |
+
# This ensures /tags and /search are matched before /{task_id}
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@router.post("", response_model=TaskRead, status_code=201)
def create_task(
    task: TaskCreate,
    session: SessionDep,
    user_id: CurrentUserDep
):
    """Create a new task owned by the authenticated user.

    Priority may arrive as a string in any case ("high"/"HIGH") or already
    as a PriorityLevel; strings are uppercased before enum conversion.
    reminder_sent always starts False on a fresh task.
    """
    # Normalize a string priority to the PriorityLevel enum.
    if isinstance(task.priority, str):
        resolved_priority = PriorityLevel(task.priority.upper())
    else:
        resolved_priority = task.priority

    new_task = Task(
        user_id=user_id,
        title=task.title,
        description=task.description,
        priority=resolved_priority,
        tags=task.tags,
        due_date=task.due_date,
        completed=task.completed,
        reminder_offset=task.reminder_offset,  # [T043] reminder support
        reminder_sent=False,  # a just-created task has not been reminded yet
    )
    session.add(new_task)
    session.commit()
    session.refresh(new_task)
    return new_task
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@router.get("", response_model=TaskListResponse)
def list_tasks(
    session: SessionDep,
    user_id: CurrentUserDep,
    offset: int = 0,
    limit: Annotated[int, Query(le=100)] = 50,
    completed: bool | None = None,
    priority: str | None = None,
    tags: Annotated[List[str] | None, Query()] = None,
    due_date: str | None = None,
    due_before: str | None = None,  # [T028] Add due_before filter
    due_after: str | None = None,  # [T028] Add due_after filter
    timezone: str = "UTC",
    sort_by: str | None = None,
    sort_order: str = "asc",
):
    """List the authenticated user's tasks with pagination, filtering, sorting.

    Filters (all optional, combined with AND):
    - completed: exact match on the completion flag.
    - priority: exact string match against the stored priority value.
      NOTE(review): the model validator uppercases priorities and migration
      009 is named "fix_priority_case" — if stored values are uppercase,
      lowercase input here will never match; confirm expected casing.
    - tags: task must carry EVERY listed tag (PostgreSQL ANY per tag).
    - due_before / due_after: ISO-8601 strings; unparsable values are
      silently ignored rather than rejected.
    - due_date: "overdue" | "today" | "week" | "month", evaluated at local
      midnight in the caller's IANA `timezone`, then converted to UTC bounds.

    Sorting: sort_by in {"due_date", "priority", "title"}; anything else
    falls back to created_at. sort_order is "asc" (default) or "desc".
    Pagination: offset/limit, limit capped at 100 by the Query constraint.
    """
    # Two parallel statements are built in lock-step: one counts all matches,
    # the other fetches the requested page.
    count_statement = select(func.count(Task.id)).where(Task.user_id == user_id)
    statement = select(Task).where(Task.user_id == user_id)

    if completed is not None:
        count_statement = count_statement.where(Task.completed == completed)
        statement = statement.where(Task.completed == completed)

    if priority is not None:
        count_statement = count_statement.where(Task.priority == priority)
        statement = statement.where(Task.priority == priority)

    if tags and len(tags) > 0:
        for tag in tags:
            # Use PostgreSQL ANY operator: tag = ANY(tags)
            count_statement = count_statement.where(tag == any_(Task.tags))
            statement = statement.where(tag == any_(Task.tags))

    # [T028] Add due_before and due_after filters
    if due_before:
        try:
            due_before_dt = datetime.fromisoformat(due_before)
            count_statement = count_statement.where(Task.due_date <= due_before_dt)
            statement = statement.where(Task.due_date <= due_before_dt)
        except ValueError:
            pass  # Invalid date format, ignore filter

    if due_after:
        try:
            due_after_dt = datetime.fromisoformat(due_after)
            count_statement = count_statement.where(Task.due_date >= due_after_dt)
            statement = statement.where(Task.due_date >= due_after_dt)
        except ValueError:
            pass  # Invalid date format, ignore filter

    if due_date:
        try:
            # "Today" is defined by the caller's timezone, then converted to UTC.
            user_tz = ZoneInfo(timezone)
            now_utc = datetime.now(ZoneInfo("UTC"))
            now_user = now_utc.astimezone(user_tz)
            today_start = now_user.replace(hour=0, minute=0, second=0, microsecond=0)
            today_end = today_start + timedelta(days=1)

            if due_date == "overdue":
                # Due strictly before local midnight today AND still open.
                today_start_utc = today_start.astimezone(ZoneInfo("UTC"))
                count_statement = count_statement.where(
                    and_(Task.due_date < today_start_utc, Task.completed == False)
                )
                statement = statement.where(
                    and_(Task.due_date < today_start_utc, Task.completed == False)
                )
            elif due_date == "today":
                today_start_utc = today_start.astimezone(ZoneInfo("UTC"))
                today_end_utc = today_end.astimezone(ZoneInfo("UTC"))
                count_statement = count_statement.where(
                    and_(Task.due_date >= today_start_utc, Task.due_date < today_end_utc)
                )
                statement = statement.where(
                    and_(Task.due_date >= today_start_utc, Task.due_date < today_end_utc)
                )
            elif due_date == "week":
                # Rolling 7-day window starting at local midnight today.
                week_end_utc = (today_start + timedelta(days=7)).astimezone(ZoneInfo("UTC"))
                today_start_utc = today_start.astimezone(ZoneInfo("UTC"))
                count_statement = count_statement.where(
                    and_(Task.due_date >= today_start_utc, Task.due_date < week_end_utc)
                )
                statement = statement.where(
                    and_(Task.due_date >= today_start_utc, Task.due_date < week_end_utc)
                )
            elif due_date == "month":
                # "Month" is a fixed 30-day window, not a calendar month.
                month_end_utc = (today_start + timedelta(days=30)).astimezone(ZoneInfo("UTC"))
                today_start_utc = today_start.astimezone(ZoneInfo("UTC"))
                count_statement = count_statement.where(
                    and_(Task.due_date >= today_start_utc, Task.due_date < month_end_utc)
                )
                statement = statement.where(
                    and_(Task.due_date >= today_start_utc, Task.due_date < month_end_utc)
                )
        except Exception:
            pass  # Unknown timezone (or similar): silently skip the due_date filter.

    total = session.exec(count_statement).one()

    if sort_by == "due_date":
        if sort_order == "asc":
            statement = statement.order_by(Task.due_date.asc().nulls_last())
        else:
            statement = statement.order_by(Task.due_date.desc().nulls_last())
    elif sort_by == "priority":
        from sqlalchemy import case
        # Rank priorities so that "asc" means high -> medium -> low.
        # NOTE(review): these keys are lowercase; if stored priorities are
        # uppercase every row falls into else_=3 and this sort is a no-op —
        # confirm against the actual column values.
        priority_case = case(
            *[(Task.priority == k, i) for i, k in enumerate(["high", "medium", "low"])],
            else_=3
        )
        if sort_order == "asc":
            statement = statement.order_by(priority_case.asc())
        else:
            statement = statement.order_by(priority_case.desc())
    elif sort_by == "title":
        if sort_order == "asc":
            statement = statement.order_by(Task.title.asc())
        else:
            statement = statement.order_by(Task.title.desc())
    else:
        # Default ordering: creation time.
        if sort_order == "asc":
            statement = statement.order_by(Task.created_at.asc())
        else:
            statement = statement.order_by(Task.created_at.desc())

    statement = statement.offset(offset).limit(limit)
    tasks = session.exec(statement).all()

    return TaskListResponse(
        tasks=[TaskRead.model_validate(task) for task in tasks],
        total=total,
        offset=offset,
        limit=limit
    )
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
@router.get("/tags", response_model=TagsListResponse)
def list_tags(
    session: SessionDep,
    user_id: CurrentUserDep
):
    """Get all unique tags for the authenticated user with usage counts.

    Uses raw SQL because the aggregation relies on PostgreSQL's unnest() to
    explode the tags array — not expressible through the SQLModel helpers
    used elsewhere in this module. Tasks with an empty tag array ('{}') are
    excluded; results come back ordered by usage count desc, then name asc.
    """
    from sqlalchemy import text

    # PostgreSQL-specific: unnest() turns each array element into a row.
    query = text("""
        SELECT unnest(tags) as tag, COUNT(*) as count
        FROM tasks
        WHERE user_id = :user_id
        AND tags != '{}'
        GROUP BY tag
        ORDER BY count DESC, tag ASC
    """)

    # user_id is bound as a string — presumably the column is UUID/text
    # compatible; NOTE(review): confirm session.exec() accepts a bare
    # TextClause here (vs session.execute) for the installed sqlmodel version.
    result = session.exec(query.params(user_id=str(user_id)))
    tags = [TagInfo(name=row[0], count=row[1]) for row in result]
    return TagsListResponse(tags=tags)
|
| 250 |
+
|
| 251 |
+
|
| 252 |
+
@router.get("/search", response_model=TaskSearchResponse)
def search_tasks(
    session: SessionDep,
    user_id: CurrentUserDep,
    q: Annotated[str, Query(min_length=1, max_length=200)] = "",
    page: int = 1,
    limit: Annotated[int, Query(le=100)] = 20,
):
    """Search the user's tasks by case-insensitive keyword match.

    Matches `q` as a substring of either title or description (SQL ILIKE).
    Results are paginated with a 1-based `page` and ordered newest-first.
    Raises 400 when `q` was not supplied (the empty default bypasses the
    Query min_length constraint).
    """
    if not q:
        raise HTTPException(status_code=400, detail="Search query parameter 'q' is required")

    pattern = f"%{q}%"
    owned = Task.user_id == user_id
    keyword_hit = Task.title.ilike(pattern) | Task.description.ilike(pattern)

    # Total matches across all pages.
    total = session.exec(
        select(func.count(Task.id)).where(owned & keyword_hit)
    ).one()

    # Requested page, newest first. Builder call order does not affect the
    # rendered SQL clause order.
    page_statement = (
        select(Task)
        .where(owned & keyword_hit)
        .order_by(Task.created_at.desc())
        .offset((page - 1) * limit)
        .limit(limit)
    )
    matches = session.exec(page_statement).all()

    return TaskSearchResponse(
        tasks=[TaskRead.model_validate(task) for task in matches],
        total=total,
        page=page,
        limit=limit,
        query=q
    )
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
@router.get("/{task_id}", response_model=TaskRead)
def get_task(
    task_id: uuid.UUID,
    session: SessionDep,
    user_id: CurrentUserDep
):
    """Fetch a single task by ID.

    Returns 404 both when the task does not exist and when it belongs to a
    different user, so callers cannot probe for other users' task IDs.
    """
    task = session.get(Task, task_id)
    if task is None or task.user_id != user_id:
        raise HTTPException(status_code=404, detail="Task not found")
    return task
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
@router.put("/{task_id}", response_model=TaskRead)
def update_task(
    task_id: uuid.UUID,
    task_update: TaskUpdate,
    session: SessionDep,
    user_id: CurrentUserDep
):
    """Apply a partial update to an existing task.

    Only fields the client actually sent (exclude_unset) are written; string
    priorities are normalized to the PriorityLevel enum. Returns 404 when the
    task is missing or owned by another user.
    """
    task = session.get(Task, task_id)
    if task is None or task.user_id != user_id:
        raise HTTPException(status_code=404, detail="Task not found")

    for field, new_value in task_update.model_dump(exclude_unset=True).items():
        if field == "priority" and isinstance(new_value, str):
            # Accept "high"/"HIGH" etc.; store the canonical enum member.
            new_value = PriorityLevel(new_value.upper())
        setattr(task, field, new_value)

    task.updated_at = datetime.utcnow()
    session.add(task)
    session.commit()
    session.refresh(task)
    return task
|
| 328 |
+
|
| 329 |
+
|
| 330 |
+
@router.delete("/{task_id}")
def delete_task(
    task_id: uuid.UUID,
    session: SessionDep,
    user_id: CurrentUserDep
):
    """Permanently delete a task.

    404 covers both a missing task and one owned by another user.
    Responds with {"ok": True} on success.
    """
    task = session.get(Task, task_id)
    if task is None or task.user_id != user_id:
        raise HTTPException(status_code=404, detail="Task not found")

    session.delete(task)
    session.commit()
    return {"ok": True}
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
@router.patch("/{task_id}/complete", response_model=TaskRead)
def toggle_complete(
    task_id: uuid.UUID,
    session: SessionDep,
    user_id: CurrentUserDep
):
    """Toggle task completion status and create next instance for recurring tasks.

    [Task]: T062-T065
    [From]: specs/008-advanced-features/tasks.md (User Story 3)

    When completing a recurring task:
    - T063: Checks if recurrence limit (count/end_date) is reached
    - T064: Handles count limit
    - T065: Handles end_date limit
    - Creates next instance if limits not reached

    Un-completing a task never spawns instances; the spawn happens only on
    the incomplete -> complete edge, and only when the task has a due_date
    to advance from. Note the two separate commits: the toggle is committed
    before the next instance is created.
    """
    task = session.get(Task, task_id)
    if not task or task.user_id != user_id:
        raise HTTPException(status_code=404, detail="Task not found")

    # is_completing is True only when flipping from incomplete to complete.
    is_completing = not task.completed
    task.completed = is_completing
    task.updated_at = datetime.utcnow()
    session.add(task)
    session.commit()

    # [T062] Create next instance for recurring tasks when completing
    if is_completing and task.recurrence and task.due_date:
        from services.recurrence_service import RecurrenceService

        recurrence_service = RecurrenceService()

        # Parse recurrence rule: tolerate both a dict rule and a bare
        # frequency value stored directly in the column.
        recurrence_dict = task.recurrence if isinstance(task.recurrence, dict) else {"frequency": task.recurrence}
        if not isinstance(recurrence_dict, dict):
            recurrence_dict = {"frequency": str(task.recurrence)}

        # [T063] Check if we should create the next instance
        should_create_next = True
        current_count = 0

        # Count existing instances (tasks with same parent_task_id)
        if task.parent_task_id:
            # This is already an instance, count siblings
            count_statement = select(func.count(Task.id)).where(
                Task.parent_task_id == task.parent_task_id
            )
            current_count = session.exec(count_statement).one() + 1  # +1 for parent
        else:
            # This is the parent task, count its instances
            count_statement = select(func.count(Task.id)).where(
                Task.parent_task_id == task_id
            )
            current_count = session.exec(count_statement).one() + 1  # +1 for this task

        # [T064] Handle count limit
        max_count = recurrence_dict.get("count")
        if max_count is not None:
            if current_count >= max_count:
                should_create_next = False

        # [T065] Handle end_date limit
        end_date_str = recurrence_dict.get("end_date")
        if end_date_str and should_create_next:
            try:
                if isinstance(end_date_str, str):
                    # 'Z' suffix is not accepted by fromisoformat pre-3.11.
                    end_date = datetime.fromisoformat(end_date_str.replace('Z', '+00:00'))
                else:
                    end_date = end_date_str

                # Calculate next occurrence date
                base_date = datetime.fromisoformat(task.due_date.replace('Z', '+00:00')) if isinstance(task.due_date, str) else task.due_date
                next_due_date = recurrence_service.calculate_next_occurrence(base_date, recurrence_dict)

                if next_due_date and next_due_date > end_date:
                    should_create_next = False
            except Exception:
                pass  # Invalid date format, skip check

        # Create next instance if limits not reached
        if should_create_next:
            # Recomputed here because the end_date branch may not have run.
            base_date = datetime.fromisoformat(task.due_date.replace('Z', '+00:00')) if isinstance(task.due_date, str) else task.due_date
            next_due_date = recurrence_service.calculate_next_occurrence(base_date, recurrence_dict)

            if next_due_date:
                # Create next instance.
                # NOTE(review): due_date is stored as an ISO string here while
                # create_task stores a datetime — confirm the model coerces it.
                next_task = Task(
                    user_id=user_id,
                    title=task.title,
                    description=task.description,
                    priority=task.priority,
                    tags=task.tags,
                    due_date=next_due_date.isoformat(),
                    completed=False,
                    reminder_offset=task.reminder_offset,
                    reminder_sent=False,
                    recurrence=task.recurrence,
                    # Every instance chains back to the ORIGINAL parent task.
                    parent_task_id=task.parent_task_id if task.parent_task_id else task.id,
                )
                session.add(next_task)
                session.commit()

    session.refresh(task)
    return task
|
| 451 |
+
|
| 452 |
+
|
| 453 |
+
@router.patch("/{task_id}/tags")
def update_task_tags(
    task_id: uuid.UUID,
    session: SessionDep,
    user_id: CurrentUserDep,
    tags_add: Optional[List[str]] = None,
    tags_remove: Optional[List[str]] = None,
):
    """Add and/or remove tags on a task.

    Additions are normalized via the NLP service; removals are matched
    case-insensitively against the current tags. The final tag list is
    stored sorted. 400 when neither list is supplied or both are empty;
    404 when the task is missing or owned by another user.
    """
    from services.nlp_service import normalize_tag_name

    # Reject requests that name neither operation...
    if tags_add is None and tags_remove is None:
        raise HTTPException(
            status_code=400,
            detail="Either 'tags_add' or 'tags_remove' must be provided"
        )
    # ...and requests where both lists are present but empty.
    if not tags_add and not tags_remove:
        raise HTTPException(
            status_code=400,
            detail="Either 'tags_add' or 'tags_remove' must contain at least one tag"
        )

    task = session.get(Task, task_id)
    if task is None or task.user_id != user_id:
        raise HTTPException(status_code=404, detail="Task not found")

    working_set = set(task.tags or [])

    if tags_add:
        working_set.update(normalize_tag_name(name) for name in tags_add)

    if tags_remove:
        # Compare case-insensitively so "Work" removes "work".
        doomed = {normalize_tag_name(name).lower() for name in tags_remove}
        working_set = {name for name in working_set if name.lower() not in doomed}

    task.tags = sorted(list(working_set))
    task.updated_at = datetime.utcnow()
    session.add(task)
    session.commit()
    session.refresh(task)
    return task
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
@router.patch("/{task_id}/reminder", response_model=TaskRead)
def update_reminder(
    task_id: uuid.UUID,
    session: SessionDep,
    user_id: CurrentUserDep,
    reminder_offset: int | None = None,
    reset_sent: bool = False
):
    """Adjust a task's reminder configuration.

    [Task]: T045
    [From]: specs/008-advanced-features/tasks.md (User Story 2)

    Passing reminder_offset replaces the stored offset; omitting it leaves
    the current value untouched (this endpoint cannot clear an offset).
    reset_sent=True re-arms the reminder, e.g. after the due date changed.
    """
    task = session.get(Task, task_id)
    if task is None or task.user_id != user_id:
        raise HTTPException(status_code=404, detail="Task not found")

    if reminder_offset is not None:
        task.reminder_offset = reminder_offset

    if reset_sent:
        # Allow the reminder to fire again for this task.
        task.reminder_sent = False

    task.updated_at = datetime.utcnow()
    session.add(task)
    session.commit()
    session.refresh(task)
    return task
|
tools/CLAUDE.md
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<claude-mem-context>
|
| 2 |
+
# Recent Activity
|
| 3 |
+
|
| 4 |
+
<!-- This section is auto-generated by claude-mem. Edit content outside the tags. -->
|
| 5 |
+
|
| 6 |
+
### Jan 28, 2026
|
| 7 |
+
|
| 8 |
+
| ID | Time | T | Title | Read |
|
| 9 |
+
|----|------|---|-------|------|
|
| 10 |
+
| #684 | 11:00 PM | 🟣 | Priority Extraction Enhanced with Comprehensive Natural Language Patterns | ~488 |
|
| 11 |
+
| #677 | 10:57 PM | 🔵 | MCP Add Task Tool Implements Natural Language Task Creation | ~362 |
|
| 12 |
+
</claude-mem-context>
|
tools/__init__.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tools for task management AI agent.

[Task]: T010
[From]: specs/004-ai-chatbot/plan.md

Exposes the tool functions the AI agent uses to perform task management
operations through a standardized interface.

Every tool guarantees:
- User isolation via the user_id parameter
- Stateless execution (no shared memory between invocations)
- Structured success/error responses
- Parameter validation

Tool Registration Pattern:
    Tools are registered in the tool_registry for discovery.
    The OpenAI Agents SDK will call these functions directly.
"""
from mcp_server.server import register_tool
from mcp_server.tools import (
    add_task, list_tasks, update_task, complete_task, delete_task,
    complete_all_tasks, delete_all_tasks
)

# Name -> callable table for registration. Insertion order matches the
# original one-by-one registration sequence. Task references:
#   add_task T013; list_tasks T024/T027; update_task T037; complete_task T042;
#   delete_task T047; complete_all_tasks T044/T045; delete_all_tasks T048/T050.
_REGISTERED_TOOLS = {
    "add_task": add_task.add_task,
    "list_tasks": list_tasks.list_tasks,
    "update_task": update_task.update_task,
    "complete_task": complete_task.complete_task,
    "delete_task": delete_task.delete_task,
    "complete_all_tasks": complete_all_tasks.complete_all_tasks,
    "delete_all_tasks": delete_all_tasks.delete_all_tasks,
}

for _tool_name, _tool_fn in _REGISTERED_TOOLS.items():
    register_tool(_tool_name, _tool_fn)

# Export tool modules for direct access by the agent
__all__ = [
    "add_task", "list_tasks", "update_task", "complete_task", "delete_task",
    "complete_all_tasks", "delete_all_tasks"
]
|
tools/add_task.py
ADDED
|
@@ -0,0 +1,320 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MCP tool for adding tasks to the todo list.
|
| 2 |
+
|
| 3 |
+
[Task]: T013, T031
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md, specs/007-intermediate-todo-features/tasks.md (US2)
|
| 5 |
+
|
| 6 |
+
This tool allows the AI agent to create tasks on behalf of users
|
| 7 |
+
through natural language conversations.
|
| 8 |
+
|
| 9 |
+
Now supports tag extraction from natural language patterns.
|
| 10 |
+
"""
|
| 11 |
+
from typing import Optional, Any, List
|
| 12 |
+
from uuid import UUID, uuid4
|
| 13 |
+
from datetime import datetime, timedelta
|
| 14 |
+
|
| 15 |
+
from models.task import Task
|
| 16 |
+
from core.database import engine
|
| 17 |
+
from sqlmodel import Session
|
| 18 |
+
|
| 19 |
+
# Import tag extraction service [T029, T031]
|
| 20 |
+
import sys
|
| 21 |
+
import os
|
| 22 |
+
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
|
| 23 |
+
from services.nlp_service import extract_tags_from_task_data, normalize_tag_name
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
# Tool metadata for MCP registration
|
| 27 |
+
# MCP registration metadata for the `add_task` tool.
# The "description" text is surfaced verbatim to the LLM agent so it knows when
# and how to invoke this tool; "inputSchema" is a JSON Schema for the arguments.
tool_metadata = {
    "name": "add_task",
    "description": """Create a new task in the user's todo list.

Use this tool when the user wants to create, add, or remind themselves about a task.
The task will be associated with their user account and persist across conversations.

Parameters:
- title (required): Brief task title (max 255 characters)
- description (optional): Detailed task description (max 2000 characters)
- due_date (optional): When the task is due (ISO 8601 date string or relative like 'tomorrow', 'next week')
- priority (optional): Task priority - 'low', 'medium', or 'high' (default: 'medium')
- tags (optional): List of tag names for categorization (e.g., ["work", "urgent"])

Natural Language Tag Support [T031]:
- "tagged with X" or "tags X" → extracts tag X
- "add tag X" or "with tag X" → extracts tag X
- "#tagname" → extracts hashtag as tag
- "labeled X" → extracts tag X

Returns: Created task details including ID, title, and confirmation.
""",
    "inputSchema": {
        "type": "object",
        "properties": {
            # NOTE(review): user_id is presumably injected by the chat backend
            # rather than chosen by the model — confirm against the caller.
            "user_id": {
                "type": "string",
                "description": "User ID (UUID) who owns this task"
            },
            # maxLength values mirror the limits enforced by core.validators.
            "title": {
                "type": "string",
                "description": "Task title (brief description)",
                "maxLength": 255
            },
            "description": {
                "type": "string",
                "description": "Detailed task description",
                "maxLength": 2000
            },
            "due_date": {
                "type": "string",
                "description": "Due date in ISO 8601 format (e.g., '2025-01-15') or relative terms"
            },
            "priority": {
                "type": "string",
                "enum": ["low", "medium", "high"],
                "description": "Task priority level"
            },
            "tags": {
                "type": "array",
                "items": {"type": "string"},
                "description": "List of tag names for categorization"
            }
        },
        "required": ["user_id", "title"]
    }
}
| 84 |
+
|
| 85 |
+
|
| 86 |
+
async def add_task(
    user_id: str,
    title: str,
    description: Optional[str] = None,
    due_date: Optional[str] = None,
    priority: Optional[str] = None,
    tags: Optional[List[str]] = None
) -> dict[str, Any]:
    """Create a new task for the user.

    [From]: specs/004-ai-chatbot/spec.md - US1
    [Task]: T031 - Integrate tag extraction for natural language

    Args:
        user_id: User ID (UUID string) who owns this task
        title: Brief task title
        description: Optional detailed description
        due_date: Optional due date (ISO 8601 or relative, e.g. "tomorrow")
        priority: Optional priority level (low/medium/high or free-form text)
        tags: Optional list of tag names

    Returns:
        Dictionary with created task details and a confirmation message

    Raises:
        ValueError: If validation fails or the database write fails
        ValidationError: If task constraints are violated by the validators
    """
    # Imported lazily; presumably avoids a circular import at module load
    # time — TODO confirm whether a top-level import would be safe.
    from core.validators import validate_task_title, validate_task_description

    # Validate inputs up front, before touching the database.
    validated_title = validate_task_title(title)
    validated_description = validate_task_description(description) if description else None

    # Parse due date (ISO 8601 or simple relative phrases); None when unparseable.
    parsed_due_date = _parse_due_date(due_date) if due_date else None

    # Map free-form priority text onto the PriorityLevel enum (defaults to MEDIUM).
    normalized_priority = _normalize_priority(priority)

    # [T031] Merge explicitly-provided tags with tags extracted from the
    # natural-language title/description; normalize and de-duplicate via a set.
    extracted_tags = extract_tags_from_task_data(validated_title, validated_description)
    all_tags = {normalize_tag_name(tag) for tag in extracted_tags}
    if tags:
        all_tags.update(normalize_tag_name(tag) for tag in tags)
    final_tags = sorted(all_tags)

    # Synchronous session: DB work runs inline inside this async tool.
    with Session(engine) as db:
        try:
            task = Task(
                id=uuid4(),
                user_id=UUID(user_id),
                title=validated_title,
                description=validated_description,
                due_date=parsed_due_date,
                priority=normalized_priority,
                tags=final_tags,
                completed=False,
                created_at=datetime.utcnow(),
                updated_at=datetime.utcnow()
            )

            # Persist and re-read so server-side defaults are reflected.
            db.add(task)
            db.commit()
            db.refresh(task)

            return {
                "success": True,
                "task": {
                    "id": str(task.id),
                    "title": task.title,
                    "description": task.description,
                    "due_date": task.due_date.isoformat() if task.due_date else None,
                    "priority": task.priority,
                    "tags": task.tags,
                    "completed": task.completed,
                    "created_at": task.created_at.isoformat()
                },
                "message": f"✅ Task created: {task.title}" + (f" (tags: {', '.join(final_tags)})" if final_tags else "")
            }

        except Exception as e:
            db.rollback()
            # Chain the original exception so the root cause is not lost
            # (the original `raise ValueError(...)` discarded it).
            raise ValueError(f"Failed to create task: {str(e)}") from e
| 184 |
+
|
| 185 |
+
|
| 186 |
+
def _parse_due_date(due_date_str: str) -> Optional[datetime]:
    """Parse a due date from ISO 8601 or simple natural language.

    [From]: specs/004-ai-chatbot/plan.md - Natural Language Processing

    Supports:
    - ISO 8601: "2025-01-15", "2025-01-15T10:00:00Z"
    - Relative: "today", "tomorrow", "next week", "in 3 days", "in 2 weeks"

    Args:
        due_date_str: Date string to parse

    Returns:
        Parsed datetime, or None if the string cannot be interpreted
        (the AI agent is expected to ask the user for clarification).
    """
    import re

    # Try ISO 8601 first. A single fromisoformat attempt replaces the old
    # regex-gated branches and accepts the same inputs plus other valid ISO
    # forms; a trailing "Z" is normalized to an explicit UTC offset.
    # (The original also re-imported `datetime` locally, shadowing the
    # module-level import — removed.)
    try:
        return datetime.fromisoformat(due_date_str.replace("Z", "+00:00"))
    except ValueError:
        pass  # Not ISO 8601 — fall through to natural-language parsing

    # Natural-language parsing (simplified). Relative dates resolve to the
    # END of the target day so a task is not instantly "overdue".
    phrase = due_date_str.lower().strip()
    today = datetime.utcnow().replace(hour=23, minute=59, second=59, microsecond=999999)

    if phrase == "today":
        return today
    if phrase == "tomorrow":
        return today + timedelta(days=1)
    if phrase == "next week":
        return today + timedelta(weeks=1)
    if phrase.startswith("in "):
        # "in X days" / "in X weeks"
        match = re.match(r"in (\d+) (day|days|week|weeks)", phrase)
        if match:
            amount = int(match.group(1))
            if match.group(2).startswith("day"):
                return today + timedelta(days=amount)
            return today + timedelta(weeks=amount)

    # Unrecognized format: return None and let the AI agent ask for clarification.
    return None
| 242 |
+
|
| 243 |
+
|
| 244 |
+
def _normalize_priority(priority: Optional[str]) -> str:
    """Normalize a free-form priority string onto the PriorityLevel enum.

    [From]: models/task.py - Task model
    [Task]: T009-T011 - Priority extraction from natural language

    Maps user phrasing ("urgent", "no rush", "2", ...) onto PriorityLevel.
    Unrecognized input falls back to MEDIUM rather than raising, so the
    agent never fails task creation over a fuzzy priority.

    Args:
        priority: Priority string to normalize (may be None)

    Returns:
        A PriorityLevel member (LOW, MEDIUM, or HIGH). The original code
        returned a bare uppercase string on the exact-match branch but enum
        members everywhere else; this now returns enum members consistently.
        NOTE(review): assumes PriorityLevel has members LOW/MEDIUM/HIGH (as
        this module already references) — confirm against models/task.py.
    """
    from models.task import PriorityLevel

    if not priority:
        return PriorityLevel.MEDIUM  # Default priority

    text = priority.lower().strip()

    # Exact matches map straight onto the enum by member name.
    if text in ("low", "medium", "high"):
        return PriorityLevel[text.upper()]

    # Keyword sets for fuzzy matching of natural-language priority phrases.
    # [Task]: T011 - Integrate priority extraction in MCP tools
    high_keywords = {
        # Explicit high priority keywords
        "urgent", "asap", "important", "critical", "emergency", "immediate",
        "high", "priority", "top", "now", "today", "deadline", "crucial",
        # Numeric mappings
        "3", "high priority", "very important", "must do",
    }
    low_keywords = {
        # Explicit low priority keywords
        "low", "later", "whenever", "optional", "nice to have", "someday",
        "eventually", "routine", "normal", "regular", "backlog",
        # Numeric mappings
        "1", "low priority", "no rush", "can wait",
    }
    medium_keywords = {
        "2", "medium", "normal", "standard", "default", "moderate",
    }

    # High wins over low when phrases overlap (e.g. "urgent but later").
    if text in high_keywords or any(
        keyword in text for keyword in ("urgent", "asap", "critical", "deadline", "today")
    ):
        return PriorityLevel.HIGH

    if text in low_keywords or any(
        keyword in text for keyword in ("whenever", "later", "optional", "someday")
    ):
        return PriorityLevel.LOW

    # Explicit medium synonyms (this set was dead code in the original) and
    # the final fallback both resolve to MEDIUM.
    if text in medium_keywords:
        return PriorityLevel.MEDIUM

    return PriorityLevel.MEDIUM
| 306 |
+
|
| 307 |
+
|
| 308 |
+
# Register tool with MCP server
|
| 309 |
+
def register_tool(mcp_server: Any) -> None:
    """Register the add_task tool with the MCP server.

    [From]: backend/mcp_server/server.py

    Args:
        mcp_server: MCP server instance exposing a ``tool`` decorator factory.
    """
    # Build the decorator from the shared metadata, then apply it to the
    # async tool implementation.
    decorate = mcp_server.tool(
        name=tool_metadata["name"],
        description=tool_metadata["description"]
    )
    decorate(add_task)
tools/complete_all_tasks.py
ADDED
|
@@ -0,0 +1,160 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MCP tool for marking all tasks as complete or incomplete.
|
| 2 |
+
|
| 3 |
+
[Task]: T044, T045
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
|
| 6 |
+
This tool allows the AI agent to mark all tasks with a completion status
|
| 7 |
+
through natural language conversations.
|
| 8 |
+
"""
|
| 9 |
+
from typing import Optional, Any
|
| 10 |
+
from uuid import UUID
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
from sqlalchemy import select
|
| 13 |
+
|
| 14 |
+
from models.task import Task
|
| 15 |
+
from core.database import engine
|
| 16 |
+
from sqlmodel import Session
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Tool metadata for MCP registration
|
| 20 |
+
# MCP registration metadata for the `complete_all_tasks` tool: the description
# guides the LLM agent's tool selection; "inputSchema" is JSON Schema for the
# arguments the agent must supply.
tool_metadata = {
    "name": "complete_all_tasks",
    "description": """Mark all tasks as completed or not completed.

Use this tool when the user wants to:
- Mark all tasks as complete, done, or finished
- Mark all tasks as incomplete or pending
- Complete every task in their list

Parameters:
- user_id (required): User ID (UUID) who owns the tasks
- completed (required): True to mark all complete, False to mark all incomplete
- status_filter (optional): Only affect tasks with this status ('pending' or 'completed')

Returns: Summary with count of tasks updated.
""",
    "inputSchema": {
        "type": "object",
        "properties": {
            "user_id": {
                "type": "string",
                "description": "User ID (UUID) who owns these tasks"
            },
            "completed": {
                "type": "boolean",
                "description": "True to mark all tasks complete, False to mark all incomplete"
            },
            # Optional narrowing: only tasks currently in this state are touched.
            "status_filter": {
                "type": "string",
                "enum": ["pending", "completed"],
                "description": "Optional: Only affect tasks with this status. If not provided, affects all tasks."
            }
        },
        "required": ["user_id", "completed"]
    }
}
| 56 |
+
|
| 57 |
+
|
| 58 |
+
async def complete_all_tasks(
    user_id: str,
    completed: bool,
    status_filter: Optional[str] = None
) -> dict[str, Any]:
    """Mark all of a user's tasks as completed or incomplete.

    [From]: specs/004-ai-chatbot/spec.md - US4

    Args:
        user_id: User ID (UUID string) who owns the tasks
        completed: True to mark all complete, False to mark all incomplete
        status_filter: Optional current-status filter ('pending'/'completed');
            only tasks currently matching it are touched

    Returns:
        Dictionary with counts of updated/skipped tasks and a message

    Raises:
        ValueError: If the user_id is malformed or the update fails
    """
    with Session(engine) as db:
        try:
            # Select this user's tasks, optionally narrowed by current status.
            stmt = select(Task).where(Task.user_id == UUID(user_id))
            if status_filter == "pending":
                stmt = stmt.where(Task.completed == False)  # noqa: E712 - SQL expression
            elif status_filter == "completed":
                stmt = stmt.where(Task.completed == True)  # noqa: E712 - SQL expression

            tasks = list(db.scalars(stmt).all())

            if not tasks:
                return {
                    "success": False,
                    "error": "No tasks found",
                    "message": f"Could not find any tasks{' matching the filter' if status_filter else ''}"
                }

            task_count = len(tasks)
            already_correct = sum(1 for t in tasks if t.completed == completed)

            # Short-circuit when there is nothing to change.
            if already_correct == task_count:
                status_word = "completed" if completed else "pending"
                return {
                    "success": True,
                    "updated_count": 0,
                    "skipped_count": task_count,
                    "message": f"All {task_count} task(s) are already {status_word}."
                }

            # Flip only the tasks whose status actually differs.
            updated_count = 0
            for task in tasks:
                if task.completed != completed:
                    task.completed = completed
                    task.updated_at = datetime.utcnow()
                    db.add(task)
                    updated_count += 1

            db.commit()

            # Build the confirmation message. Fixes two wording bugs in the
            # original: "marked as marked as pending" (doubled verb) and
            # "tasks pending tasks" (doubled noun when a filter was set).
            status_word = "completed" if completed else "pending"
            filter_part = f" {status_filter}" if status_filter else ""
            plural = "" if updated_count == 1 else "s"
            message = f"✅ {updated_count}{filter_part} task{plural} marked as {status_word}"

            return {
                "success": True,
                "updated_count": updated_count,
                "skipped_count": already_correct,
                "total_count": task_count,
                "message": message
            }

        except Exception as e:
            # Roll back on ANY failure — the original caught only ValueError,
            # leaving the session dirty after other database errors.
            db.rollback()
            raise ValueError(f"Failed to update tasks: {str(e)}") from e
| 146 |
+
|
| 147 |
+
|
| 148 |
+
# Register tool with MCP server
|
| 149 |
+
def register_tool(mcp_server: Any) -> None:
    """Attach the complete_all_tasks tool to the given MCP server.

    [From]: backend/mcp_server/server.py

    Args:
        mcp_server: MCP server instance providing a ``tool`` decorator factory.
    """
    name, description = tool_metadata["name"], tool_metadata["description"]
    mcp_server.tool(name=name, description=description)(complete_all_tasks)
|
tools/complete_task.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MCP tool for completing/uncompleting tasks in the todo list.
|
| 2 |
+
|
| 3 |
+
[Task]: T042, T043
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
|
| 6 |
+
This tool allows the AI agent to mark tasks as complete or incomplete
|
| 7 |
+
through natural language conversations.
|
| 8 |
+
"""
|
| 9 |
+
from typing import Optional, Any
|
| 10 |
+
from uuid import UUID
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
from sqlalchemy import select
|
| 13 |
+
|
| 14 |
+
from models.task import Task
|
| 15 |
+
from core.database import engine
|
| 16 |
+
from sqlmodel import Session
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Tool metadata for MCP registration
|
| 20 |
+
# MCP registration metadata for the `complete_task` tool. The description is
# shown to the LLM agent; "inputSchema" is JSON Schema for the arguments.
tool_metadata = {
    "name": "complete_task",
    "description": """Mark a task as completed or not completed (toggle completion status).

Use this tool when the user wants to:
- Mark a task as complete, done, finished
- Mark a task as incomplete, pending, not done
- Unmark a task as complete (revert to pending)
- Toggle the completion status of a task

Parameters:
- user_id (required): User ID (UUID) who owns the task
- task_id (required): Task ID (UUID) of the task to mark complete/incomplete
- completed (required): True to mark as complete, False to mark as incomplete/pending

Returns: Updated task details with confirmation.
""",
    "inputSchema": {
        "type": "object",
        "properties": {
            "user_id": {
                "type": "string",
                "description": "User ID (UUID) who owns this task"
            },
            # NOTE(review): the agent presumably obtains task_id via list_tasks
            # before calling this — confirm the agent prompt enforces that.
            "task_id": {
                "type": "string",
                "description": "Task ID (UUID) of the task to mark complete/incomplete"
            },
            "completed": {
                "type": "boolean",
                "description": "True to mark complete, False to mark incomplete"
            }
        },
        "required": ["user_id", "task_id", "completed"]
    }
}
| 56 |
+
|
| 57 |
+
|
| 58 |
+
async def complete_task(
    user_id: str,
    task_id: str,
    completed: bool
) -> dict[str, Any]:
    """Mark a single task as completed or incomplete.

    [From]: specs/004-ai-chatbot/spec.md - US4

    Args:
        user_id: User ID (UUID string) who owns the task
        task_id: Task ID (UUID string) of the task to update
        completed: True to mark complete, False to mark incomplete

    Returns:
        Dictionary with updated task details, or an error payload when the
        task does not exist (no exception for a missing task, so the agent
        can relay a friendly message)

    Raises:
        ValueError: If the IDs are malformed or the update fails
    """
    with Session(engine) as db:
        try:
            # Scope the lookup to BOTH task id and owner so one user can
            # never toggle another user's task.
            stmt = select(Task).where(
                Task.id == UUID(task_id),
                Task.user_id == UUID(user_id)
            )
            task = db.scalars(stmt).first()

            if not task:
                return {
                    "success": False,
                    "error": "Task not found",
                    "message": f"Could not find task with ID {task_id}"
                }

            # Record the prior state for the response, then apply the change.
            old_status = "completed" if task.completed else "pending"
            task.completed = completed
            task.updated_at = datetime.utcnow()

            db.add(task)
            db.commit()
            db.refresh(task)

            new_status = "completed" if completed else "pending"
            action = "marked as complete" if completed else "marked as pending"

            return {
                "success": True,
                "task": {
                    "id": str(task.id),
                    "title": task.title,
                    "description": task.description,
                    "due_date": task.due_date.isoformat() if task.due_date else None,
                    "priority": task.priority,
                    "completed": task.completed,
                    "created_at": task.created_at.isoformat(),
                    "updated_at": task.updated_at.isoformat()
                },
                "message": f"✅ Task '{task.title}' {action}",
                "old_status": old_status,
                "new_status": new_status
            }

        except Exception as e:
            # Roll back on ANY failure — the original caught only ValueError,
            # leaving the session dirty after other database errors.
            db.rollback()
            raise ValueError(f"Failed to update task completion status: {str(e)}") from e
| 130 |
+
|
| 131 |
+
|
| 132 |
+
# Register tool with MCP server
|
| 133 |
+
def register_tool(mcp_server: Any) -> None:
    """Register the complete_task tool on an MCP server instance.

    [From]: backend/mcp_server/server.py

    Args:
        mcp_server: MCP server instance.
    """
    register = mcp_server.tool(
        name=tool_metadata["name"],
        description=tool_metadata["description"]
    )
    register(complete_task)
|
tools/delete_all_tasks.py
ADDED
|
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MCP tool for deleting all tasks with confirmation.
|
| 2 |
+
|
| 3 |
+
[Task]: T048, T050
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
|
| 6 |
+
This tool allows the AI agent to delete all tasks with safety checks.
|
| 7 |
+
"""
|
| 8 |
+
from typing import Optional, Any
|
| 9 |
+
from uuid import UUID
|
| 10 |
+
from datetime import datetime
|
| 11 |
+
from sqlalchemy import select
|
| 12 |
+
|
| 13 |
+
from models.task import Task
|
| 14 |
+
from core.database import engine
|
| 15 |
+
from sqlmodel import Session
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
# Tool metadata for MCP registration
|
| 19 |
+
# MCP registration metadata for the destructive `delete_all_tasks` tool.
# The "confirmed" flag in the schema is the safety gate: the implementation
# returns a confirmation prompt instead of deleting when it is false.
tool_metadata = {
    "name": "delete_all_tasks",
    "description": """Delete all tasks from the user's todo list permanently.

⚠️ DESTRUCTIVE OPERATION: This will permanently delete all tasks.

Use this tool when the user wants to:
- Delete all tasks, clear entire task list
- Remove every task from their list
- Start fresh with no tasks

IMPORTANT: Always inform the user about how many tasks will be deleted before proceeding.

Parameters:
- user_id (required): User ID (UUID) who owns the tasks
- status_filter (optional): Only delete tasks with this status ('pending' or 'completed')
- confirmed (required): Must be true to proceed with deletion

Returns: Summary with count of tasks deleted.
""",
    "inputSchema": {
        "type": "object",
        "properties": {
            "user_id": {
                "type": "string",
                "description": "User ID (UUID) who owns these tasks"
            },
            "status_filter": {
                "type": "string",
                "enum": ["pending", "completed"],
                "description": "Optional: Only delete tasks with this status. If not provided, deletes all tasks."
            },
            "confirmed": {
                "type": "boolean",
                "description": "Must be true to proceed with deletion. This ensures user confirmation."
            }
        },
        "required": ["user_id", "confirmed"]
    }
}
| 59 |
+
|
| 60 |
+
|
| 61 |
+
async def delete_all_tasks(
    user_id: str,
    confirmed: bool,
    status_filter: Optional[str] = None
) -> dict[str, Any]:
    """Delete all tasks from the user's todo list (with confirmation gate).

    [From]: specs/004-ai-chatbot/spec.md - US5

    When ``confirmed`` is False this does NOT delete anything; it returns the
    matching task count plus ``requires_confirmation`` so the agent can ask
    the user to confirm first.

    Args:
        user_id: User ID (UUID string) who owns the tasks
        confirmed: Must be True to actually delete (safety check)
        status_filter: Optional current-status filter ('pending'/'completed')

    Returns:
        Dictionary with the count of tasks deleted (or pending confirmation)
        and a human-readable message

    Raises:
        ValueError: If the user_id is malformed or the deletion fails
    """
    def build_stmt():
        # Single place to build the (user, optional status) query —
        # previously duplicated in the confirmation and deletion paths.
        stmt = select(Task).where(Task.user_id == UUID(user_id))
        if status_filter == "pending":
            stmt = stmt.where(Task.completed == False)  # noqa: E712 - SQL expression
        elif status_filter == "completed":
            stmt = stmt.where(Task.completed == True)  # noqa: E712 - SQL expression
        return stmt

    # e.g. " pending" — placed directly before "task(s)" with a single space
    # (the original produced a double space in both messages).
    filter_label = f" {status_filter}" if status_filter else ""

    with Session(engine) as db:
        try:
            tasks = list(db.scalars(build_stmt()).all())

            if not tasks:
                return {
                    "success": False,
                    "error": "No tasks found",
                    "message": f"Could not find any tasks{' matching the filter' if status_filter else ''}"
                }

            if not confirmed:
                # Safety gate: report the count and require explicit confirmation.
                return {
                    "success": True,
                    "requires_confirmation": True,
                    "task_count": len(tasks),
                    "message": f"⚠️ This will delete {len(tasks)}{filter_label} task(s). Please confirm by saying 'yes' or 'confirm'."
                }

            # Confirmed — delete everything that matched.
            deleted_count = len(tasks)
            for task in tasks:
                db.delete(task)
            db.commit()

            plural = "" if deleted_count == 1 else "s"
            return {
                "success": True,
                "deleted_count": deleted_count,
                "message": f"✅ Deleted {deleted_count}{filter_label} task{plural}"
            }

        except Exception as e:
            # Roll back on ANY failure — the original caught only ValueError,
            # leaving the session dirty after other database errors.
            db.rollback()
            raise ValueError(f"Failed to delete tasks: {str(e)}") from e
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# Register tool with MCP server
|
| 157 |
+
def register_tool(mcp_server: Any) -> None:
    """Hook the delete_all_tasks tool into the MCP server.

    [From]: backend/mcp_server/server.py

    Args:
        mcp_server: MCP server instance.
    """
    meta = tool_metadata
    mcp_server.tool(name=meta["name"], description=meta["description"])(delete_all_tasks)
|
tools/delete_task.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MCP tool for deleting tasks from the todo list.
|
| 2 |
+
|
| 3 |
+
[Task]: T047
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
|
| 6 |
+
This tool allows the AI agent to permanently delete tasks
|
| 7 |
+
through natural language conversations.
|
| 8 |
+
"""
|
| 9 |
+
from typing import Optional, Any
|
| 10 |
+
from uuid import UUID
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
from sqlalchemy import select
|
| 13 |
+
|
| 14 |
+
from models.task import Task
|
| 15 |
+
from core.database import engine
|
| 16 |
+
from sqlmodel import Session
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Tool metadata for MCP registration
|
| 20 |
+
# Static descriptor consumed by register_tool(); the description text is
# what the LLM reads when deciding whether to call this tool.
tool_metadata = {
    "name": "delete_task",
    "description": """Delete a task from the user's todo list permanently.

Use this tool when the user wants to:
- Delete, remove, or get rid of a task
- Clear a task from their list
- Permanently remove a task

Parameters:
- user_id (required): User ID (UUID) who owns the task
- task_id (required): Task ID (UUID) of the task to delete

Returns: Confirmation of deletion with task details.
""",
    # JSON Schema describing the tool's arguments for the MCP client.
    "inputSchema": {
        "type": "object",
        "properties": {
            "user_id": {
                "type": "string",
                "description": "User ID (UUID) who owns this task"
            },
            "task_id": {
                "type": "string",
                "description": "Task ID (UUID) of the task to delete"
            }
        },
        "required": ["user_id", "task_id"]
    }
}
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
async def delete_task(
    user_id: str,
    task_id: str
) -> dict[str, Any]:
    """Delete a task from the user's todo list.

    [From]: specs/004-ai-chatbot/spec.md - US5

    Args:
        user_id: User ID (UUID string) who owns the task
        task_id: Task ID (UUID string) of the task to delete

    Returns:
        Dictionary with deletion confirmation. ``success`` is False when the
        task does not exist or belongs to a different user.

    Raises:
        ValueError: If an ID is not a valid UUID or deletion fails
    """
    # Get database session (synchronous)
    with Session(engine) as db:
        try:
            # Scope the lookup by user_id so a user can never delete a task
            # they do not own, even with a valid task ID.
            stmt = select(Task).where(
                Task.id == UUID(task_id),
                Task.user_id == UUID(user_id)
            )
            task = db.scalars(stmt).first()

            if not task:
                return {
                    "success": False,
                    "error": "Task not found",
                    "message": f"Could not find task with ID {task_id}"
                }

            # Snapshot the fields BEFORE deleting: after commit the ORM
            # instance is expired, and attribute access on a deleted row
            # can trigger a failed refresh.
            task_details = {
                "id": str(task.id),
                "title": task.title,
                "description": task.description,
                "due_date": task.due_date.isoformat() if task.due_date else None,
                "priority": task.priority,
                "completed": task.completed,
                "created_at": task.created_at.isoformat(),
                "updated_at": task.updated_at.isoformat()
            }

            # Delete the task
            db.delete(task)
            db.commit()

            # FIX: read from the snapshot, not the now-deleted/expired
            # instance (previously `task.title` was read after commit).
            message = f"✅ Task '{task_details['title']}' deleted successfully"

            return {
                "success": True,
                "task": task_details,
                "message": message
            }

        except ValueError as e:
            db.rollback()
            # Chain the cause so the original traceback is preserved.
            raise ValueError(f"Failed to delete task: {str(e)}") from e
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
# Register tool with MCP server
|
| 118 |
+
def register_tool(mcp_server: Any) -> None:
    """Attach the delete_task tool to an MCP server instance.

    [From]: backend/mcp_server/server.py

    Args:
        mcp_server: MCP server instance
    """
    # mcp_server.tool(...) yields a decorator; apply it in a separate
    # statement for clarity.
    decorator = mcp_server.tool(
        name=tool_metadata["name"],
        description=tool_metadata["description"],
    )
    decorator(delete_task)
|
tools/list_tasks.py
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MCP tool for listing tasks from the todo list.
|
| 2 |
+
|
| 3 |
+
[Task]: T024, T027
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
|
| 6 |
+
This tool allows the AI agent to list and filter tasks on behalf of users
|
| 7 |
+
through natural language conversations.
|
| 8 |
+
"""
|
| 9 |
+
from typing import Optional, Any
|
| 10 |
+
from uuid import UUID
|
| 11 |
+
from datetime import datetime, timedelta, date
|
| 12 |
+
from sqlalchemy import select
|
| 13 |
+
|
| 14 |
+
from models.task import Task
|
| 15 |
+
from core.database import engine
|
| 16 |
+
from sqlmodel import Session
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Tool metadata for MCP registration
|
| 20 |
+
# Static descriptor consumed by register_tool(); the description text is
# what the LLM reads when deciding whether to call this tool.
tool_metadata = {
    "name": "list_tasks",
    "description": """List and filter tasks from the user's todo list.

Use this tool when the user wants to see their tasks, ask what they have to do,
or request a filtered view of their tasks.

Parameters:
- user_id (required): User ID (UUID) who owns the tasks
- status (optional): Filter by completion status - 'all', 'pending', or 'completed' (default: 'all')
- due_within_days (optional): Only show tasks due within X days (default: null, shows all)
- limit (optional): Maximum number of tasks to return (default: 50, max: 100)

Returns: List of tasks with titles, descriptions, due dates, priorities, and completion status.
""",
    # JSON Schema describing the tool's arguments for the MCP client.
    "inputSchema": {
        "type": "object",
        "properties": {
            "user_id": {
                "type": "string",
                "description": "User ID (UUID) who owns these tasks"
            },
            "status": {
                "type": "string",
                "enum": ["all", "pending", "completed"],
                "description": "Filter by completion status",
                "default": "all"
            },
            "due_within_days": {
                "type": "number",
                "description": "Only show tasks due within X days (optional)",
                "minimum": 0
            },
            "limit": {
                "type": "number",
                "description": "Maximum tasks to return",
                "default": 50,
                "minimum": 1,
                "maximum": 100
            }
        },
        "required": ["user_id"]
    }
}
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
async def list_tasks(
    user_id: str,
    status: str = "all",
    due_within_days: Optional[int] = None,
    limit: int = 50
) -> dict[str, Any]:
    """List tasks for the user with optional filtering.

    [From]: specs/004-ai-chatbot/spec.md - US2

    Args:
        user_id: User ID (UUID string) who owns the tasks
        status: Filter by completion status ("all", "pending", "completed")
        due_within_days: Optional filter for tasks due within X days
        limit: Maximum number of tasks to return (1-100)

    Returns:
        Dictionary with task list, a human-readable summary, and counts

    Raises:
        ValueError: If validation fails
        Exception: If the database operation fails
    """
    # Validate inputs
    if status not in ["all", "pending", "completed"]:
        raise ValueError(f"Invalid status: {status}. Must be 'all', 'pending', or 'completed'")

    if limit < 1 or limit > 100:
        raise ValueError(f"Invalid limit: {limit}. Must be between 1 and 100")

    # FIX: the input schema declares a minimum of 0 for due_within_days but
    # it was never enforced here.
    if due_within_days is not None and due_within_days < 0:
        raise ValueError(f"Invalid due_within_days: {due_within_days}. Must be >= 0")

    # Get database session (synchronous)
    with Session(engine) as db:
        try:
            # Build query scoped to the owning user
            stmt = select(Task).where(Task.user_id == UUID(user_id))

            # Apply status filter
            # [From]: T027 - Add task status filtering
            if status == "pending":
                stmt = stmt.where(Task.completed == False)
            elif status == "completed":
                stmt = stmt.where(Task.completed == True)

            # Apply due date filter if specified
            if due_within_days is not None:
                # FIX: compare datetimes, not a date against a datetime
                # column; use end-of-day so tasks due later today are kept.
                cutoff = datetime.utcnow().replace(
                    hour=23, minute=59, second=59, microsecond=999999
                ) + timedelta(days=due_within_days)

                # Only show tasks that have a due_date AND are within range
                stmt = stmt.where(
                    Task.due_date.isnot(None),
                    Task.due_date <= cutoff
                )

            # Tasks with due dates first (ascending), then undated tasks
            # by newest creation.
            stmt = stmt.order_by(
                Task.due_date.asc().nulls_last(),
                Task.created_at.desc()
            )

            # Apply limit
            stmt = stmt.limit(limit)

            # Execute query and serialize for the AI
            tasks = db.scalars(stmt).all()
            task_list = [
                {
                    "id": str(task.id),
                    "title": task.title,
                    "description": task.description,
                    "due_date": task.due_date.isoformat() if task.due_date else None,
                    "priority": task.priority,
                    "completed": task.completed,
                    "created_at": task.created_at.isoformat()
                }
                for task in tasks
            ]

            # Summary statistics
            total_count = len(task_list)
            completed_count = sum(1 for t in task_list if t["completed"])
            pending_count = total_count - completed_count

            # Generate summary message for AI
            # [From]: T026 - Handle empty task list responses
            if total_count == 0:
                summary = "No tasks found"
            elif status == "all":
                summary = f"Found {total_count} tasks ({pending_count} pending, {completed_count} completed)"
            elif status == "pending":
                summary = f"Found {total_count} pending tasks"
            elif status == "completed":
                summary = f"Found {total_count} completed tasks"
            else:
                summary = f"Found {total_count} tasks"

            return {
                "success": True,
                "tasks": task_list,
                "summary": summary,
                "total": total_count,
                "pending": pending_count,
                "completed": completed_count
            }

        except Exception as e:
            # Chain the cause so the original traceback is preserved.
            raise Exception(f"Failed to list tasks: {str(e)}") from e
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
def format_task_list_for_ai(tasks: list[dict[str, Any]]) -> str:
    """Format task list for AI response.

    [From]: specs/004-ai-chatbot/spec.md - US2-AC1

    This helper formats the task list in a readable way that the AI can use
    to generate natural language responses.

    Args:
        tasks: List of task dictionaries (keys: title, completed, and
            optionally due_date, priority, description)

    Returns:
        Formatted string representation of tasks

    Example:
        >>> tasks = [
        ...     {"title": "Buy groceries", "completed": False, "due_date": "2025-01-15"},
        ...     {"title": "Finish report", "completed": True}
        ... ]
        >>> format_task_list_for_ai(tasks)
        '1. Buy groceries (Due: 2025-01-15) - ○ Pending\\n2. Finish report - ✓ Completed'
    """
    # FIX: the previous docstring example showed "[Pending]"/"[Completed]",
    # which never matched the actual "- ○ Pending"/"- ✓ Completed" output.
    if not tasks:
        return "No tasks found."

    lines = []
    for i, task in enumerate(tasks, 1):
        # Task title with 1-based numbering
        line = f"{i}. {task['title']}"

        # Due date if available
        if task.get('due_date'):
            line += f" (Due: {task['due_date']})"

        # Priority annotation only when it differs from the default (medium)
        if task.get('priority') and task['priority'] != 'medium':
            line += f" [{task['priority'].capitalize()} Priority]"

        # Completion status
        status = "✓ Completed" if task['completed'] else "○ Pending"
        line += f" - {status}"

        # Description on an indented continuation line, if present
        if task.get('description'):
            line += f"\n   {task['description']}"

        lines.append(line)

    return "\n".join(lines)
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
# Register tool with MCP server
|
| 231 |
+
def register_tool(mcp_server: Any) -> None:
    """Attach the list_tasks tool to an MCP server instance.

    [From]: backend/mcp_server/server.py

    Args:
        mcp_server: MCP server instance
    """
    # mcp_server.tool(...) yields a decorator; apply it in a separate
    # statement for clarity.
    decorator = mcp_server.tool(
        name=tool_metadata["name"],
        description=tool_metadata["description"],
    )
    decorator(list_tasks)
|
tools/update_task.py
ADDED
|
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""MCP tool for updating tasks in the todo list.
|
| 2 |
+
|
| 3 |
+
[Task]: T037
|
| 4 |
+
[From]: specs/004-ai-chatbot/tasks.md
|
| 5 |
+
|
| 6 |
+
This tool allows the AI agent to update existing tasks on behalf of users
|
| 7 |
+
through natural language conversations.
|
| 8 |
+
"""
|
| 9 |
+
from typing import Optional, Any
|
| 10 |
+
from uuid import UUID
|
| 11 |
+
from datetime import datetime
|
| 12 |
+
from sqlalchemy import select
|
| 13 |
+
|
| 14 |
+
from models.task import Task
|
| 15 |
+
from core.database import engine
|
| 16 |
+
from sqlmodel import Session
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# Tool metadata for MCP registration
|
| 20 |
+
# Static descriptor consumed by register_tool(); the description text is
# what the LLM reads when deciding whether to call this tool.
tool_metadata = {
    "name": "update_task",
    "description": """Update an existing task in the user's todo list.

Use this tool when the user wants to modify, change, or edit an existing task.
You must identify the task first (by ID or by matching title/description).

Parameters:
- user_id (required): User ID (UUID) who owns the task
- task_id (required): Task ID (UUID) of the task to update
- title (optional): New task title
- description (optional): New task description
- due_date (optional): New due date (ISO 8601 date string or relative like 'tomorrow', 'next week')
- priority (optional): New priority level - 'low', 'medium', or 'high'
- completed (optional): Mark task as completed or not completed

Returns: Updated task details with confirmation.
""",
    # JSON Schema describing the tool's arguments for the MCP client.
    "inputSchema": {
        "type": "object",
        "properties": {
            "user_id": {
                "type": "string",
                "description": "User ID (UUID) who owns this task"
            },
            "task_id": {
                "type": "string",
                "description": "Task ID (UUID) of the task to update"
            },
            "title": {
                "type": "string",
                "description": "New task title (brief description)",
                "maxLength": 255
            },
            "description": {
                "type": "string",
                "description": "New task description",
                "maxLength": 2000
            },
            "due_date": {
                "type": "string",
                "description": "New due date in ISO 8601 format (e.g., '2025-01-15') or relative terms"
            },
            "priority": {
                "type": "string",
                "enum": ["low", "medium", "high"],
                "description": "New task priority level"
            },
            "completed": {
                "type": "boolean",
                "description": "Mark task as completed or not completed"
            }
        },
        "required": ["user_id", "task_id"]
    }
}
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
async def update_task(
    user_id: str,
    task_id: str,
    title: Optional[str] = None,
    description: Optional[str] = None,
    due_date: Optional[str] = None,
    priority: Optional[str] = None,
    completed: Optional[bool] = None
) -> dict[str, Any]:
    """Update an existing task for the user.

    [From]: specs/004-ai-chatbot/spec.md - US3

    Only the fields that are provided (not None) are modified.

    Args:
        user_id: User ID (UUID string) who owns the task
        task_id: Task ID (UUID string) of the task to update
        title: Optional new task title
        description: Optional new task description
        due_date: Optional new due date (ISO 8601 or relative)
        priority: Optional new priority level (low/medium/high)
        completed: Optional new completion status

    Returns:
        Dictionary with updated task details; ``success`` is False when the
        task is missing or the due date could not be parsed.

    Raises:
        ValueError: If validation fails
    """
    from core.validators import validate_task_title, validate_task_description

    # Get database session (synchronous)
    with Session(engine) as db:
        try:
            # Fetch the task, scoped to the owning user
            stmt = select(Task).where(
                Task.id == UUID(task_id),
                Task.user_id == UUID(user_id)
            )
            task = db.scalars(stmt).first()

            if not task:
                return {
                    "success": False,
                    "error": "Task not found",
                    "message": f"Could not find task with ID {task_id}"
                }

            # Track changes for the confirmation message
            changes = []

            # Update title if provided
            if title is not None:
                validated_title = validate_task_title(title)
                old_title = task.title
                task.title = validated_title
                changes.append(f"title from '{old_title}' to '{validated_title}'")

            # Update description if provided (empty string clears it)
            if description is not None:
                validated_description = validate_task_description(description) if description else None
                task.description = validated_description
                changes.append("description")

            # Update due date if provided
            if due_date is not None:
                parsed_due_date = _parse_due_date(due_date)
                # FIX: previously an unparseable due-date string silently set
                # the task's due date to None, wiping any existing due date.
                # Instead, abort without committing and ask for clarification.
                if parsed_due_date is None:
                    return {
                        "success": False,
                        "error": "Invalid due date",
                        "message": (
                            f"Could not understand due date '{due_date}'. "
                            "Please use a format like '2025-01-15', 'tomorrow', or 'in 3 days'."
                        )
                    }
                task.due_date = parsed_due_date
                changes.append(f"due date to '{parsed_due_date.isoformat()}'")

            # Update priority if provided
            if priority is not None:
                normalized_priority = _normalize_priority(priority)
                old_priority = task.priority
                task.priority = normalized_priority
                changes.append(f"priority from '{old_priority}' to '{normalized_priority}'")

            # Update completion status if provided
            if completed is not None:
                old_status = "completed" if task.completed else "pending"
                task.completed = completed
                new_status = "completed" if completed else "pending"
                changes.append(f"status from '{old_status}' to '{new_status}'")

            # Always bump updated_at, even for no-op calls
            task.updated_at = datetime.utcnow()

            # Save to database
            db.add(task)
            db.commit()
            db.refresh(task)

            # Build success message
            if changes:
                changes_str = " and ".join(changes)
                message = f"✅ Task updated: {changes_str}"
            else:
                message = f"✅ Task '{task.title}' retrieved (no changes made)"

            return {
                "success": True,
                "task": {
                    "id": str(task.id),
                    "title": task.title,
                    "description": task.description,
                    "due_date": task.due_date.isoformat() if task.due_date else None,
                    "priority": task.priority,
                    "completed": task.completed,
                    "created_at": task.created_at.isoformat(),
                    "updated_at": task.updated_at.isoformat()
                },
                "message": message
            }

        except ValueError as e:
            db.rollback()
            # Chain the cause so the original traceback is preserved.
            raise ValueError(f"Failed to update task: {str(e)}") from e
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
def _parse_due_date(due_date_str: str) -> Optional[datetime]:
    """Parse a due date from ISO 8601 or simple natural language.

    [From]: specs/004-ai-chatbot/plan.md - Natural Language Processing

    Supports:
    - ISO 8601: "2025-01-15", "2025-01-15T10:00:00Z"
    - Relative: "today", "tomorrow", "next week", "in 3 days"

    Relative dates resolve to end-of-day (23:59:59 UTC) so a task due
    "today" stays due for the rest of the day.

    Args:
        due_date_str: Date string to parse

    Returns:
        Parsed datetime, or None when the string cannot be understood
        (callers should treat None as "ask the user for clarification").
        Note: the previous docstring claimed a ValueError was raised for
        invalid input; this function never raises for bad formats.
    """
    from datetime import datetime, timedelta
    import re

    # Try ISO 8601 format first
    try:
        # Handle YYYY-MM-DD format
        if re.match(r"^\d{4}-\d{2}-\d{2}$", due_date_str):
            return datetime.fromisoformat(due_date_str)

        # Handle full ISO 8601 with time; map trailing "Z" to a UTC offset
        # because fromisoformat on older Pythons rejects "Z".
        if "T" in due_date_str:
            return datetime.fromisoformat(due_date_str.replace("Z", "+00:00"))
    except ValueError:
        pass  # Fall through to natural language parsing

    # Natural language parsing (simplified)
    text = due_date_str.lower().strip()
    today = datetime.utcnow().replace(hour=23, minute=59, second=59, microsecond=999999)

    # FIX: use the locally imported timedelta instead of the previous
    # __import__('datetime').timedelta antipattern.
    if text == "today":
        return today
    if text == "tomorrow":
        return today + timedelta(days=1)
    if text == "next week":
        return today + timedelta(weeks=1)
    if text.startswith("in "):
        # Parse "in X days/weeks"
        match = re.match(r"in (\d+) (day|days|week|weeks)", text)
        if match:
            amount = int(match.group(1))
            unit = match.group(2)
            if unit.startswith("day"):
                return today + timedelta(days=amount)
            if unit.startswith("week"):
                return today + timedelta(weeks=amount)

    # Parsing failed: return None and let the AI agent ask for clarification
    return None
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
def _normalize_priority(priority: Optional[str]) -> str:
    """Normalize a priority string to "low", "medium", or "high".

    [From]: models/task.py - Task model

    Unknown values fall back to "medium" rather than raising, so a slightly
    off AI-supplied priority never aborts an update. (The previous docstring
    incorrectly claimed a ValueError was raised for invalid input.)

    Args:
        priority: Priority string to normalize (case-insensitive); None or
            empty yields the default "medium".

    Returns:
        Normalized priority: "low", "medium", or "high"
    """
    if not priority:
        return "medium"  # Default priority

    key = priority.lower().strip()

    if key in ("low", "medium", "high"):
        return key

    # Map common alternatives seen in natural-language requests
    aliases = {
        "1": "low",
        "2": "medium",
        "3": "high",
        "urgent": "high",
        "important": "high",
        "normal": "medium",
        "routine": "low",
    }
    # Anything unrecognized degrades gracefully to the default.
    return aliases.get(key, "medium")
|
| 289 |
+
|
| 290 |
+
|
| 291 |
+
# Register tool with MCP server
|
| 292 |
+
def register_tool(mcp_server: Any) -> None:
    """Attach the update_task tool to an MCP server instance.

    [From]: backend/mcp_server/server.py

    Args:
        mcp_server: MCP server instance
    """
    # mcp_server.tool(...) yields a decorator; apply it in a separate
    # statement for clarity.
    decorator = mcp_server.tool(
        name=tool_metadata["name"],
        description=tool_metadata["description"],
    )
    decorator(update_task)
|
user.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""User model and authentication schemas for FastAPI backend.
|
| 2 |
+
|
| 3 |
+
[Task]: T016
|
| 4 |
+
[From]: specs/001-user-auth/data-model.md
|
| 5 |
+
"""
|
| 6 |
+
import uuid
|
| 7 |
+
from datetime import datetime
|
| 8 |
+
from typing import Optional
|
| 9 |
+
from sqlmodel import Field, SQLModel
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class UserBase(SQLModel):
    """Base User model with common fields shared by the table model and schemas."""
    # Unique, indexed login identifier; max_length matches the DB column width.
    email: str = Field(unique=True, index=True, max_length=255)
    # Defaults capture creation time; updated_at must be refreshed by
    # application code on change (no DB onupdate hook is configured here).
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class User(UserBase, table=True):
    """Full User model backed by the "users" database table.

    FastAPI backend handles ALL authentication logic:
    - Password hashing (bcrypt)
    - JWT token generation/verification
    - User creation and validation
    """
    __tablename__ = "users"

    # Server-generated UUID primary key.
    id: uuid.UUID = Field(default_factory=uuid.uuid4, primary_key=True)
    hashed_password: str = Field(max_length=255)  # bcrypt hash, not plaintext
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class UserCreate(SQLModel):
    """Schema for user registration.

    Frontend sends plaintext password, backend hashes it before storage.
    """
    email: str
    password: str  # Plaintext password, will be hashed before storage
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class UserRead(SQLModel):
    """Schema for returning user data in API responses.

    Deliberately excludes hashed_password so it can never leak to clients.
    """
    id: uuid.UUID
    email: str
    created_at: datetime
    updated_at: datetime
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class UserLogin(SQLModel):
    """Schema for user login: plaintext credentials verified by the backend."""
    email: str
    password: str
|
validators.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Validation utilities for the application.
|
| 2 |
+
|
| 3 |
+
[Task]: T008
|
| 4 |
+
[From]: specs/004-ai-chatbot/plan.md
|
| 5 |
+
"""
|
| 6 |
+
from pydantic import ValidationError, model_validator
|
| 7 |
+
from pydantic_core import PydanticUndefined
|
| 8 |
+
from typing import Any
|
| 9 |
+
from sqlmodel import Field
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# Constants from spec
MAX_MESSAGE_LENGTH = 10000  # FR-042: Maximum message content length


def validate_message_length(content: str) -> str:
    """Check that chat message content is non-empty and within limits.

    [From]: specs/004-ai-chatbot/spec.md - FR-042

    Args:
        content: Message content to validate.

    Returns:
        str: The content, unchanged, when it passes validation.

    Raises:
        ValidationError: If content is empty or exceeds
            MAX_MESSAGE_LENGTH characters.
    """
    if not content:
        raise ValidationError("Message content cannot be empty", "content")

    # NOTE(review): whitespace-only content passes this check (no strip()
    # here, unlike validate_task_title) — confirm that is intentional.
    length = len(content)
    if length > MAX_MESSAGE_LENGTH:
        raise ValidationError(
            f"Message content exceeds maximum length of {MAX_MESSAGE_LENGTH} characters "
            f"(got {length} characters)",
            "content"
        )

    return content
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def validate_conversation_id(conversation_id: Any) -> int | None:
|
| 53 |
+
"""Validate conversation ID.
|
| 54 |
+
|
| 55 |
+
Args:
|
| 56 |
+
conversation_id: Conversation ID to validate
|
| 57 |
+
|
| 58 |
+
Returns:
|
| 59 |
+
int | None: Validated conversation ID or None
|
| 60 |
+
|
| 61 |
+
Raises:
|
| 62 |
+
ValidationError: If conversation_id is invalid
|
| 63 |
+
"""
|
| 64 |
+
if conversation_id is None:
|
| 65 |
+
return None
|
| 66 |
+
|
| 67 |
+
if isinstance(conversation_id, int):
|
| 68 |
+
if conversation_id <= 0:
|
| 69 |
+
raise ValidationError("Conversation ID must be positive", "conversation_id")
|
| 70 |
+
return conversation_id
|
| 71 |
+
|
| 72 |
+
if isinstance(conversation_id, str):
|
| 73 |
+
try:
|
| 74 |
+
conv_id = int(conversation_id)
|
| 75 |
+
if conv_id <= 0:
|
| 76 |
+
raise ValidationError("Conversation ID must be positive", "conversation_id")
|
| 77 |
+
return conv_id
|
| 78 |
+
except ValueError:
|
| 79 |
+
raise ValidationError("Conversation ID must be a valid integer", "conversation_id")
|
| 80 |
+
|
| 81 |
+
raise ValidationError("Conversation ID must be an integer or null", "conversation_id")
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
# Task validation constants
MAX_TASK_TITLE_LENGTH = 255  # From Task model
MAX_TASK_DESCRIPTION_LENGTH = 2000  # From Task model


def validate_task_title(title: str) -> str:
    """Validate and normalize a task title.

    [From]: models/task.py - Task.title

    Args:
        title: Task title to validate.

    Returns:
        str: The title with surrounding whitespace stripped.

    Raises:
        ValidationError: If the title is empty/blank or exceeds
            MAX_TASK_TITLE_LENGTH after stripping.
    """
    cleaned = title.strip() if title else ""
    if not cleaned:
        raise ValidationError("Task title cannot be empty", "title")

    if len(cleaned) > MAX_TASK_TITLE_LENGTH:
        raise ValidationError(
            f"Task title exceeds maximum length of {MAX_TASK_TITLE_LENGTH} characters "
            f"(got {len(cleaned)} characters)",
            "title"
        )

    return cleaned
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def validate_task_description(description: str | None) -> str:
    """Validate and normalize a task description.

    [From]: models/task.py - Task.description

    Args:
        description: Task description to validate; None means "no
            description" and maps to the empty string.

    Returns:
        str: The stripped description ("" when input was None).

    Raises:
        ValidationError: If the stripped description exceeds
            MAX_TASK_DESCRIPTION_LENGTH.
    """
    if description is None:
        return ""

    cleaned = description.strip()
    if len(cleaned) > MAX_TASK_DESCRIPTION_LENGTH:
        raise ValidationError(
            f"Task description exceeds maximum length of {MAX_TASK_DESCRIPTION_LENGTH} characters "
            f"(got {len(cleaned)} characters)",
            "description"
        )

    return cleaned
|
verify_schema.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Database schema verification script.
|
| 2 |
+
|
| 3 |
+
[Task]: T022, T023
|
| 4 |
+
[From]: specs/001-user-auth/tasks.md
|
| 5 |
+
|
| 6 |
+
This script verifies that the database schema is correct for authentication.
|
| 7 |
+
"""
|
| 8 |
+
import sys
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
|
| 11 |
+
# Add parent directory to path for imports
|
| 12 |
+
sys.path.insert(0, str(Path(__file__).parent.parent))
|
| 13 |
+
|
| 14 |
+
from sqlmodel import Session, select, text
|
| 15 |
+
from core.config import engine
|
| 16 |
+
from models.user import User
|
| 17 |
+
from models.task import Task
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _render_column(row) -> None:
    """Print one column row from information_schema.columns."""
    print(f"   - {row.column_name}: {row.data_type}")


def _render_fk(row) -> None:
    """Print one foreign-key constraint row (two lines)."""
    print(f"   - {row.constraint_name}:")
    print(f"     {row.column_name} → {row.foreign_table_name}.{row.foreign_column_name}")


def _run_check(session, check_label: str, error_label: str,
               success_label: str, sql: str, render) -> bool:
    """Run one schema query and print its rows.

    Args:
        session: Open SQLModel session.
        check_label: Label for the "📋 Checking ..." line.
        error_label: Label for the "❌ Error checking ..." line on failure.
        success_label: Heading printed above the rows on success.
        sql: Raw SQL to execute.
        render: Callable that prints a single result row.

    Returns:
        bool: True if the query ran, False if it raised.
    """
    print(f"📋 Checking {check_label}...")
    try:
        result = session.exec(text(sql))
        print(f"✅ {success_label}:")
        for row in result:
            render(row)
    except Exception as e:
        # Broad catch is deliberate: this is a diagnostic script and any
        # failure (missing table, bad connection) should be reported,
        # not crash the run.
        print(f"❌ Error checking {error_label}: {e}")
        return False
    return True


def verify_schema():
    """Verify database schema for authentication.

    Runs five checks (users columns, tasks columns, tasks indexes, tasks
    foreign keys, users unique constraints) against the configured
    database and prints the results.

    Returns:
        bool: True only when every check succeeds; False on the first
            failing check.
    """
    print("🔍 Verifying database schema...\n")

    # (check_label, error_label, success_label, sql, render) per check.
    # Labels are kept exactly as the original script printed them.
    checks = [
        (
            "users table", "users table", "Users table columns",
            """
            SELECT column_name, data_type, is_nullable, column_default
            FROM information_schema.columns
            WHERE table_name = 'users'
            ORDER BY ordinal_position;
            """,
            _render_column,
        ),
        (
            "tasks table", "tasks table", "Tasks table columns",
            """
            SELECT column_name, data_type, is_nullable, column_default
            FROM information_schema.columns
            WHERE table_name = 'tasks'
            ORDER BY ordinal_position;
            """,
            _render_column,
        ),
        (
            "indexes on tasks table", "indexes", "Indexes on tasks table",
            """
            SELECT indexname, indexdef
            FROM pg_indexes
            WHERE tablename = 'tasks';
            """,
            lambda row: print(f"   - {row.indexname}"),
        ),
        (
            "foreign key constraints", "foreign keys", "Foreign key constraints",
            """
            SELECT
                tc.constraint_name,
                tc.table_name,
                kcu.column_name,
                ccu.table_name AS foreign_table_name,
                ccu.column_name AS foreign_column_name
            FROM information_schema.table_constraints AS tc
            JOIN information_schema.key_column_usage AS kcu
                ON tc.constraint_name = kcu.constraint_name
            JOIN information_schema.constraint_column_usage AS ccu
                ON ccu.constraint_name = tc.constraint_name
            WHERE tc.constraint_type = 'FOREIGN KEY'
                AND tc.table_name = 'tasks';
            """,
            _render_fk,
        ),
        (
            "unique constraints", "unique constraints",
            "Unique constraints on users table",
            """
            SELECT
                tc.constraint_name,
                tc.table_name,
                kcu.column_name
            FROM information_schema.table_constraints AS tc
            JOIN information_schema.key_column_usage AS kcu
                ON tc.constraint_name = kcu.constraint_name
            WHERE tc.constraint_type = 'UNIQUE'
                AND tc.table_name = 'users';
            """,
            lambda row: print(f"   - {row.constraint_name}: {row.column_name}"),
        ),
    ]

    with Session(engine) as session:
        for i, (check_label, error_label, success_label, sql, render) in enumerate(checks):
            # Blank separator line between checks (matches original output).
            if i:
                print()
            if not _run_check(session, check_label, error_label,
                              success_label, sql, render):
                return False

    print("\n✅ Schema verification complete!")
    print("\n🎉 Database schema is ready for authentication.")
    return True
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
if __name__ == "__main__":
    # Exit status: 0 when every schema check passed, 1 otherwise.
    raise SystemExit(0 if verify_schema() else 1)
|