diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..8a2476958d21a763b7f619da20c67d959bb0618e
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,77 @@
+# Python cache
+__pycache__
+*.py[cod]
+*$py.class
+*.so
+.Python
+
+# Virtual environments
+.venv
+venv
+env
+ENV
+
+# Testing
+.pytest_cache
+.coverage
+htmlcov
+.tox
+.nox
+
+# Environment variables
+.env
+.env*.local
+.env.production
+
+# IDE
+.vscode
+.idea
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Git
+.git
+.gitignore
+.gitattributes
+
+# Documentation (keep README.md for HF Spaces config)
+docs
+
+# CI/CD
+.github
+.gitlab-ci.yml
+.travis.yml
+
+# Database
+*.db
+*.sqlite
+*.sqlite3
+
+# Logs
+*.log
+logs
+
+# Misc
+.cache
+.temp
+tmp
+dist
+build
+.vercel
+.claude
+
+# Test files
+test_*.py
+verify_*.py
+*_test.json
+*_response.json
+*.token.json
+token.txt
+signin_*.json
+signup_*.json
+fresh_token.json
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000000000000000000000000000000000000..e640992b4229928dee433607414d06f11ac87b69
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,55 @@
+# Backend Environment Variables
+# Copy this file to .env and fill in your values
+
+# Database Configuration
+DATABASE_URL=sqlite:///./todo.db
+# For production, use PostgreSQL:
+# DATABASE_URL=postgresql://user:password@host:5432/database
+
+# JWT Configuration
+JWT_SECRET_KEY=your-super-secret-key-change-this-min-32-characters-long
+JWT_ALGORITHM=HS256
+ACCESS_TOKEN_EXPIRE_MINUTES=30
+
+# CORS Configuration
+CORS_ORIGINS=http://localhost:3000,http://localhost:3001,http://localhost:3002
+# For production, add your frontend URL:
+# CORS_ORIGINS=https://your-frontend-url.com,http://localhost:3000
+
+# Gmail SMTP Configuration (for password reset emails)
+# To get app-specific password:
+# 1. Enable 2-Factor Authentication on your Gmail account
+# 2. Go to Google Account → Security → 2-Step Verification → App passwords
+# 3. Select "Mail" and "Other (Custom name)"
+# 4. Copy the 16-character password
+SMTP_HOST=smtp.gmail.com
+SMTP_PORT=587
+SMTP_USERNAME=your_email@gmail.com
+SMTP_PASSWORD=your_app_specific_password_here
+SMTP_USE_TLS=true
+EMAIL_FROM=your_email@gmail.com
+EMAIL_FROM_NAME=Todo Application
+
+# Frontend URL (for password reset links)
+FRONTEND_URL=http://localhost:3000
+# For production:
+# FRONTEND_URL=https://your-frontend-url.com
+
+# Password Reset Configuration
+PASSWORD_RESET_TOKEN_EXPIRY_MINUTES=15
+PASSWORD_RESET_MAX_REQUESTS_PER_HOUR=3
+
+# Cohere AI API Configuration
+# Get your API key from: https://dashboard.cohere.com/api-keys
+COHERE_API_KEY=your-cohere-api-key-here
+
+# AI Chatbot Configuration
+# Cohere model settings for conversational AI
+COHERE_MODEL=command-r-plus
+COHERE_TEMPERATURE=0.3
+COHERE_MAX_TOKENS=2000
+COHERE_TIMEOUT=30
+
+# MCP Tools Configuration
+MCP_SERVER_NAME=todo-tools
+MCP_SERVER_VERSION=1.0.0
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f1f5d59e037b610af7bc20e6842223dc8a6a4ea9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,37 @@
+.vercel
+.env
+.env*.local
+
+# Database
+*.db
+*.db-journal
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+*.egg-info/
+dist/
+build/
+
+# Test data and tokens
+*.token.json
+*_token.json
+token.txt
+*_test.json
+*_response.json
+signin_*.json
+signup_*.json
+fresh_token.json
+
+# IDE
+.vscode/
+.idea/
+*.swp
+*.swo
+
+# Logs
+*.log
+backend.log
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..4059683fe97c6da3bee9f5a6a2c03b87e47a0daa
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,27 @@
+# Use Python 3.12 slim image as base
+FROM python:3.12-slim
+
+# Set working directory
+WORKDIR /app
+
+# Copy requirements file
+COPY requirements.txt .
+
+# Install dependencies
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Copy the application code
+COPY src/ ./src/
+COPY templates/ ./templates/
+COPY alembic/ ./alembic/
+COPY alembic.ini .
+COPY init_db.py .
+
+# Create directory for database
+RUN mkdir -p /app/data
+
+# Expose port 7860 (Hugging Face default) and 8000
+EXPOSE 7860 8000
+
+# Run the application
+CMD ["uvicorn", "src.main:app", "--host", "0.0.0.0", "--port", "7860"]
\ No newline at end of file
diff --git a/Procfile b/Procfile
new file mode 100644
index 0000000000000000000000000000000000000000..d29efd927cdd992b2133fb512a346b47fadfd321
--- /dev/null
+++ b/Procfile
@@ -0,0 +1 @@
+web: uvicorn src.main:app --host 0.0.0.0 --port $PORT
diff --git a/README.md b/README.md
index 8a73a7f8fb1305752327ca9626dc39835f3fa532..0a130adcc55c066723d3944f24bfaa379396b0df 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,33 @@
---
-title: Hackathon Phase 4
-emoji: 📈
-colorFrom: gray
-colorTo: green
+title: AI-Todo Chatbot
+emoji: 💻
+colorFrom: blue
+colorTo: purple
sdk: docker
+app_port: 7860
pinned: false
---
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+# AI-Powered Todo Application
+
+A FastAPI-based todo application with AI chatbot integration using Cohere API.
+
+## Features
+
+- User authentication with JWT
+- Task management with subtasks
+- AI-powered conversational interface
+- Password reset functionality
+- RESTful API
+
+## Environment Variables
+
+Set these in your Hugging Face Space settings:
+
+- `COHERE_API_KEY`: Your Cohere API key
+- `JWT_SECRET_KEY`: Secret key for JWT tokens
+- `DATABASE_URL`: Database connection string (default: sqlite:///./data/todo.db)
+
+## API Documentation
+
+Once deployed, visit `/docs` for interactive API documentation.
diff --git a/alembic.ini b/alembic.ini
new file mode 100644
index 0000000000000000000000000000000000000000..dd66694d3b99707d981dc1c9c1c67079c1f4dc25
--- /dev/null
+++ b/alembic.ini
@@ -0,0 +1,114 @@
+# A generic, single database configuration.
+
+[alembic]
+# path to migration scripts
+script_location = alembic
+
+# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
+# Uncomment the line below if you want the files to be prepended with date and time
+# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
+
+# sys.path path, will be prepended to sys.path if present.
+# defaults to the current working directory.
+prepend_sys_path = .
+
+# timezone to use when rendering the date within the migration file
+# as well as the filename.
+# If specified, requires the python>=3.9 or backports.zoneinfo library.
+# Any required deps can be installed by adding `alembic[tz]` to the pip requirements
+# string value is passed to ZoneInfo()
+# leave blank for localtime
+# timezone =
+
+# max length of characters to apply to the
+# "slug" field
+# truncate_slug_length = 40
+
+# set to 'true' to run the environment during
+# the 'revision' command, regardless of autogenerate
+# revision_environment = false
+
+# set to 'true' to allow .pyc and .pyo files without
+# a source .py file to be detected as revisions in the
+# versions/ directory
+# sourceless = false
+
+# version location specification; This defaults
+# to alembic/versions. When using multiple version
+# directories, initial revisions must be specified with --version-path.
+# The path separator used here should be the separator specified by "version_path_separator" below.
+# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
+
+# version path separator; As mentioned above, this is the character used to split
+# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
+# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
+# Valid values for version_path_separator are:
+#
+# version_path_separator = :
+# version_path_separator = ;
+# version_path_separator = space
+version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
+
+# set to 'true' to search source files recursively
+# in each "version_locations" directory
+# new in Alembic version 1.10
+# recursive_version_locations = false
+
+# the output encoding used when revision files
+# are written from script.py.mako
+# output_encoding = utf-8
+
+sqlalchemy.url = postgresql://postgres:postgres@localhost:5432/todo_db
+
+
+[post_write_hooks]
+# post_write_hooks defines scripts or Python functions that are run
+# on newly generated revision scripts. See the documentation for further
+# detail and examples
+
+# format using "black" - use the console_scripts runner, against the "black" entrypoint
+# hooks = black
+# black.type = console_scripts
+# black.entrypoint = black
+# black.options = -l 79 REVISION_SCRIPT_FILENAME
+
+# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
+# hooks = ruff
+# ruff.type = exec
+# ruff.executable = %(here)s/.venv/bin/ruff
+# ruff.options = --fix REVISION_SCRIPT_FILENAME
+
+# Logging configuration
+[loggers]
+keys = root,sqlalchemy,alembic
+
+[handlers]
+keys = console
+
+[formatters]
+keys = generic
+
+[logger_root]
+level = WARN
+handlers = console
+qualname =
+
+[logger_sqlalchemy]
+level = WARN
+handlers =
+qualname = sqlalchemy.engine
+
+[logger_alembic]
+level = INFO
+handlers =
+qualname = alembic
+
+[handler_console]
+class = StreamHandler
+args = (sys.stderr,)
+level = NOTSET
+formatter = generic
+
+[formatter_generic]
+format = %(levelname)-5.5s [%(name)s] %(message)s
+datefmt = %H:%M:%S
diff --git a/alembic/env.py b/alembic/env.py
new file mode 100644
index 0000000000000000000000000000000000000000..58809420135d221b76788c80d1f3c3dee985ae42
--- /dev/null
+++ b/alembic/env.py
@@ -0,0 +1,94 @@
+from logging.config import fileConfig
+
+from sqlalchemy import engine_from_config
+from sqlalchemy import pool
+
+from alembic import context
+
+# Import your models here
+import sys
+import os
+sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
+
+from src.models.user import User
+from src.models.task import Task
+from sqlmodel import SQLModel
+
+# Load environment variables
+from dotenv import load_dotenv
+load_dotenv()
+
+# this is the Alembic Config object, which provides
+# access to the values within the .ini file in use.
+config = context.config
+
+# Override sqlalchemy.url with DATABASE_URL from environment
+database_url = os.getenv("DATABASE_URL")
+if database_url:
+ config.set_main_option("sqlalchemy.url", database_url)
+
+# Interpret the config file for Python logging.
+# This line sets up loggers basically.
+if config.config_file_name is not None:
+ fileConfig(config.config_file_name)
+
+# add your model's MetaData object here
+# for 'autogenerate' support
+target_metadata = SQLModel.metadata
+
+# other values from the config, defined by the needs of env.py,
+# can be acquired:
+# my_important_option = config.get_main_option("my_important_option")
+# ... etc.
+
+
+def run_migrations_offline() -> None:
+ """Run migrations in 'offline' mode.
+
+ This configures the context with just a URL
+ and not an Engine, though an Engine is acceptable
+ here as well. By skipping the Engine creation
+ we don't even need a DBAPI to be available.
+
+ Calls to context.execute() here emit the given string to the
+ script output.
+
+ """
+ url = config.get_main_option("sqlalchemy.url")
+ context.configure(
+ url=url,
+ target_metadata=target_metadata,
+ literal_binds=True,
+ dialect_opts={"paramstyle": "named"},
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+def run_migrations_online() -> None:
+ """Run migrations in 'online' mode.
+
+ In this scenario we need to create an Engine
+ and associate a connection with the context.
+
+ """
+ connectable = engine_from_config(
+ config.get_section(config.config_ini_section, {}),
+ prefix="sqlalchemy.",
+ poolclass=pool.NullPool,
+ )
+
+ with connectable.connect() as connection:
+ context.configure(
+ connection=connection, target_metadata=target_metadata
+ )
+
+ with context.begin_transaction():
+ context.run_migrations()
+
+
+if context.is_offline_mode():
+ run_migrations_offline()
+else:
+ run_migrations_online()
diff --git a/alembic/script.py.mako b/alembic/script.py.mako
new file mode 100644
index 0000000000000000000000000000000000000000..55df2863d206fa1678abb4c92e90c45d3f85c114
--- /dev/null
+++ b/alembic/script.py.mako
@@ -0,0 +1,24 @@
+"""${message}
+
+Revision ID: ${up_revision}
+Revises: ${down_revision | comma,n}
+Create Date: ${create_date}
+
+"""
+from alembic import op
+import sqlalchemy as sa
+${imports if imports else ""}
+
+# revision identifiers, used by Alembic.
+revision = ${repr(up_revision)}
+down_revision = ${repr(down_revision)}
+branch_labels = ${repr(branch_labels)}
+depends_on = ${repr(depends_on)}
+
+
+def upgrade() -> None:
+ ${upgrades if upgrades else "pass"}
+
+
+def downgrade() -> None:
+ ${downgrades if downgrades else "pass"}
diff --git a/alembic/versions/001_initial_schema.py b/alembic/versions/001_initial_schema.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e66d970ce2e612cf19282cb047037e712ab0fe8
--- /dev/null
+++ b/alembic/versions/001_initial_schema.py
@@ -0,0 +1,52 @@
+"""Initial schema
+
+Revision ID: 001
+Revises:
+Create Date: 2026-02-05
+
+"""
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = '001'
+down_revision = None
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ # Create users table
+ op.create_table(
+ 'users',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('email', sa.String(length=255), nullable=False),
+ sa.Column('hashed_password', sa.String(length=255), nullable=False),
+ sa.Column('created_at', sa.DateTime(), nullable=False),
+ sa.Column('updated_at', sa.DateTime(), nullable=False),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('idx_users_email', 'users', ['email'], unique=True)
+
+ # Create tasks table
+ op.create_table(
+ 'tasks',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('title', sa.String(length=500), nullable=False),
+ sa.Column('description', sa.Text(), nullable=True),
+ sa.Column('completed', sa.Boolean(), nullable=False, server_default='false'),
+ sa.Column('created_at', sa.DateTime(), nullable=False),
+ sa.Column('updated_at', sa.DateTime(), nullable=False),
+ sa.PrimaryKeyConstraint('id'),
+ sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE')
+ )
+ op.create_index('idx_tasks_user_id', 'tasks', ['user_id'], unique=False)
+
+
+def downgrade() -> None:
+ op.drop_index('idx_tasks_user_id', table_name='tasks')
+ op.drop_table('tasks')
+ op.drop_index('idx_users_email', table_name='users')
+ op.drop_table('users')
diff --git a/alembic/versions/002_add_password_reset_tokens.py b/alembic/versions/002_add_password_reset_tokens.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb14ec08e7265fa3dc1a1cfbe0efc5f05734a690
--- /dev/null
+++ b/alembic/versions/002_add_password_reset_tokens.py
@@ -0,0 +1,40 @@
+"""Add password reset tokens table
+
+Revision ID: 002
+Revises: a6878af5b66f
+Create Date: 2026-02-07
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = '002'
+down_revision = 'a6878af5b66f'
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ # Create password_reset_tokens table
+ op.create_table(
+ 'password_reset_tokens',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('token', sa.String(length=255), nullable=False),
+ sa.Column('expires_at', sa.DateTime(), nullable=False),
+ sa.Column('used', sa.Boolean(), nullable=False, server_default='false'),
+ sa.Column('created_at', sa.DateTime(), nullable=False),
+ sa.PrimaryKeyConstraint('id'),
+ sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE')
+ )
+ op.create_index('idx_password_reset_tokens_user_id', 'password_reset_tokens', ['user_id'], unique=False)
+ op.create_index('idx_password_reset_tokens_token', 'password_reset_tokens', ['token'], unique=True)
+ op.create_index('idx_password_reset_tokens_expires_at', 'password_reset_tokens', ['expires_at'], unique=False)
+
+
+def downgrade() -> None:
+ op.drop_index('idx_password_reset_tokens_expires_at', table_name='password_reset_tokens')
+ op.drop_index('idx_password_reset_tokens_token', table_name='password_reset_tokens')
+ op.drop_index('idx_password_reset_tokens_user_id', table_name='password_reset_tokens')
+ op.drop_table('password_reset_tokens')
diff --git a/alembic/versions/003_ai_chatbot_tables.py b/alembic/versions/003_ai_chatbot_tables.py
new file mode 100644
index 0000000000000000000000000000000000000000..67218d18d389c7325f1a685fac9f0f3d9a04e302
--- /dev/null
+++ b/alembic/versions/003_ai_chatbot_tables.py
@@ -0,0 +1,56 @@
+"""
+Alembic migration: Add conversations and messages tables for AI chatbot.
+
+Revision ID: 003_ai_chatbot_tables
+Revises: 002
+Create Date: 2026-02-15
+"""
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers
+revision = '003_ai_chatbot_tables'
+down_revision = '002'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+ # Create conversations table
+ op.create_table(
+ 'conversations',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('created_at', sa.DateTime(), nullable=False, server_default=sa.text('CURRENT_TIMESTAMP')),
+ sa.Column('updated_at', sa.DateTime(), nullable=False, server_default=sa.text('CURRENT_TIMESTAMP')),
+ sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('ix_conversations_user_id', 'conversations', ['user_id'])
+ op.create_index('ix_conversations_updated_at', 'conversations', ['updated_at'])
+
+ # Create messages table
+ op.create_table(
+ 'messages',
+ sa.Column('id', sa.Integer(), nullable=False),
+ sa.Column('conversation_id', sa.Integer(), nullable=False),
+ sa.Column('user_id', sa.Integer(), nullable=False),
+ sa.Column('role', sa.String(length=20), nullable=False),
+ sa.Column('content', sa.Text(), nullable=False),
+ sa.Column('created_at', sa.DateTime(), nullable=False, server_default=sa.text('CURRENT_TIMESTAMP')),
+ sa.CheckConstraint("role IN ('user', 'assistant')", name='chk_messages_role'),
+ sa.CheckConstraint("LENGTH(TRIM(content)) > 0", name='chk_messages_content_not_empty'),
+ sa.ForeignKeyConstraint(['conversation_id'], ['conversations.id'], ondelete='CASCADE'),
+ sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),
+ sa.PrimaryKeyConstraint('id')
+ )
+ op.create_index('ix_messages_conversation_id', 'messages', ['conversation_id'])
+ op.create_index('ix_messages_user_id', 'messages', ['user_id'])
+ op.create_index('ix_messages_created_at', 'messages', ['created_at'])
+ op.create_index('ix_messages_conversation_created', 'messages', ['conversation_id', 'created_at'])
+
+
+def downgrade():
+ op.drop_table('messages')
+ op.drop_table('conversations')
diff --git a/alembic/versions/a6878af5b66f_add_category_and_due_date_to_tasks.py b/alembic/versions/a6878af5b66f_add_category_and_due_date_to_tasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c687f093e6850dbea4a06ffea4c04987a737117
--- /dev/null
+++ b/alembic/versions/a6878af5b66f_add_category_and_due_date_to_tasks.py
@@ -0,0 +1,30 @@
+"""add_category_and_due_date_to_tasks
+
+Revision ID: a6878af5b66f
+Revises: 001
+Create Date: 2026-02-05 14:23:11.577860
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = 'a6878af5b66f'
+down_revision = '001'
+branch_labels = None
+depends_on = None
+
+
+def upgrade() -> None:
+ # Add category column
+ op.add_column('tasks', sa.Column('category', sa.String(length=50), nullable=True))
+
+ # Add due_date column
+ op.add_column('tasks', sa.Column('due_date', sa.DateTime(), nullable=True))
+
+
+def downgrade() -> None:
+ # Remove columns in reverse order
+ op.drop_column('tasks', 'due_date')
+ op.drop_column('tasks', 'category')
diff --git a/api/index.py b/api/index.py
new file mode 100644
index 0000000000000000000000000000000000000000..e958df2205697ad469eead3c253f093ad56a4eba
--- /dev/null
+++ b/api/index.py
@@ -0,0 +1,19 @@
+"""
+Vercel Serverless Function for FastAPI
+Vercel natively supports ASGI apps - just export the app directly
+"""
+import sys
+import os
+
+# Add parent directory to path for imports
+sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+from src.main import app
+
+# Vercel will automatically detect and handle the ASGI app
+# No need for Mangum or any wrapper
+
+# For local testing
+if __name__ == "__main__":
+ import uvicorn
+ uvicorn.run(app, host="0.0.0.0", port=8000)
diff --git a/api/test.py b/api/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..33350f2d2133c0ca75660af89e38d16768856216
--- /dev/null
+++ b/api/test.py
@@ -0,0 +1,45 @@
+"""
+Minimal test endpoint for Vercel deployment debugging
+"""
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+import os
+
+app = FastAPI(title="Todo API - Minimal Test")
+
+# CORS
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+@app.get("/")
+async def root():
+ return {
+ "status": "ok",
+ "message": "Minimal FastAPI working on Vercel",
+ "environment": {
+ "VERCEL": os.getenv("VERCEL", "not set"),
+ "VERCEL_ENV": os.getenv("VERCEL_ENV", "not set"),
+ }
+ }
+
+@app.get("/health")
+async def health():
+ return {"status": "healthy"}
+
+@app.get("/test-db")
+async def test_db():
+ """Test database connection"""
+ try:
+ from src.database import engine
+ from sqlmodel import text
+
+ with engine.connect() as conn:
+ result = conn.execute(text("SELECT 1"))
+ return {"status": "ok", "database": "connected"}
+ except Exception as e:
+ return {"status": "error", "message": str(e)}
diff --git a/init_db.py b/init_db.py
new file mode 100644
index 0000000000000000000000000000000000000000..92e15054b7a048eea80ac7c842f1d52ab75087bb
--- /dev/null
+++ b/init_db.py
@@ -0,0 +1,9 @@
+"""
+Initialize database tables for the Todo application.
+"""
+from src.database import create_db_and_tables
+
+if __name__ == "__main__":
+ print("Creating database tables...")
+ create_db_and_tables()
+ print("Database tables created successfully!")
diff --git a/migrate_db.py b/migrate_db.py
new file mode 100644
index 0000000000000000000000000000000000000000..011fb4276f82370a7a5aaa3dcbb5ce513b671e87
--- /dev/null
+++ b/migrate_db.py
@@ -0,0 +1,36 @@
+"""
+Simple migration script to add category and due_date columns to tasks table.
+"""
+import sqlite3
+
+# Connect to database
+conn = sqlite3.connect('todo.db')
+cursor = conn.cursor()
+
+try:
+ # Check if columns exist
+ cursor.execute("PRAGMA table_info(tasks)")
+ columns = [col[1] for col in cursor.fetchall()]
+
+ # Add category column if it doesn't exist
+ if 'category' not in columns:
+ cursor.execute("ALTER TABLE tasks ADD COLUMN category VARCHAR(50)")
+ print("Added 'category' column")
+ else:
+ print("'category' column already exists")
+
+ # Add due_date column if it doesn't exist
+ if 'due_date' not in columns:
+ cursor.execute("ALTER TABLE tasks ADD COLUMN due_date DATETIME")
+ print("Added 'due_date' column")
+ else:
+ print("'due_date' column already exists")
+
+ conn.commit()
+ print("\nDatabase migration completed successfully!")
+
+except Exception as e:
+ print(f"Error: {e}")
+ conn.rollback()
+finally:
+ conn.close()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..88836ac3d80da3507d4c255bfd4bce7bbea18783
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,12 @@
+fastapi==0.109.0
+sqlmodel==0.0.14
+python-jose[cryptography]==3.3.0
+passlib[argon2]==1.7.4
+python-multipart==0.0.6
+uvicorn[standard]==0.27.0
+pydantic>=2.6.0
+pydantic-settings>=2.2.0
+python-dotenv==1.0.0
+email-validator==2.1.0
+cohere>=5.0.0
+tenacity>=8.2.3
diff --git a/src/agents/__init__.py b/src/agents/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/agents/cohere_client.py b/src/agents/cohere_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ccfef752f1e622334b439b01a80876836f84ce8
--- /dev/null
+++ b/src/agents/cohere_client.py
@@ -0,0 +1,168 @@
+"""
+Cohere client service for AI chatbot.
+
+This module provides a wrapper around the Cohere API with:
+- API key management
+- Retry logic for transient failures
+- Timeout handling
+- Structured logging
+- Token usage tracking
+"""
+
+import os
+import logging
+import time
+from typing import List, Dict, Any, Optional
+from tenacity import (
+ retry,
+ stop_after_attempt,
+ wait_exponential,
+ retry_if_exception_type
+)
+import cohere
+from cohere.errors import TooManyRequestsError, ServiceUnavailableError
+
+logger = logging.getLogger(__name__)
+
+
+class CohereClient:
+ """
+ Cohere API client with retry logic and structured logging.
+
+ This client is specifically configured for the AI chatbot use case
+ with deterministic temperature and tool-calling support.
+ """
+
+ def __init__(self):
+ """Initialize Cohere client with environment configuration."""
+ self.api_key = os.getenv("COHERE_API_KEY")
+ if not self.api_key:
+ raise ValueError("COHERE_API_KEY not found in environment variables")
+
+ self.model = os.getenv("COHERE_MODEL", "command-r-plus")
+ self.temperature = float(os.getenv("COHERE_TEMPERATURE", "0.3"))
+ self.max_tokens = int(os.getenv("COHERE_MAX_TOKENS", "2000"))
+ self.timeout = int(os.getenv("COHERE_TIMEOUT", "30"))
+
+ # Initialize Cohere client
+ self.client = cohere.ClientV2(self.api_key)
+ logger.info(f"Cohere client initialized with model: {self.model}")
+
+ @retry(
+ stop=stop_after_attempt(3),
+ wait=wait_exponential(multiplier=1, min=2, max=10),
+ retry=retry_if_exception_type((TooManyRequestsError, ServiceUnavailableError))
+ )
+ async def chat(
+ self,
+ messages: List[Dict[str, str]],
+ tools: Optional[List[Dict[str, Any]]] = None
+ ) -> Dict[str, Any]:
+ """
+ Send chat request to Cohere API with retry logic.
+
+ Args:
+ messages: List of message dictionaries with 'role' and 'content'
+ tools: Optional list of tool definitions for tool-calling
+
+ Returns:
+ Dictionary containing response and tool calls (if any)
+
+ Raises:
+ Exception: If API call fails after retries
+ """
+ start_time = time.time()
+
+ try:
+ logger.info(f"Sending chat request to Cohere (model: {self.model})")
+ logger.debug(f"Messages: {len(messages)}, Tools: {len(tools) if tools else 0}")
+
+ response = self.client.chat(
+ model=self.model,
+ messages=messages,
+ temperature=self.temperature,
+ max_tokens=self.max_tokens,
+ tools=tools if tools else None
+ )
+
+ latency = time.time() - start_time
+
+ # Debug: Print full response structure
+ logger.info(f"Cohere response received: {response}")
+ logger.info(f"Response dict: {response.__dict__ if hasattr(response, '__dict__') else 'No dict'}")
+
+ # Extract response content
+ response_text = ""
+ if hasattr(response, 'message') and hasattr(response.message, 'content') and response.message.content:
+ for item in response.message.content:
+ if hasattr(item, 'text'):
+ response_text = item.text
+ break
+
+ # Extract tool calls if present
+ tool_calls = []
+ if hasattr(response.message, 'tool_calls') and response.message.tool_calls:
+ import json
+ for tool_call in response.message.tool_calls:
+ try:
+ # Parse JSON string arguments into dictionary
+ arguments = json.loads(tool_call.function.arguments) if isinstance(tool_call.function.arguments, str) else tool_call.function.arguments
+ tool_calls.append({
+ "name": tool_call.function.name,
+ "parameters": arguments
+ })
+ except json.JSONDecodeError as e:
+ logger.error(f"Failed to parse tool call arguments: {e}")
+ continue
+
+ # Log metrics
+ logger.info(f"Cohere API call successful (latency: {latency:.2f}s)")
+ if hasattr(response, 'usage'):
+ logger.info(f"Token usage - Input: {response.usage.tokens.input_tokens}, "
+ f"Output: {response.usage.tokens.output_tokens}")
+
+ return {
+ "response": response_text,
+ "tool_calls": tool_calls,
+ "latency": latency
+ }
+
+ except TooManyRequestsError as e:
+ logger.warning(f"Rate limit hit: {str(e)}")
+ raise
+ except ServiceUnavailableError as e:
+ logger.error(f"Cohere service unavailable: {str(e)}")
+ raise
+ except Exception as e:
+ import traceback
+ logger.error(f"Cohere API call failed: {str(e)}")
+ logger.error(f"Traceback: {traceback.format_exc()}")
+ raise
+
+ def validate_tool_call(self, tool_call: Dict[str, Any]) -> bool:
+ """
+ Validate that a tool call has the required structure.
+
+ Args:
+ tool_call: Tool call dictionary to validate
+
+ Returns:
+ True if valid, False otherwise
+ """
+ if not isinstance(tool_call, dict):
+ return False
+
+ if "name" not in tool_call or "parameters" not in tool_call:
+ return False
+
+ if not isinstance(tool_call["name"], str):
+ return False
+
+ if not isinstance(tool_call["parameters"], dict):
+ return False
+
+ return True
+
+
+# Global Cohere client instance
+cohere_client = CohereClient()
diff --git a/src/agents/orchestrator.py b/src/agents/orchestrator.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcba97df6a85ab376ec94d8198e3580ff18dbc90
--- /dev/null
+++ b/src/agents/orchestrator.py
@@ -0,0 +1,163 @@
+"""
+Agent orchestrator for AI chatbot.
+
+This module coordinates between Cohere API and MCP tools,
+managing the conversation flow and tool execution.
+"""
+
+import logging
+from typing import List, Dict, Any, Optional
+from src.agents.cohere_client import cohere_client
+from src.mcp.server import mcp_server
+
+logger = logging.getLogger(__name__)
+
+
+class AgentOrchestrator:
+ """
+ Orchestrates AI agent interactions with tool-calling support.
+
+ This orchestrator:
+ 1. Sends messages to Cohere API
+ 2. Receives tool call decisions
+ 3. Validates and executes tools via MCP server
+ 4. Returns results to continue conversation
+ """
+
+ def __init__(self):
+ self.cohere = cohere_client
+ self.mcp = mcp_server
+
+ async def run(
+ self,
+ messages: List[Dict[str, str]],
+ user_id: int,
+ db: Any = None
+ ) -> Dict[str, Any]:
+ """
+ Run the agent with conversation history.
+
+ Args:
+ messages: List of conversation messages
+ user_id: Authenticated user ID for tool execution
+ db: Database session for tool execution
+
+ Returns:
+ Dictionary with response and tool execution results
+ """
+ try:
+ # Get available tools from MCP server
+ tools = self.mcp.list_tools()
+
+ logger.info(f"Running agent with {len(messages)} messages and {len(tools)} tools")
+
+ # Call Cohere API
+ result = await self.cohere.chat(messages=messages, tools=tools)
+
+ response_text = result["response"]
+ tool_calls = result["tool_calls"]
+
+ # If no tool calls, return response directly
+ if not tool_calls:
+ logger.info("No tool calls in response")
+ return {
+ "response": response_text,
+ "tool_calls": [],
+ "tool_results": []
+ }
+
+ # Execute tool calls
+ tool_results = []
+ for tool_call in tool_calls:
+ if not self.cohere.validate_tool_call(tool_call):
+ logger.warning(f"Invalid tool call structure: {tool_call}")
+ continue
+
+ tool_name = tool_call["name"]
+ parameters = tool_call["parameters"]
+
+ # Inject user_id into parameters for security
+ parameters["user_id"] = user_id
+
+ try:
+ logger.info(f"Executing tool: {tool_name}")
+ tool_result = await self.mcp.execute_tool(tool_name, parameters, db=db)
+ tool_results.append({
+ "tool": tool_name,
+ "result": tool_result
+ })
+ except Exception as e:
+ logger.error(f"Tool execution failed: {tool_name} - {str(e)}")
+ tool_results.append({
+ "tool": tool_name,
+ "result": {
+ "success": False,
+ "message": f"Tool execution failed: {str(e)}"
+ }
+ })
+
+ # Generate final response incorporating tool results
+ final_response = await self._generate_final_response(
+ messages,
+ response_text,
+ tool_results
+ )
+
+ return {
+ "response": final_response,
+ "tool_calls": tool_calls,
+ "tool_results": tool_results
+ }
+
+ except Exception as e:
+ logger.error(f"Agent orchestration failed: {str(e)}")
+ raise
+
+ async def _generate_final_response(
+ self,
+ messages: List[Dict[str, str]],
+ initial_response: str,
+ tool_results: List[Dict[str, Any]]
+ ) -> str:
+ """
+ Generate final response incorporating tool execution results.
+
+ Args:
+ messages: Original conversation messages
+ initial_response: Initial AI response with tool calls
+ tool_results: Results from tool executions
+
+ Returns:
+ Final response text
+ """
+ # If no tool results, return initial response
+ if not tool_results:
+ return initial_response
+
+ # Build context with tool results
+ tool_context = "\n".join([
+ f"Tool {tr['tool']}: {tr['result'].get('message', 'Executed')}"
+ for tr in tool_results
+ ])
+
+ # Create follow-up message to generate natural response
+ # Only include non-empty messages to avoid Cohere API v2 validation errors
+ follow_up_messages = [msg for msg in messages if msg.get('content', '').strip()]
+
+ # Add tool results as user message for context
+ follow_up_messages.append({
+ "role": "user",
+ "content": f"Tool execution results:\n{tool_context}\n\nProvide a natural language response to the user based on these results."
+ })
+
+ try:
+ result = await self.cohere.chat(messages=follow_up_messages, tools=None)
+ return result["response"]
+ except Exception as e:
+ logger.error(f"Failed to generate final response: {str(e)}")
+ # Fallback to tool results summary
+ return f"Operation completed. {tool_context}"
+
+
+# Global orchestrator instance
+orchestrator = AgentOrchestrator()
diff --git a/src/api/__init__.py b/src/api/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c7f58ef7befcb42f1b879de6c1ef5f6705c9188
--- /dev/null
+++ b/src/api/__init__.py
@@ -0,0 +1 @@
+# API module
diff --git a/src/api/ai.py b/src/api/ai.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d506864e2f85e87cdea99da5c44d4c3705fce49
--- /dev/null
+++ b/src/api/ai.py
@@ -0,0 +1,228 @@
+"""
+AI-powered task management endpoints using Cohere.
+
+This module provides REST API endpoints for AI features:
+- Task suggestions
+- Smart auto-completion
+- Task categorization
+- Description enhancement
+- Complexity analysis
+"""
+
+from fastapi import APIRouter, HTTPException, Depends
+from pydantic import BaseModel, Field
+from typing import List, Dict, Optional
+from src.services.cohere_ai import cohere_service
+from src.middleware.jwt_auth import get_current_user
+
+router = APIRouter()
+
+
+# Request/Response Models
+class TaskSuggestionRequest(BaseModel):
+    """Request body for POST /suggestions."""
+    # Free-text context the AI uses to propose tasks.
+    context: str = Field(..., description="Context to generate suggestions from")
+    # Bounded 1-10 to cap the amount of generated output per request.
+    count: int = Field(default=5, ge=1, le=10, description="Number of suggestions")
+
+
+class TaskSuggestionResponse(BaseModel):
+    """Response body for POST /suggestions: generated task titles."""
+    suggestions: List[str]
+
+
+class EnhanceDescriptionRequest(BaseModel):
+    """Request body for POST /enhance-description."""
+    title: str = Field(..., description="Task title")
+    # Optional; the AI may enhance from the title alone.
+    description: str = Field(default="", description="Current description")
+
+
+class EnhanceDescriptionResponse(BaseModel):
+    """Response body for POST /enhance-description."""
+    enhanced_description: str
+
+
+class CategorizeTaskRequest(BaseModel):
+    """Request body for POST /categorize."""
+    title: str = Field(..., description="Task title")
+    description: str = Field(default="", description="Task description")
+
+
+class CategorizeTaskResponse(BaseModel):
+    """Response body for POST /categorize: AI classification of a task."""
+    category: str
+    priority: str
+    tags: List[str]
+
+
+class AutoCompleteRequest(BaseModel):
+    """Request body for POST /autocomplete."""
+    partial_title: str = Field(..., description="Partial task title")
+
+
+class AutoCompleteResponse(BaseModel):
+    """Response body for POST /autocomplete: suggested title completions."""
+    completions: List[str]
+
+
+class AnalyzeComplexityRequest(BaseModel):
+    """Request body for POST /analyze-complexity."""
+    title: str = Field(..., description="Task title")
+    description: str = Field(default="", description="Task description")
+
+
+class AnalyzeComplexityResponse(BaseModel):
+    """Response body for POST /analyze-complexity."""
+    complexity: str
+    estimated_time: str
+    # True when the AI recommends breaking the task into subtasks.
+    needs_subtasks: bool
+
+
+# Endpoints
+@router.post("/suggestions", response_model=TaskSuggestionResponse)
+async def generate_task_suggestions(
+ request: TaskSuggestionRequest,
+ current_user: dict = Depends(get_current_user)
+):
+ """
+ Generate AI-powered task suggestions based on context.
+
+ Requires authentication.
+ """
+ try:
+ suggestions = cohere_service.generate_task_suggestions(
+ context=request.context,
+ count=request.count
+ )
+
+ if not suggestions:
+ raise HTTPException(
+ status_code=500,
+ detail="Failed to generate suggestions. Please try again."
+ )
+
+ return TaskSuggestionResponse(suggestions=suggestions)
+
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=f"Error generating suggestions: {str(e)}"
+ )
+
+
+@router.post("/enhance-description", response_model=EnhanceDescriptionResponse)
+async def enhance_task_description(
+ request: EnhanceDescriptionRequest,
+ current_user: dict = Depends(get_current_user)
+):
+ """
+ Enhance a task description with AI to make it more clear and actionable.
+
+ Requires authentication.
+ """
+ try:
+ enhanced = cohere_service.enhance_task_description(
+ title=request.title,
+ description=request.description
+ )
+
+ return EnhanceDescriptionResponse(enhanced_description=enhanced)
+
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=f"Error enhancing description: {str(e)}"
+ )
+
+
+@router.post("/categorize", response_model=CategorizeTaskResponse)
+async def categorize_task(
+ request: CategorizeTaskRequest,
+ current_user: dict = Depends(get_current_user)
+):
+ """
+ Categorize a task and suggest priority level using AI.
+
+ Requires authentication.
+ """
+ try:
+ result = cohere_service.categorize_task(
+ title=request.title,
+ description=request.description
+ )
+
+ return CategorizeTaskResponse(**result)
+
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=f"Error categorizing task: {str(e)}"
+ )
+
+
+@router.post("/autocomplete", response_model=AutoCompleteResponse)
+async def autocomplete_task(
+ request: AutoCompleteRequest,
+ current_user: dict = Depends(get_current_user)
+):
+ """
+ Provide smart auto-completion suggestions for task titles.
+
+ Requires authentication.
+ """
+ try:
+ completions = cohere_service.smart_complete_task(
+ partial_title=request.partial_title
+ )
+
+ return AutoCompleteResponse(completions=completions)
+
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=f"Error generating completions: {str(e)}"
+ )
+
+
+@router.post("/analyze-complexity", response_model=AnalyzeComplexityResponse)
+async def analyze_task_complexity(
+ request: AnalyzeComplexityRequest,
+ current_user: dict = Depends(get_current_user)
+):
+ """
+ Analyze task complexity and provide time estimates using AI.
+
+ Requires authentication.
+ """
+ try:
+ result = cohere_service.analyze_task_complexity(
+ title=request.title,
+ description=request.description
+ )
+
+ return AnalyzeComplexityResponse(**result)
+
+ except Exception as e:
+ raise HTTPException(
+ status_code=500,
+ detail=f"Error analyzing complexity: {str(e)}"
+ )
+
+
+@router.get("/health")
+async def ai_health_check():
+ """
+ Check if AI service is properly configured.
+
+ Does not require authentication.
+ """
+ try:
+ import os
+ api_key = os.getenv("COHERE_API_KEY")
+
+ if not api_key:
+ return {
+ "status": "error",
+ "message": "COHERE_API_KEY not configured"
+ }
+
+ return {
+ "status": "healthy",
+ "message": "AI service is configured and ready",
+ "provider": "Cohere"
+ }
+
+ except Exception as e:
+ return {
+ "status": "error",
+ "message": str(e)
+ }
diff --git a/src/api/auth.py b/src/api/auth.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ad4af17757ff65d2ce93ce4ea54b9fcbbe3d1f9
--- /dev/null
+++ b/src/api/auth.py
@@ -0,0 +1,155 @@
+"""
+Authentication API endpoints for user signup and signin.
+
+This module provides:
+- POST /api/auth/signup - Create new user account
+- POST /api/auth/signin - Authenticate existing user
+"""
+
+from fastapi import APIRouter, HTTPException, Depends
+from sqlmodel import Session, select
+from pydantic import BaseModel, EmailStr, Field
+
+from ..models.user import User
+from ..services.auth import hash_password, verify_password, create_access_token
+from ..database import get_session
+
+router = APIRouter()
+
+
+# Request/Response Models
+class SignUpRequest(BaseModel):
+    """Request model for user signup."""
+    email: EmailStr = Field(..., description="User email address")
+    # Minimum length enforced here; hashing happens in the endpoint.
+    password: str = Field(..., min_length=8, description="User password (minimum 8 characters)")
+
+
+class SignInRequest(BaseModel):
+    """Request model for user signin."""
+    email: EmailStr = Field(..., description="User email address")
+    # No min_length here: existing passwords are validated by hash comparison.
+    password: str = Field(..., description="User password")
+
+
+class UserResponse(BaseModel):
+    """User data response model (timestamps serialized as ISO-8601 strings)."""
+    id: int
+    email: str
+    created_at: str
+    updated_at: str
+
+
+class AuthResponse(BaseModel):
+    """Authentication response with JWT access token and user data."""
+    token: str
+    user: UserResponse
+
+
+@router.post("/signup", response_model=AuthResponse, status_code=201)
+async def signup(
+ request: SignUpRequest,
+ session: Session = Depends(get_session)
+) -> AuthResponse:
+ """
+ Create a new user account.
+
+ Args:
+ request: Signup request with email and password
+ session: Database session
+
+ Returns:
+ AuthResponse with JWT token and user data
+
+ Raises:
+ HTTPException 400: If email already exists
+ HTTPException 422: If validation fails
+ """
+ # Check if email already exists
+ statement = select(User).where(User.email == request.email)
+ existing_user = session.exec(statement).first()
+
+ if existing_user:
+ raise HTTPException(
+ status_code=400,
+ detail="Email already registered"
+ )
+
+ # Hash password
+ hashed_password = hash_password(request.password)
+
+ # Create new user
+ new_user = User(
+ email=request.email,
+ hashed_password=hashed_password
+ )
+
+ session.add(new_user)
+ session.commit()
+ session.refresh(new_user)
+
+ # Create JWT token
+ token = create_access_token(
+ data={
+ "user_id": new_user.id,
+ "email": new_user.email
+ }
+ )
+
+ # Return response
+ return AuthResponse(
+ token=token,
+ user=UserResponse(
+ id=new_user.id,
+ email=new_user.email,
+ created_at=new_user.created_at.isoformat(),
+ updated_at=new_user.updated_at.isoformat()
+ )
+ )
+
+
+@router.post("/signin", response_model=AuthResponse)
+async def signin(
+ request: SignInRequest,
+ session: Session = Depends(get_session)
+) -> AuthResponse:
+ """
+ Authenticate an existing user.
+
+ Args:
+ request: Signin request with email and password
+ session: Database session
+
+ Returns:
+ AuthResponse with JWT token and user data
+
+ Raises:
+ HTTPException 401: If credentials are invalid
+ """
+ # Find user by email
+ statement = select(User).where(User.email == request.email)
+ user = session.exec(statement).first()
+
+ # Verify user exists and password is correct
+ if not user or not verify_password(request.password, user.hashed_password):
+ raise HTTPException(
+ status_code=401,
+ detail="Invalid email or password"
+ )
+
+ # Create JWT token
+ token = create_access_token(
+ data={
+ "user_id": user.id,
+ "email": user.email
+ }
+ )
+
+ # Return response
+ return AuthResponse(
+ token=token,
+ user=UserResponse(
+ id=user.id,
+ email=user.email,
+ created_at=user.created_at.isoformat(),
+ updated_at=user.updated_at.isoformat()
+ )
+ )
diff --git a/src/api/chat.py b/src/api/chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..0424b5d42e7885f288c706d68975b9c54dccaddd
--- /dev/null
+++ b/src/api/chat.py
@@ -0,0 +1,210 @@
+"""
+Chat endpoint for AI-powered conversational task management.
+
+This module provides the REST API endpoint for the AI chatbot,
+implementing stateless conversation management with MCP tool execution.
+"""
+
+from fastapi import APIRouter, HTTPException, Depends
+from pydantic import BaseModel, Field
+from typing import List, Dict, Any, Optional
+from sqlmodel import Session
+import logging
+
+from src.database import get_session
+from src.middleware.jwt_auth import get_current_user
+from src.services.conversation_service import conversation_service
+from src.agents.orchestrator import orchestrator
+
+logger = logging.getLogger(__name__)
+
+router = APIRouter()
+
+
+# Request/Response Models
+
+class ChatRequest(BaseModel):
+    """Request model for the chat endpoint."""
+    # Bounded to 10k characters to cap prompt size sent to the AI.
+    message: str = Field(
+        ...,
+        min_length=1,
+        max_length=10000,
+        description="User's message to the AI chatbot"
+    )
+
+
+class ChatResponse(BaseModel):
+    """Response model for the chat endpoint."""
+    conversation_id: int = Field(description="ID of the conversation")
+    message_id: int = Field(description="ID of the assistant's message")
+    response: str = Field(description="AI assistant's response")
+    timestamp: str = Field(description="ISO 8601 timestamp of the response")
+
+
+class ConversationHistoryResponse(BaseModel):
+    """Response model for conversation history."""
+    conversation_id: int
+    # Each entry carries id/role/content/timestamp keys.
+    messages: List[Dict[str, Any]]
+    total_count: int
+    # True when the returned page filled the requested limit.
+    has_more: bool = False
+
+
+# Endpoints
+
+@router.post("/chat", response_model=ChatResponse)
+async def chat(
+    request: ChatRequest,
+    current_user: dict = Depends(get_current_user),
+    db: Session = Depends(get_session)
+):
+    """
+    Send a message to the AI chatbot.
+
+    The chatbot will:
+    - Understand user intent (add task, list tasks, complete task, etc.)
+    - Execute appropriate MCP tool operations
+    - Return conversational response with operation results
+
+    All conversation history is automatically persisted and loaded for context.
+
+    Args:
+        request: Validated chat message (1-10000 characters)
+        current_user: JWT payload; must contain "user_id"
+        db: Database session used for conversation persistence
+
+    Returns:
+        ChatResponse with conversation id, message id, reply text, timestamp
+
+    Raises:
+        HTTPException 400: On ValueError raised by the services
+        HTTPException 500: On any other processing failure
+
+    Requires authentication.
+    """
+    try:
+        user_id = current_user["user_id"]
+        # Log only a 50-char prefix to avoid dumping long user input in logs.
+        logger.info(f"Chat request from user {user_id}: {request.message[:50]}...")
+
+        # 1. Get or create conversation for this user
+        conversation = await conversation_service.get_or_create_conversation(db, user_id)
+
+        # 2. Store user message (persisted before the AI call so it is never lost)
+        user_message = await conversation_service.store_message(
+            db=db,
+            conversation_id=conversation.id,
+            user_id=user_id,
+            role="user",
+            content=request.message
+        )
+
+        # 3. Load conversation history (up to 50 messages as AI context)
+        history = await conversation_service.load_conversation_history(
+            db=db,
+            conversation_id=conversation.id,
+            limit=50
+        )
+
+        # 4. Build message array for AI
+        messages = conversation_service.build_message_array(history)
+
+        # 5. Run agent orchestrator (may execute MCP tools against the DB)
+        result = await orchestrator.run(messages=messages, user_id=user_id, db=db)
+
+        # 6. Store assistant response
+        assistant_message = await conversation_service.store_message(
+            db=db,
+            conversation_id=conversation.id,
+            user_id=user_id,
+            role="assistant",
+            content=result["response"]
+        )
+
+        # 7. Return structured response
+        return ChatResponse(
+            conversation_id=conversation.id,
+            message_id=assistant_message.id,
+            response=result["response"],
+            timestamp=assistant_message.created_at.isoformat()
+        )
+
+    except ValueError as e:
+        logger.error(f"Validation error in chat endpoint: {str(e)}")
+        raise HTTPException(status_code=400, detail=str(e))
+    except Exception as e:
+        # Generic detail: internal error text is logged, not sent to the client.
+        logger.error(f"Error in chat endpoint: {str(e)}")
+        raise HTTPException(
+            status_code=500,
+            detail="An error occurred while processing your message. Please try again."
+        )
+
+
+@router.get("/chat/history", response_model=ConversationHistoryResponse)
+async def get_chat_history(
+    limit: int = 50,
+    offset: int = 0,
+    current_user: dict = Depends(get_current_user),
+    db: Session = Depends(get_session)
+):
+    """
+    Retrieve conversation history for the authenticated user.
+
+    Returns messages in chronological order.
+
+    Args:
+        limit: Maximum number of messages to return (default 50)
+        offset: Accepted for API compatibility but currently ignored —
+            NOTE(review): it is never passed to the conversation service,
+            so pagination beyond the first page does not work; confirm
+            whether the service should receive this value.
+        current_user: JWT payload; must contain "user_id"
+        db: Database session
+
+    Requires authentication.
+    """
+    try:
+        user_id = current_user["user_id"]
+
+        # Get user's conversation (created on the fly if none exists yet)
+        conversation = await conversation_service.get_or_create_conversation(db, user_id)
+
+        # Load messages
+        messages = await conversation_service.load_conversation_history(
+            db=db,
+            conversation_id=conversation.id,
+            limit=limit
+        )
+
+        # Format messages for the response schema
+        formatted_messages = [
+            {
+                "id": msg.id,
+                "role": msg.role,
+                "content": msg.content,
+                "timestamp": msg.created_at.isoformat()
+            }
+            for msg in messages
+        ]
+
+        return ConversationHistoryResponse(
+            conversation_id=conversation.id,
+            messages=formatted_messages,
+            total_count=len(formatted_messages),
+            # A full page suggests more messages may exist (heuristic, not exact).
+            has_more=len(formatted_messages) >= limit
+        )
+
+    except Exception as e:
+        logger.error(f"Error retrieving chat history: {str(e)}")
+        raise HTTPException(
+            status_code=500,
+            detail="An error occurred while retrieving chat history."
+        )
+
+
+@router.get("/chat/health")
+async def chat_health_check():
+ """
+ Check if chat service is properly configured.
+
+ Does not require authentication.
+ """
+ try:
+ import os
+ cohere_key = os.getenv("COHERE_API_KEY")
+
+ if not cohere_key:
+ return {
+ "status": "error",
+ "message": "COHERE_API_KEY not configured"
+ }
+
+ return {
+ "status": "healthy",
+ "message": "Chat service is configured and ready",
+ "provider": "Cohere",
+ "architecture": "Stateless with MCP tools"
+ }
+
+ except Exception as e:
+ return {
+ "status": "error",
+ "message": str(e)
+ }
diff --git a/src/api/health.py b/src/api/health.py
new file mode 100644
index 0000000000000000000000000000000000000000..e13a3965572b8566736fea84ad3faaa6c51e7258
--- /dev/null
+++ b/src/api/health.py
@@ -0,0 +1,61 @@
+"""
+Health check endpoints for Kubernetes probes
+"""
+from fastapi import APIRouter, status
+from fastapi.responses import JSONResponse
+from datetime import datetime
+from sqlalchemy import text
+from src.database import get_db
+
+router = APIRouter()
+
+
+@router.get("/health", status_code=status.HTTP_200_OK)
+async def health_check():
+ """
+ Liveness probe endpoint
+ Returns 200 OK if the FastAPI server is running
+
+ Used by: Kubernetes liveness probe
+ """
+ return {
+ "status": "healthy",
+ "service": "todo-backend",
+ "timestamp": datetime.utcnow().isoformat()
+ }
+
+
+@router.get("/ready", status_code=status.HTTP_200_OK)
+async def readiness_check():
+ """
+ Readiness probe endpoint
+ Returns 200 OK if the application is ready to serve traffic
+ Checks database connection pool health
+
+ Used by: Kubernetes readiness probe
+ """
+ try:
+ # Check database connectivity
+ db = next(get_db())
+
+ # Simple query to verify database connection
+ result = db.execute(text("SELECT 1"))
+ result.fetchone()
+
+ return {
+ "status": "ready",
+ "service": "todo-backend",
+ "database": "connected",
+ "timestamp": datetime.utcnow().isoformat()
+ }
+ except Exception as e:
+ return JSONResponse(
+ status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+ content={
+ "status": "not_ready",
+ "service": "todo-backend",
+ "database": "disconnected",
+ "error": str(e),
+ "timestamp": datetime.utcnow().isoformat()
+ }
+ )
diff --git a/src/api/password_reset.py b/src/api/password_reset.py
new file mode 100644
index 0000000000000000000000000000000000000000..dc728b9736f636d561164b178956126e7b63bd72
--- /dev/null
+++ b/src/api/password_reset.py
@@ -0,0 +1,233 @@
+"""
+Password Reset API endpoints for secure password recovery.
+
+This module provides:
+- POST /api/auth/forgot-password - Request password reset email
+- GET /api/auth/reset-password/{token} - Verify reset token validity
+- POST /api/auth/reset-password - Reset password with token
+"""
+
+from fastapi import APIRouter, HTTPException, Depends
+from sqlmodel import Session
+from pydantic import BaseModel, EmailStr, Field
+from typing import Optional
+
+from ..models.user import User
+from ..services.auth import hash_password
+from ..services.password_reset import (
+ create_reset_token,
+ validate_reset_token,
+ invalidate_token,
+ check_rate_limit,
+ validate_password_strength,
+ get_user_by_email
+)
+from ..services.email import send_password_reset_email
+from ..database import get_session
+
+router = APIRouter()
+
+
+# Request/Response Models
+class ForgotPasswordRequest(BaseModel):
+    """Request model for forgot password."""
+    email: EmailStr = Field(..., description="User email address")
+
+
+class ForgotPasswordResponse(BaseModel):
+    """Response model for forgot password; always a generic message."""
+    message: str
+
+
+class TokenValidationResponse(BaseModel):
+    """Response model for token validation."""
+    valid: bool
+    # Populated only when the token is valid.
+    email: Optional[str] = None
+    # Populated only when the token is invalid.
+    error: Optional[str] = None
+
+
+class ResetPasswordRequest(BaseModel):
+    """Request model for password reset."""
+    token: str = Field(..., description="Password reset token")
+    new_password: str = Field(..., min_length=8, description="New password (minimum 8 characters)")
+
+
+class ResetPasswordResponse(BaseModel):
+    """Response model for password reset."""
+    message: str
+
+
+@router.post("/forgot-password", response_model=ForgotPasswordResponse)
+async def forgot_password(
+ request: ForgotPasswordRequest,
+ session: Session = Depends(get_session)
+) -> ForgotPasswordResponse:
+ """
+ Request a password reset email.
+
+ Security features:
+ - No user enumeration (same response for existing/non-existing emails)
+ - Rate limiting (3 requests per hour per user)
+ - Cryptographically secure tokens
+ - 15-minute token expiry
+
+ Args:
+ request: Forgot password request with email
+ session: Database session
+
+ Returns:
+ Generic success message (no user enumeration)
+
+ Raises:
+ HTTPException 400: If email format is invalid
+ HTTPException 429: If rate limit exceeded
+ """
+ # Find user by email
+ user = get_user_by_email(session, request.email)
+
+ # Always return same message to prevent user enumeration
+ generic_message = "If an account exists with this email, you will receive a password reset link shortly."
+
+ # If user doesn't exist, return generic message (no enumeration)
+ if not user:
+ return ForgotPasswordResponse(message=generic_message)
+
+ # Check rate limit
+ if not check_rate_limit(session, user.id):
+ raise HTTPException(
+ status_code=429,
+ detail="Too many password reset requests. Please try again later."
+ )
+
+ # Create reset token
+ token = create_reset_token(session, user.id)
+
+ # Send reset email
+ email_sent = send_password_reset_email(user.email, token)
+
+ if not email_sent:
+ # Log error but don't expose to user
+ print(f"Failed to send password reset email to {user.email}")
+
+ # Always return generic message
+ return ForgotPasswordResponse(message=generic_message)
+
+
+@router.get("/reset-password/{token}", response_model=TokenValidationResponse)
+async def verify_reset_token(
+ token: str,
+ session: Session = Depends(get_session)
+) -> TokenValidationResponse:
+ """
+ Verify if a password reset token is valid.
+
+ Checks:
+ - Token exists
+ - Token has not expired (15 minutes)
+ - Token has not been used
+
+ Args:
+ token: Password reset token to verify
+ session: Database session
+
+ Returns:
+ TokenValidationResponse with validity status and user email
+
+ Example:
+ GET /api/auth/reset-password/abc123def456
+ """
+ # Validate token
+ token_record = validate_reset_token(session, token)
+
+ if not token_record:
+ return TokenValidationResponse(
+ valid=False,
+ error="Invalid or expired reset token"
+ )
+
+ # Get user email
+ user = session.get(User, token_record.user_id)
+
+ if not user:
+ return TokenValidationResponse(
+ valid=False,
+ error="User not found"
+ )
+
+ return TokenValidationResponse(
+ valid=True,
+ email=user.email
+ )
+
+
+@router.post("/reset-password", response_model=ResetPasswordResponse)
+async def reset_password(
+    request: ResetPasswordRequest,
+    session: Session = Depends(get_session)
+) -> ResetPasswordResponse:
+    """
+    Reset user password with a valid token.
+
+    Security features:
+    - Token validation (expiry, usage)
+    - Password strength validation
+    - One-time use tokens
+    - Automatic token invalidation
+
+    Args:
+        request: Reset password request with token and new password
+        session: Database session
+
+    Returns:
+        Success message
+
+    Raises:
+        HTTPException 400: If token is invalid or password is weak
+        HTTPException 422: If validation fails
+    """
+    # Validate token (must exist, be unexpired and unused)
+    token_record = validate_reset_token(session, request.token)
+
+    if not token_record:
+        raise HTTPException(
+            status_code=400,
+            detail="Invalid or expired reset token"
+        )
+
+    # Validate password strength before touching the account
+    password_validation = validate_password_strength(request.new_password)
+
+    if not password_validation["valid"]:
+        raise HTTPException(
+            status_code=400,
+            detail={
+                "message": "Password does not meet strength requirements",
+                "errors": password_validation["errors"]
+            }
+        )
+
+    # Get user that owns the token
+    user = session.get(User, token_record.user_id)
+
+    if not user:
+        raise HTTPException(
+            status_code=400,
+            detail="User not found"
+        )
+
+    # Hash new password
+    hashed_password = hash_password(request.new_password)
+
+    # Update user password
+    user.hashed_password = hashed_password
+    session.add(user)
+
+    # Invalidate token (mark as used) so it cannot be replayed
+    invalidate_token(session, request.token)
+
+    # Commit password change and token invalidation together
+    session.commit()
+
+    return ResetPasswordResponse(
+        message="Password successfully reset. You can now sign in with your new password."
+    )
diff --git a/src/api/subtasks.py b/src/api/subtasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..98e7a536c968b29552d6265fdd41ff8d7b078a19
--- /dev/null
+++ b/src/api/subtasks.py
@@ -0,0 +1,230 @@
+"""
+Subtasks API endpoints for CRUD operations on subtasks.
+
+This module provides:
+- GET /api/tasks/{task_id}/subtasks - List all subtasks for a task
+- POST /api/tasks/{task_id}/subtasks - Create new subtask
+- PUT /api/subtasks/{subtask_id} - Update existing subtask
+- DELETE /api/subtasks/{subtask_id} - Delete subtask
+
+All endpoints require JWT authentication and enforce user isolation.
+"""
+
+from fastapi import APIRouter, HTTPException, Depends, status
+from sqlmodel import Session
+from pydantic import BaseModel, Field
+from typing import Optional, List
+
+from ..models.subtask import Subtask
+from ..services import subtasks as subtask_service
+from ..middleware.jwt_auth import get_current_user_id
+from ..database import get_session
+
+router = APIRouter()
+
+
+# Request/Response Models
+class CreateSubtaskRequest(BaseModel):
+    """Request model for creating a subtask."""
+    title: str = Field(..., min_length=1, max_length=500, description="Subtask title")
+    # Defaults to 0 when the client does not care about ordering.
+    order: Optional[int] = Field(0, description="Order position")
+
+
+class UpdateSubtaskRequest(BaseModel):
+    """Request model for updating a subtask; None fields are left unchanged."""
+    title: Optional[str] = Field(None, min_length=1, max_length=500, description="Subtask title")
+    completed: Optional[bool] = Field(None, description="Subtask completion status")
+    order: Optional[int] = Field(None, description="Order position")
+
+
+class SubtaskResponse(BaseModel):
+    """Subtask data response model (timestamps as ISO-8601 strings)."""
+    id: int
+    task_id: int
+    title: str
+    completed: bool
+    order: int
+    created_at: str
+    updated_at: str
+
+
+class SubtaskListResponse(BaseModel):
+    """Response model for subtask list."""
+    subtasks: List[SubtaskResponse]
+
+
+@router.get("/tasks/{task_id}/subtasks", response_model=SubtaskListResponse)
+async def list_subtasks(
+    task_id: int,
+    user_id: int = Depends(get_current_user_id),
+    session: Session = Depends(get_session)
+) -> SubtaskListResponse:
+    """
+    Get all subtasks for a task.
+
+    Args:
+        task_id: Task ID
+        user_id: Current user ID from JWT token
+        session: Database session
+
+    Returns:
+        SubtaskListResponse with array of subtasks
+
+    Raises:
+        HTTPException 401: If JWT token is invalid
+
+    NOTE(review): this handler never raises 404 — a missing task or one
+    belonging to another user presumably yields an empty list from the
+    service; confirm and either raise 404 here or keep that contract.
+    """
+    # Get subtasks (service enforces user isolation via user_id)
+    subtasks = subtask_service.get_task_subtasks(session, task_id, user_id)
+
+    # Convert to response format
+    subtask_responses = [
+        SubtaskResponse(
+            id=subtask.id,
+            task_id=subtask.task_id,
+            title=subtask.title,
+            completed=subtask.completed,
+            order=subtask.order,
+            created_at=subtask.created_at.isoformat(),
+            updated_at=subtask.updated_at.isoformat()
+        )
+        for subtask in subtasks
+    ]
+
+    return SubtaskListResponse(subtasks=subtask_responses)
+
+
+@router.post("/tasks/{task_id}/subtasks", response_model=SubtaskResponse, status_code=status.HTTP_201_CREATED)
+async def create_subtask(
+ task_id: int,
+ request: CreateSubtaskRequest,
+ user_id: int = Depends(get_current_user_id),
+ session: Session = Depends(get_session)
+) -> SubtaskResponse:
+ """
+ Create a new subtask for a task.
+
+ Args:
+ task_id: Task ID
+ request: Subtask creation request
+ user_id: Current user ID from JWT token
+ session: Database session
+
+ Returns:
+ SubtaskResponse with created subtask data
+
+ Raises:
+ HTTPException 401: If JWT token is invalid
+ HTTPException 404: If task not found or doesn't belong to user
+ """
+ # Create subtask
+ subtask = subtask_service.create_subtask(
+ session=session,
+ task_id=task_id,
+ user_id=user_id,
+ title=request.title,
+ order=request.order or 0
+ )
+
+ if not subtask:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail="Task not found"
+ )
+
+ # Return response
+ return SubtaskResponse(
+ id=subtask.id,
+ task_id=subtask.task_id,
+ title=subtask.title,
+ completed=subtask.completed,
+ order=subtask.order,
+ created_at=subtask.created_at.isoformat(),
+ updated_at=subtask.updated_at.isoformat()
+ )
+
+
+@router.put("/subtasks/{subtask_id}", response_model=SubtaskResponse)
+async def update_subtask(
+    subtask_id: int,
+    request: UpdateSubtaskRequest,
+    user_id: int = Depends(get_current_user_id),
+    session: Session = Depends(get_session)
+) -> SubtaskResponse:
+    """
+    Update an existing subtask.
+
+    Args:
+        subtask_id: ID of the subtask to update
+        request: Subtask update request (None fields are passed through;
+            the service decides how unset fields are treated)
+        user_id: Current user ID from JWT token
+        session: Database session
+
+    Returns:
+        SubtaskResponse with updated subtask data
+
+    Raises:
+        HTTPException 401: If JWT token is invalid
+        HTTPException 404: If subtask not found or doesn't belong to user
+    """
+    # Update subtask (service enforces user isolation via user_id)
+    subtask = subtask_service.update_subtask(
+        session=session,
+        subtask_id=subtask_id,
+        user_id=user_id,
+        title=request.title,
+        completed=request.completed,
+        order=request.order
+    )
+
+    if not subtask:
+        raise HTTPException(
+            status_code=status.HTTP_404_NOT_FOUND,
+            detail="Subtask not found"
+        )
+
+    # Return response
+    return SubtaskResponse(
+        id=subtask.id,
+        task_id=subtask.task_id,
+        title=subtask.title,
+        completed=subtask.completed,
+        order=subtask.order,
+        created_at=subtask.created_at.isoformat(),
+        updated_at=subtask.updated_at.isoformat()
+    )
+
+
+@router.delete("/subtasks/{subtask_id}", status_code=status.HTTP_204_NO_CONTENT)
+async def delete_subtask(
+ subtask_id: int,
+ user_id: int = Depends(get_current_user_id),
+ session: Session = Depends(get_session)
+) -> None:
+ """
+ Delete a subtask.
+
+ Args:
+ subtask_id: ID of the subtask to delete
+ user_id: Current user ID from JWT token
+ session: Database session
+
+ Returns:
+ None (204 No Content)
+
+ Raises:
+ HTTPException 401: If JWT token is invalid
+ HTTPException 404: If subtask not found or doesn't belong to user
+ """
+ # Delete subtask
+ success = subtask_service.delete_subtask(
+ session=session,
+ subtask_id=subtask_id,
+ user_id=user_id
+ )
+
+ if not success:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail="Subtask not found"
+ )
diff --git a/src/api/tasks.py b/src/api/tasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7acbaa62e39b6d3a89d07c1ed0e215ddc310e0a
--- /dev/null
+++ b/src/api/tasks.py
@@ -0,0 +1,278 @@
+"""
+Tasks API endpoints for CRUD operations on tasks.
+
+This module provides:
+- GET /api/tasks - List all user tasks
+- POST /api/tasks - Create new task
+- PUT /api/tasks/{id} - Update existing task
+- DELETE /api/tasks/{id} - Delete task
+
+All endpoints require JWT authentication and enforce user isolation.
+"""
+
+from fastapi import APIRouter, HTTPException, Depends, status
+from sqlmodel import Session
+from pydantic import BaseModel, Field
+from typing import Optional, List
+
+from ..models.task import Task
+from ..services import tasks as task_service
+from ..middleware.jwt_auth import get_current_user_id
+from ..database import get_session
+
+router = APIRouter()
+
+
+# Request/Response Models
+class CreateTaskRequest(BaseModel):
+ """Request model for creating a task."""
+ title: str = Field(..., min_length=1, max_length=500, description="Task title")
+ description: Optional[str] = Field(None, description="Optional task description")
+ category: Optional[str] = Field(None, max_length=50, description="Task category/tag")
+ due_date: Optional[str] = Field(None, description="Due date in ISO format")
+ priority: Optional[str] = Field("medium", description="Task priority: low, medium, high")
+ is_recurring: Optional[bool] = Field(False, description="Whether task is recurring")
+ recurrence_type: Optional[str] = Field(None, description="Recurrence type: daily, weekly, monthly, yearly")
+ recurrence_interval: Optional[int] = Field(1, description="Recurrence interval (e.g., every 2 days)")
+ recurrence_end_date: Optional[str] = Field(None, description="Recurrence end date in ISO format")
+
+
+class UpdateTaskRequest(BaseModel):
+ """Request model for updating a task."""
+ title: Optional[str] = Field(None, min_length=1, max_length=500, description="Task title")
+ description: Optional[str] = Field(None, description="Task description")
+ completed: Optional[bool] = Field(None, description="Task completion status")
+ category: Optional[str] = Field(None, max_length=50, description="Task category/tag")
+ due_date: Optional[str] = Field(None, description="Due date in ISO format")
+ priority: Optional[str] = Field(None, description="Task priority: low, medium, high")
+ is_recurring: Optional[bool] = Field(None, description="Whether task is recurring")
+ recurrence_type: Optional[str] = Field(None, description="Recurrence type: daily, weekly, monthly, yearly")
+ recurrence_interval: Optional[int] = Field(None, description="Recurrence interval")
+ recurrence_end_date: Optional[str] = Field(None, description="Recurrence end date in ISO format")
+
+
+class TaskResponse(BaseModel):
+ """Task data response model."""
+ id: int
+ user_id: int
+ title: str
+ description: Optional[str]
+ completed: bool
+ category: Optional[str]
+ due_date: Optional[str]
+ priority: Optional[str]
+ is_recurring: bool
+ recurrence_type: Optional[str]
+ recurrence_interval: Optional[int]
+ recurrence_end_date: Optional[str]
+ parent_task_id: Optional[int]
+ created_at: str
+ updated_at: str
+
+
+class TaskListResponse(BaseModel):
+ """Response model for task list."""
+ tasks: List[TaskResponse]
+
+
+@router.get("", response_model=TaskListResponse)
+async def list_tasks(
+ user_id: int = Depends(get_current_user_id),
+ session: Session = Depends(get_session)
+) -> TaskListResponse:
+ """
+ Get all tasks for the authenticated user.
+
+ Args:
+ user_id: Current user ID from JWT token
+ session: Database session
+
+ Returns:
+ TaskListResponse with array of user's tasks
+
+ Raises:
+ HTTPException 401: If JWT token is invalid
+ """
+ # Get user tasks
+ tasks = task_service.get_user_tasks(session, user_id)
+
+ # Convert to response format
+ task_responses = [
+ TaskResponse(
+ id=task.id,
+ user_id=task.user_id,
+ title=task.title,
+ description=task.description,
+ completed=task.completed,
+ category=task.category,
+ due_date=task.due_date.isoformat() if task.due_date else None,
+ priority=task.priority,
+ is_recurring=task.is_recurring,
+ recurrence_type=task.recurrence_type,
+ recurrence_interval=task.recurrence_interval,
+ recurrence_end_date=task.recurrence_end_date.isoformat() if task.recurrence_end_date else None,
+ parent_task_id=task.parent_task_id,
+ created_at=task.created_at.isoformat(),
+ updated_at=task.updated_at.isoformat()
+ )
+ for task in tasks
+ ]
+
+ return TaskListResponse(tasks=task_responses)
+
+
+@router.post("", response_model=TaskResponse, status_code=status.HTTP_201_CREATED)
+async def create_task(
+ request: CreateTaskRequest,
+ user_id: int = Depends(get_current_user_id),
+ session: Session = Depends(get_session)
+) -> TaskResponse:
+ """
+ Create a new task for the authenticated user.
+
+ Args:
+ request: Task creation request with title and optional description
+ user_id: Current user ID from JWT token
+ session: Database session
+
+ Returns:
+ TaskResponse with created task data
+
+ Raises:
+ HTTPException 401: If JWT token is invalid
+ HTTPException 422: If validation fails
+ """
+ # Create task
+ task = task_service.create_task(
+ session=session,
+ user_id=user_id,
+ title=request.title,
+ description=request.description,
+ category=request.category,
+ due_date=request.due_date,
+ priority=request.priority,
+ is_recurring=request.is_recurring or False,
+ recurrence_type=request.recurrence_type,
+ recurrence_interval=request.recurrence_interval or 1,
+ recurrence_end_date=request.recurrence_end_date
+ )
+
+ # Return response
+ return TaskResponse(
+ id=task.id,
+ user_id=task.user_id,
+ title=task.title,
+ description=task.description,
+ completed=task.completed,
+ category=task.category,
+ due_date=task.due_date.isoformat() if task.due_date else None,
+ priority=task.priority,
+ is_recurring=task.is_recurring,
+ recurrence_type=task.recurrence_type,
+ recurrence_interval=task.recurrence_interval,
+ recurrence_end_date=task.recurrence_end_date.isoformat() if task.recurrence_end_date else None,
+ parent_task_id=task.parent_task_id,
+ created_at=task.created_at.isoformat(),
+ updated_at=task.updated_at.isoformat()
+ )
+
+
+@router.put("/{task_id}", response_model=TaskResponse)
+async def update_task(
+ task_id: int,
+ request: UpdateTaskRequest,
+ user_id: int = Depends(get_current_user_id),
+ session: Session = Depends(get_session)
+) -> TaskResponse:
+ """
+ Update an existing task.
+
+ Args:
+ task_id: ID of the task to update
+ request: Task update request with optional fields
+ user_id: Current user ID from JWT token
+ session: Database session
+
+ Returns:
+ TaskResponse with updated task data
+
+ Raises:
+ HTTPException 401: If JWT token is invalid
+ HTTPException 404: If task not found or doesn't belong to user
+ """
+ # Update task
+ task = task_service.update_task(
+ session=session,
+ task_id=task_id,
+ user_id=user_id,
+ title=request.title,
+ description=request.description,
+ completed=request.completed,
+ category=request.category,
+ due_date=request.due_date,
+ priority=request.priority,
+ is_recurring=request.is_recurring,
+ recurrence_type=request.recurrence_type,
+ recurrence_interval=request.recurrence_interval,
+ recurrence_end_date=request.recurrence_end_date
+ )
+
+ if not task:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail="Task not found"
+ )
+
+ # Return response
+ return TaskResponse(
+ id=task.id,
+ user_id=task.user_id,
+ title=task.title,
+ description=task.description,
+ completed=task.completed,
+ category=task.category,
+ due_date=task.due_date.isoformat() if task.due_date else None,
+ priority=task.priority,
+ is_recurring=task.is_recurring,
+ recurrence_type=task.recurrence_type,
+ recurrence_interval=task.recurrence_interval,
+ recurrence_end_date=task.recurrence_end_date.isoformat() if task.recurrence_end_date else None,
+ parent_task_id=task.parent_task_id,
+ created_at=task.created_at.isoformat(),
+ updated_at=task.updated_at.isoformat()
+ )
+
+
+@router.delete("/{task_id}", status_code=status.HTTP_204_NO_CONTENT)
+async def delete_task(
+ task_id: int,
+ user_id: int = Depends(get_current_user_id),
+ session: Session = Depends(get_session)
+) -> None:
+ """
+ Delete a task.
+
+ Args:
+ task_id: ID of the task to delete
+ user_id: Current user ID from JWT token
+ session: Database session
+
+ Returns:
+ None (204 No Content)
+
+ Raises:
+ HTTPException 401: If JWT token is invalid
+ HTTPException 404: If task not found or doesn't belong to user
+ """
+ # Delete task
+ success = task_service.delete_task(
+ session=session,
+ task_id=task_id,
+ user_id=user_id
+ )
+
+ if not success:
+ raise HTTPException(
+ status_code=status.HTTP_404_NOT_FOUND,
+ detail="Task not found"
+ )
diff --git a/src/config/__init__.py b/src/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/config/logging.py b/src/config/logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..45944b857a11d974622b012417ff3ae0cd188ebd
--- /dev/null
+++ b/src/config/logging.py
@@ -0,0 +1,63 @@
+"""
+Logging configuration for the AI chatbot backend.
+
+This module sets up structured logging with appropriate levels
+and formats for production use.
+"""
+
+import logging
+import sys
+from typing import Optional
+import os
+
+
+def setup_logging(
+ level: Optional[str] = None,
+ log_file: Optional[str] = None
+) -> None:
+ """
+ Configure structured logging for the application.
+
+ Args:
+ level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
+ log_file: Optional file path for log output
+ """
+ # Get log level from environment or parameter
+ log_level_str = level or os.getenv("LOG_LEVEL", "INFO")
+ log_level = getattr(logging, log_level_str.upper(), logging.INFO)
+
+ # Create formatter
+ formatter = logging.Formatter(
+ fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
+ datefmt='%Y-%m-%d %H:%M:%S'
+ )
+
+ # Configure root logger
+ root_logger = logging.getLogger()
+ root_logger.setLevel(log_level)
+
+ # Remove existing handlers
+ root_logger.handlers.clear()
+
+ # Console handler
+ console_handler = logging.StreamHandler(sys.stdout)
+ console_handler.setLevel(log_level)
+ console_handler.setFormatter(formatter)
+ root_logger.addHandler(console_handler)
+
+ # File handler (if specified)
+ if log_file:
+ file_handler = logging.FileHandler(log_file)
+ file_handler.setLevel(log_level)
+ file_handler.setFormatter(formatter)
+ root_logger.addHandler(file_handler)
+
+ # Set specific logger levels
+ logging.getLogger("uvicorn").setLevel(logging.INFO)
+ logging.getLogger("sqlalchemy.engine").setLevel(logging.WARNING)
+
+ logging.info(f"Logging configured at {log_level_str} level")
+
+
+# Auto-configure logging on import
+setup_logging()
diff --git a/src/database.py b/src/database.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ef9848426525ea1eb5a1a8215c1dec128875a58
--- /dev/null
+++ b/src/database.py
@@ -0,0 +1,64 @@
+"""
+Database configuration and session management.
+
+This module provides:
+- Database engine creation
+- Session management
+- Dependency injection for FastAPI routes
+"""
+
+import os
+from typing import Generator
+
+from sqlmodel import Session, create_engine, SQLModel
+
+# Get database URL from environment variable
+# For Vercel serverless, use /tmp directory for SQLite
+# For Hugging Face Spaces, use /app/data directory
+DATABASE_URL = os.getenv("DATABASE_URL")
+
+if DATABASE_URL is None:
+ # Check if running on Vercel (serverless environment)
+ if os.getenv("VERCEL"):
+ # Use /tmp directory which is writable in Vercel serverless
+ DATABASE_URL = "sqlite:////tmp/todo.db"
+ # Check if running on Hugging Face Spaces
+ elif os.getenv("SPACE_ID") or os.path.exists("/app/data"):
+ # Use /app/data directory for persistent storage on HF Spaces
+ DATABASE_URL = "sqlite:////app/data/todo.db"
+ else:
+ # Local development
+ DATABASE_URL = "sqlite:///./todo.db"
+
+# Create database engine
+# connect_args only needed for SQLite
+connect_args = {"check_same_thread": False} if DATABASE_URL.startswith("sqlite") else {}
+
+engine = create_engine(
+ DATABASE_URL,
+ echo=False, # Disable SQL query logging for serverless
+ connect_args=connect_args,
+ pool_pre_ping=True, # Verify connections before using
+)
+
+
+def create_db_and_tables():
+ """Create all database tables."""
+ SQLModel.metadata.create_all(engine)
+
+
+def get_session() -> Generator[Session, None, None]:
+ """
+ Dependency function to provide database session to FastAPI routes.
+
+ Yields:
+ Session: SQLModel database session
+
+ Example:
+ @app.get("/items")
+ def get_items(session: Session = Depends(get_session)):
+ items = session.exec(select(Item)).all()
+ return items
+ """
+ with Session(engine) as session:
+ yield session
diff --git a/src/main.py b/src/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..a87904c36b3c5f84036c809eac03d7c7b87b8ef8
--- /dev/null
+++ b/src/main.py
@@ -0,0 +1,79 @@
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+from dotenv import load_dotenv
+import os
+
+# Load environment variables
+load_dotenv()
+
+# Configure logging first
+from src.config.logging import setup_logging
+setup_logging()
+
+# Create FastAPI application
+app = FastAPI(
+ title="Todo Application API with AI Chatbot",
+ description="Backend API for Todo application with JWT authentication and AI-powered conversational task management",
+ version="2.0.0",
+)
+
+# CORS Configuration
+CORS_ORIGINS = os.getenv("CORS_ORIGINS", "http://localhost:3000,http://localhost:3001,http://localhost:3002,http://localhost:3003,http://localhost:3004,http://localhost:3005").split(",")
+
+# Configure CORS middleware
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=CORS_ORIGINS,
+ allow_credentials=True,
+ allow_methods=["GET", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"],
+ allow_headers=["*"],
+ expose_headers=["*"],
+ max_age=3600,
+)
+
+# Initialize database tables on startup
+from src.database import create_db_and_tables
+
+@app.on_event("startup")
+def on_startup():
+ """Initialize database tables and MCP server on application startup."""
+ try:
+ create_db_and_tables()
+ except Exception as e:
+ print(f"Warning: Could not initialize database tables: {e}")
+ # Continue anyway - tables might already exist
+
+ # Initialize MCP server with tools
+ try:
+ from src.mcp.server import mcp_server
+        from src.mcp.tools import register_all_tools  # importing the module auto-registers all tools
+ print(f"MCP Server initialized: {mcp_server.name} v{mcp_server.version}")
+ print(f"Registered tools: {len(mcp_server.tools)}")
+ except Exception as e:
+ print(f"Warning: Could not initialize MCP server: {e}")
+
+# Root endpoint
+@app.get("/")
+async def root():
+ """Root endpoint with API information."""
+ return {
+ "message": "Todo Application API",
+        "version": "2.0.0",
+ "docs": "/docs",
+ "health": "/health"
+ }
+
+# Router registration
+from src.api import auth, tasks, subtasks, password_reset, chat, health
+# AI router temporarily disabled due to Vercel size constraints
+# from src.api import ai
+
+# Health check endpoints (no prefix for Kubernetes probes)
+app.include_router(health.router, tags=["Health"])
+
+app.include_router(auth.router, prefix="/api/auth", tags=["Authentication"])
+app.include_router(password_reset.router, prefix="/api/auth", tags=["Password Reset"])
+app.include_router(tasks.router, prefix="/api/tasks", tags=["Tasks"])
+app.include_router(subtasks.router, prefix="/api", tags=["Subtasks"])
+app.include_router(chat.router, prefix="/api/v1", tags=["AI Chat"])
+# app.include_router(ai.router, prefix="/api/ai", tags=["AI Features"])
diff --git a/src/main_minimal.py b/src/main_minimal.py
new file mode 100644
index 0000000000000000000000000000000000000000..1efb89ccc9cd9c4ff28ae5fbce3735a72f5ebe71
--- /dev/null
+++ b/src/main_minimal.py
@@ -0,0 +1,21 @@
+from fastapi import FastAPI
+import os
+
+app = FastAPI(title="Todo API - Minimal Test")
+
+@app.get("/")
+def root():
+ return {
+ "status": "ok",
+ "message": "Railway FastAPI is working!",
+ "port": os.getenv("PORT", "not set"),
+ "database": "connected" if os.getenv("DATABASE_URL") else "not configured"
+ }
+
+@app.get("/health")
+def health():
+ return {"status": "healthy", "service": "railway-test"}
+
+@app.get("/api/health")
+def api_health():
+ return {"status": "healthy", "api": "working"}
diff --git a/src/mcp/__init__.py b/src/mcp/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/mcp/server.py b/src/mcp/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a9e4b3468f81f50fb5167d64c0e2d0ac1103cf6
--- /dev/null
+++ b/src/mcp/server.py
@@ -0,0 +1,138 @@
+"""
+MCP (Model Context Protocol) Server for AI Chatbot.
+
+This module initializes and manages MCP tools that the AI can use
+to interact with the task management system.
+"""
+
+import logging
+from typing import Dict, Any, Callable, List
+from pydantic import BaseModel
+
+logger = logging.getLogger(__name__)
+
+
+class MCPTool(BaseModel):
+ """Base model for MCP tool definition."""
+ name: str
+ description: str
+ parameters: Dict[str, Any]
+ function: Any # Will be the actual callable
+
+
+class MCPServer:
+ """
+ MCP Server managing tool registration and execution.
+
+ This server acts as a registry for tools that the AI can invoke.
+ All tools must be registered before they can be used.
+ """
+
+ def __init__(self, name: str = "todo-tools", version: str = "1.0.0"):
+ self.name = name
+ self.version = version
+ self.tools: Dict[str, MCPTool] = {}
+ logger.info(f"MCP Server initialized: {name} v{version}")
+
+ def register_tool(
+ self,
+ name: str,
+ description: str,
+ parameters: Dict[str, Any],
+ function: Callable
+ ):
+ """
+ Register a new tool with the MCP server.
+
+ Args:
+ name: Tool name (must be unique)
+ description: Human-readable description of what the tool does
+ parameters: JSON schema describing the tool's parameters
+ function: The actual function to execute when tool is called
+ """
+ if name in self.tools:
+ logger.warning(f"Tool '{name}' already registered, overwriting")
+
+ tool = MCPTool(
+ name=name,
+ description=description,
+ parameters=parameters,
+ function=function
+ )
+ self.tools[name] = tool
+ logger.info(f"Registered tool: {name}")
+
+ def get_tool(self, name: str) -> MCPTool:
+ """Get a registered tool by name."""
+ if name not in self.tools:
+ raise ValueError(f"Tool '{name}' not found in MCP server")
+ return self.tools[name]
+
+ def list_tools(self) -> List[Dict[str, Any]]:
+ """
+ List all registered tools in Cohere-compatible format.
+
+ Returns:
+ List of tool definitions for Cohere API v2
+ """
+ tools_list = []
+ for tool in self.tools.values():
+ # Convert parameter definitions to JSON Schema format
+ properties = {}
+ required = []
+
+ for param_name, param_def in tool.parameters.items():
+ properties[param_name] = {
+ "type": param_def.get("type", "string"),
+ "description": param_def.get("description", "")
+ }
+ if param_def.get("required", False):
+ required.append(param_name)
+
+ tools_list.append({
+ "type": "function",
+ "function": {
+ "name": tool.name,
+ "description": tool.description,
+ "parameters": {
+ "type": "object",
+ "properties": properties,
+ "required": required
+ }
+ }
+ })
+
+ return tools_list
+
+ async def execute_tool(
+ self,
+ tool_name: str,
+ parameters: Dict[str, Any],
+ db: Any = None
+ ) -> Dict[str, Any]:
+ """
+ Execute a registered tool with given parameters.
+
+ Args:
+ tool_name: Name of the tool to execute
+ parameters: Parameters to pass to the tool
+ db: Database session to pass to the tool
+
+ Returns:
+ Tool execution result as dictionary
+ """
+ tool = self.get_tool(tool_name)
+
+ try:
+ logger.info(f"Executing tool: {tool_name} with params: {parameters}")
+ # Pass database session to tool
+ result = await tool.function(**parameters, db=db)
+ logger.info(f"Tool {tool_name} executed successfully")
+ return result
+ except Exception as e:
+ logger.error(f"Tool {tool_name} execution failed: {str(e)}")
+ raise
+
+
+# Global MCP server instance
+mcp_server = MCPServer()
diff --git a/src/mcp/tools/__init__.py b/src/mcp/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..78d29d1760e5da64ecd5cea429ee1373c9bba7d7
--- /dev/null
+++ b/src/mcp/tools/__init__.py
@@ -0,0 +1,65 @@
+"""
+MCP Tools initialization and registration.
+
+This module registers all MCP tools with the server on import.
+"""
+
+import logging
+from src.mcp.server import mcp_server
+from src.mcp.tools.add_task import add_task_tool
+from src.mcp.tools.list_tasks import list_tasks_tool
+from src.mcp.tools.complete_task import complete_task_tool
+from src.mcp.tools.delete_task import delete_task_tool
+from src.mcp.tools.update_task import update_task_tool
+
+logger = logging.getLogger(__name__)
+
+
+def register_all_tools():
+ """Register all MCP tools with the server."""
+
+ # Register add_task tool (User Story 1)
+ mcp_server.register_tool(
+ name=add_task_tool.name,
+ description=add_task_tool.description,
+ parameters=add_task_tool.parameters,
+ function=add_task_tool.execute
+ )
+
+ # Register list_tasks tool (User Story 2)
+ mcp_server.register_tool(
+ name=list_tasks_tool.name,
+ description=list_tasks_tool.description,
+ parameters=list_tasks_tool.parameters,
+ function=list_tasks_tool.execute
+ )
+
+ # Register complete_task tool (User Story 3)
+ mcp_server.register_tool(
+ name=complete_task_tool.name,
+ description=complete_task_tool.description,
+ parameters=complete_task_tool.parameters,
+ function=complete_task_tool.execute
+ )
+
+ # Register delete_task tool (User Story 4)
+ mcp_server.register_tool(
+ name=delete_task_tool.name,
+ description=delete_task_tool.description,
+ parameters=delete_task_tool.parameters,
+ function=delete_task_tool.execute
+ )
+
+ # Register update_task tool (User Story 5)
+ mcp_server.register_tool(
+ name=update_task_tool.name,
+ description=update_task_tool.description,
+ parameters=update_task_tool.parameters,
+ function=update_task_tool.execute
+ )
+
+ logger.info("All 5 MCP tools registered successfully")
+
+
+# Auto-register tools on import
+register_all_tools()
diff --git a/src/mcp/tools/add_task.py b/src/mcp/tools/add_task.py
new file mode 100644
index 0000000000000000000000000000000000000000..27359bd2f9c2e4f0c9935eb5588162fad32efda7
--- /dev/null
+++ b/src/mcp/tools/add_task.py
@@ -0,0 +1,112 @@
+"""
+Add Task MCP Tool.
+
+This tool allows the AI to create new tasks for users through natural language.
+"""
+
+import logging
+from typing import Dict, Any
+from sqlmodel import Session
+from src.mcp.tools.base import MCPToolBase
+from src.models.task import Task
+from src.validation.security_guard import security_guard
+
+logger = logging.getLogger(__name__)
+
+
+class AddTaskTool(MCPToolBase):
+ """MCP tool for adding tasks."""
+
+ @property
+ def name(self) -> str:
+ return "add_task"
+
+ @property
+ def description(self) -> str:
+ return "Create a new task for the authenticated user. Extracts task title and optional description from natural language input."
+
+ @property
+ def parameters(self) -> Dict[str, Any]:
+ return {
+ "user_id": {
+ "description": "ID of the authenticated user (automatically provided by system)",
+ "type": "integer",
+ "required": True
+ },
+ "title": {
+ "description": "Task title extracted from user's message",
+ "type": "string",
+ "required": True
+ },
+ "description": {
+ "description": "Optional task description or additional details",
+ "type": "string",
+ "required": False
+ }
+ }
+
+ async def execute(self, user_id: int, title: str, description: str = "", db: Session = None, **kwargs) -> Dict[str, Any]:
+ """
+ Execute add_task tool.
+
+ Args:
+ user_id: Authenticated user ID
+ title: Task title
+ description: Optional task description
+ db: Database session
+
+ Returns:
+ Dictionary with success status and task details
+ """
+ try:
+ # Validate user_id
+ security_guard.validate_user_id(user_id)
+
+ # Validate title
+ if not title or not title.strip():
+ logger.error("Task title is empty")
+ return self.create_error_result(
+ "Task title cannot be empty",
+ "INVALID_TITLE"
+ )
+
+            # Create task in database (committed below; rolled back in the except handler on failure)
+ task = Task(
+ user_id=user_id,
+ title=title.strip(),
+ description=description.strip() if description else "",
+ completed=False
+ )
+
+ db.add(task)
+ db.commit()
+ db.refresh(task)
+
+ logger.info(f"Task created: ID={task.id}, User={user_id}, Title='{title[:50]}'")
+
+ # Return structured success response
+ return self.create_success_result(
+ message=f"Task '{title}' created successfully",
+ data={
+ "task_id": task.id,
+ "title": task.title,
+ "description": task.description,
+ "completed": task.completed,
+ "created_at": task.created_at.isoformat()
+ }
+ )
+
+ except ValueError as e:
+ logger.error(f"Validation error in add_task: {str(e)}")
+ return self.create_error_result(str(e), "VALIDATION_ERROR")
+ except Exception as e:
+ logger.error(f"Error creating task: {str(e)}")
+ db.rollback()
+ return self.create_error_result(
+ "Failed to create task. Please try again.",
+ "DATABASE_ERROR"
+ )
+
+
+# Tool instance
+add_task_tool = AddTaskTool()
diff --git a/src/mcp/tools/base.py b/src/mcp/tools/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a08e776323f5e8f5ad1f394d1c28995975ff720
--- /dev/null
+++ b/src/mcp/tools/base.py
@@ -0,0 +1,131 @@
+"""
+Base class for MCP tools with validation.
+
+All MCP tools should inherit from this base class to ensure
+consistent validation and error handling.
+"""
+
+import logging
+from typing import Dict, Any
+from pydantic import BaseModel, Field
+from abc import ABC, abstractmethod
+
+logger = logging.getLogger(__name__)
+
+
+class ToolResult(BaseModel):
+ """Standard result format for all MCP tools."""
+ success: bool
+ message: str
+ data: Dict[str, Any] = Field(default_factory=dict)
+
+
+class MCPToolBase(ABC):
+ """
+ Abstract base class for MCP tools.
+
+ All tools must implement the execute method and define their schema.
+ """
+
+ @property
+ @abstractmethod
+ def name(self) -> str:
+ """Tool name (must be unique)."""
+ pass
+
+ @property
+ @abstractmethod
+ def description(self) -> str:
+ """Human-readable description of what the tool does."""
+ pass
+
+ @property
+ @abstractmethod
+ def parameters(self) -> Dict[str, Any]:
+ """
+ JSON schema describing the tool's parameters.
+
+ Format for Cohere API:
+ {
+ "param_name": {
+ "description": "Parameter description",
+ "type": "string|integer|boolean",
+ "required": True|False
+ }
+ }
+ """
+ pass
+
+ @abstractmethod
+ async def execute(self, **kwargs) -> Dict[str, Any]:
+ """
+ Execute the tool with given parameters.
+
+ Args:
+ **kwargs: Tool parameters
+
+ Returns:
+ Dictionary with tool execution result
+
+ Raises:
+ ValueError: If parameters are invalid
+ Exception: If execution fails
+ """
+ pass
+
+ def validate_user_id(self, user_id: int) -> None:
+ """
+ Validate that user_id is provided and valid.
+
+ Args:
+ user_id: User ID to validate
+
+ Raises:
+ ValueError: If user_id is invalid
+ """
+ if not user_id or user_id <= 0:
+ raise ValueError("Invalid user_id")
+
+ def create_success_result(
+ self,
+ message: str,
+ data: Dict[str, Any] = None
+ ) -> Dict[str, Any]:
+ """
+ Create a standardized success result.
+
+ Args:
+ message: Success message
+ data: Optional data to include
+
+ Returns:
+ Standardized success result dictionary
+ """
+ result = ToolResult(
+ success=True,
+ message=message,
+ data=data or {}
+ )
+ return result.model_dump()
+
+ def create_error_result(
+ self,
+ message: str,
+ error_code: str = "TOOL_ERROR"
+ ) -> Dict[str, Any]:
+ """
+ Create a standardized error result.
+
+ Args:
+ message: Error message
+ error_code: Error code for categorization
+
+ Returns:
+ Standardized error result dictionary
+ """
+ return {
+ "success": False,
+ "message": message,
+ "error_code": error_code,
+ "data": {}
+ }
diff --git a/src/mcp/tools/complete_task.py b/src/mcp/tools/complete_task.py
new file mode 100644
index 0000000000000000000000000000000000000000..af1f490d957fc63f8bf899171e41b236b91825d3
--- /dev/null
+++ b/src/mcp/tools/complete_task.py
@@ -0,0 +1,147 @@
+"""
+Complete Task MCP Tool.
+
+This tool allows the AI to mark tasks as completed by ID or title matching.
+"""
+
+import logging
+from typing import Dict, Any, Optional
+from sqlmodel import Session, select
+from src.mcp.tools.base import MCPToolBase
+from src.models.task import Task
+from src.validation.security_guard import security_guard
+
+logger = logging.getLogger(__name__)
+
+
+class CompleteTaskTool(MCPToolBase):
+ """MCP tool for completing tasks."""
+
+ @property
+ def name(self) -> str:
+ return "complete_task"
+
+ @property
+ def description(self) -> str:
+ return "Mark a task as completed. Accepts either task_id or task_title for identification. Validates that the task belongs to the authenticated user."
+
+ @property
+ def parameters(self) -> Dict[str, Any]:
+ return {
+ "user_id": {
+ "description": "ID of the authenticated user (automatically provided by system)",
+ "type": "integer",
+ "required": True
+ },
+ "task_id": {
+ "description": "ID of the task to complete",
+ "type": "integer",
+ "required": False
+ },
+ "task_title": {
+ "description": "Title or partial title of the task to complete (case-insensitive substring match)",
+ "type": "string",
+ "required": False
+ }
+ }
+
+ async def execute(
+ self,
+ user_id: int,
+ task_id: Optional[int] = None,
+ task_title: Optional[str] = None,
+ db: Session = None,
+ **kwargs
+ ) -> Dict[str, Any]:
+ """
+ Execute complete_task tool.
+
+ Args:
+ user_id: Authenticated user ID
+ task_id: Task ID to complete
+ task_title: Task title for matching
+ db: Database session
+
+ Returns:
+ Dictionary with success status and task details
+ """
+ try:
+ # Validate user_id
+ security_guard.validate_user_id(user_id)
+
+ # Must provide either task_id or task_title
+ if not task_id and not task_title:
+ return self.create_error_result(
+ "Please specify either a task ID or task title",
+ "MISSING_IDENTIFIER"
+ )
+
+ # Find task
+ if task_id:
+ task = await security_guard.validate_task_ownership(db, task_id, user_id)
+ else:
+ # Search by title (case-insensitive substring match)
+ statement = select(Task).where(
+ Task.user_id == user_id,
+ Task.title.ilike(f"%{task_title}%")
+ )
+ matching_tasks = db.exec(statement).all()
+
+ if not matching_tasks:
+ return self.create_error_result(
+ "Task not found. Use 'show tasks' to see your list",
+ "TASK_NOT_FOUND"
+ )
+
+ if len(matching_tasks) > 1:
+ task_titles = [f"- {t.title} (ID: {t.id})" for t in matching_tasks]
+ return self.create_error_result(
+ f"Multiple tasks match that description:\n" + "\n".join(task_titles) + "\nPlease be more specific or use the task ID.",
+ "AMBIGUOUS_MATCH"
+ )
+
+ task = matching_tasks[0]
+
+ # Check if already completed
+ if task.completed:
+ return self.create_success_result(
+ message=f"Task '{task.title}' is already marked as complete",
+ data={
+ "task_id": task.id,
+ "title": task.title,
+ "completed": True
+ }
+ )
+
+ # Mark as completed
+ task.completed = True
+ db.add(task)
+ db.commit()
+ db.refresh(task)
+
+ logger.info(f"Task completed: ID={task.id}, User={user_id}")
+
+ return self.create_success_result(
+ message=f"Task '{task.title}' marked as complete",
+ data={
+ "task_id": task.id,
+ "title": task.title,
+ "completed": task.completed,
+ "updated_at": task.updated_at.isoformat()
+ }
+ )
+
+ except ValueError as e:
+ logger.error(f"Validation error in complete_task: {str(e)}")
+ return self.create_error_result(str(e), "VALIDATION_ERROR")
+ except Exception as e:
+ logger.error(f"Error completing task: {str(e)}")
+ db.rollback()
+ return self.create_error_result(
+ "Failed to complete task. Please try again.",
+ "DATABASE_ERROR"
+ )
+
+
+# Tool instance
+complete_task_tool = CompleteTaskTool()
diff --git a/src/mcp/tools/delete_task.py b/src/mcp/tools/delete_task.py
new file mode 100644
index 0000000000000000000000000000000000000000..070322f286a00ee0a5934c551e7d242a7d0e1e16
--- /dev/null
+++ b/src/mcp/tools/delete_task.py
@@ -0,0 +1,136 @@
+"""
+Delete Task MCP Tool.
+
+This tool allows the AI to permanently delete tasks by ID or title matching.
+"""
+
+import logging
+from typing import Dict, Any, Optional
+from sqlmodel import Session, select
+from src.mcp.tools.base import MCPToolBase
+from src.models.task import Task
+from src.validation.security_guard import security_guard
+
+logger = logging.getLogger(__name__)
+
+
class DeleteTaskTool(MCPToolBase):
    """MCP tool for deleting tasks.

    Identifies the target task either by primary key (task_id) or by a
    case-insensitive substring match on its title (task_title), and checks
    ownership before deleting.
    """

    @property
    def name(self) -> str:
        return "delete_task"

    @property
    def description(self) -> str:
        return "Permanently delete a task. Accepts either task_id or task_title for identification. Validates that the task belongs to the authenticated user."

    @property
    def parameters(self) -> Dict[str, Any]:
        return {
            "user_id": {
                "description": "ID of the authenticated user (automatically provided by system)",
                "type": "integer",
                "required": True
            },
            "task_id": {
                "description": "ID of the task to delete",
                "type": "integer",
                "required": False
            },
            "task_title": {
                "description": "Title or partial title of the task to delete (case-insensitive substring match)",
                "type": "string",
                "required": False
            }
        }

    async def execute(
        self,
        user_id: int,
        task_id: Optional[int] = None,
        task_title: Optional[str] = None,
        db: Session = None,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Execute delete_task tool.

        Args:
            user_id: Authenticated user ID
            task_id: Task ID to delete
            task_title: Task title for matching (case-insensitive substring)
            db: Database session

        Returns:
            Dictionary with success status and the deleted task's id/title,
            or an error result (MISSING_IDENTIFIER, TASK_NOT_FOUND,
            AMBIGUOUS_MATCH, VALIDATION_ERROR, DATABASE_ERROR).
        """
        try:
            # Reject malformed user IDs early; raises ValueError on failure.
            security_guard.validate_user_id(user_id)

            # Must provide either task_id or task_title
            if not task_id and not task_title:
                return self.create_error_result(
                    "Please specify either a task ID or task title",
                    "MISSING_IDENTIFIER"
                )

            # Find task
            if task_id:
                # Ownership check ensures the task belongs to this user.
                task = await security_guard.validate_task_ownership(db, task_id, user_id)
            else:
                # Search by title (case-insensitive substring match)
                statement = select(Task).where(
                    Task.user_id == user_id,
                    Task.title.ilike(f"%{task_title}%")
                )
                matching_tasks = db.exec(statement).all()

                if not matching_tasks:
                    # Message aligned with complete_task/update_task so the
                    # assistant gives consistent guidance across tools.
                    return self.create_error_result(
                        "Task not found. Use 'show tasks' to see your list",
                        "TASK_NOT_FOUND"
                    )

                if len(matching_tasks) > 1:
                    task_titles = [f"- {t.title} (ID: {t.id})" for t in matching_tasks]
                    return self.create_error_result(
                        "Multiple tasks match that description:\n" + "\n".join(task_titles) + "\nPlease be more specific or use the task ID.",
                        "AMBIGUOUS_MATCH"
                    )

                task = matching_tasks[0]

            # Capture identifying info before deletion expires the ORM object.
            task_title_deleted = task.title
            task_id_deleted = task.id

            # Delete task
            db.delete(task)
            db.commit()

            logger.info(f"Task deleted: ID={task_id_deleted}, User={user_id}, Title='{task_title_deleted}'")

            return self.create_success_result(
                message=f"Task '{task_title_deleted}' has been deleted",
                data={
                    "task_id": task_id_deleted,
                    "title": task_title_deleted
                }
            )

        except ValueError as e:
            logger.error(f"Validation error in delete_task: {str(e)}")
            return self.create_error_result(str(e), "VALIDATION_ERROR")
        except Exception as e:
            logger.error(f"Error deleting task: {str(e)}")
            # db defaults to None; guard so a missing session doesn't raise
            # AttributeError while reporting the original failure.
            if db is not None:
                db.rollback()
            return self.create_error_result(
                "Failed to delete task. Please try again.",
                "DATABASE_ERROR"
            )


# Tool instance
delete_task_tool = DeleteTaskTool()
diff --git a/src/mcp/tools/list_tasks.py b/src/mcp/tools/list_tasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..d15b36d3876570289fb7224d32cec75951ed1b15
--- /dev/null
+++ b/src/mcp/tools/list_tasks.py
@@ -0,0 +1,135 @@
+"""
+List Tasks MCP Tool.
+
+This tool allows the AI to retrieve and display tasks for users with filtering options.
+"""
+
+import logging
+from typing import Dict, Any
+from sqlmodel import Session, select
+from src.mcp.tools.base import MCPToolBase
+from src.models.task import Task
+from src.validation.security_guard import security_guard
+
+logger = logging.getLogger(__name__)
+
+
class ListTasksTool(MCPToolBase):
    """MCP tool for listing tasks."""

    @property
    def name(self) -> str:
        return "list_tasks"

    @property
    def description(self) -> str:
        return "Retrieve tasks for the authenticated user. Supports filtering by completion status (all, pending, or completed)."

    @property
    def parameters(self) -> Dict[str, Any]:
        return {
            "user_id": {
                "description": "ID of the authenticated user (automatically provided by system)",
                "type": "integer",
                "required": True
            },
            "filter": {
                "description": "Filter tasks by status: 'all', 'pending', or 'completed'",
                "type": "string",
                "required": False
            },
            "limit": {
                "description": "Maximum number of tasks to return",
                "type": "integer",
                "required": False
            }
        }

    async def execute(
        self,
        user_id: int,
        filter: str = "all",
        limit: int = 50,
        db: Session = None,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Execute list_tasks tool.

        Args:
            user_id: Authenticated user ID
            filter: Task filter ('all', 'pending', 'completed')
            limit: Maximum tasks to return
            db: Database session

        Returns:
            Dictionary with success status, a formatted task list, the count,
            and the applied filter; or an error result (INVALID_FILTER,
            VALIDATION_ERROR, DATABASE_ERROR).
        """
        try:
            # Reject malformed user IDs early; raises ValueError on failure.
            security_guard.validate_user_id(user_id)

            # Validate filter
            if filter not in ['all', 'pending', 'completed']:
                return self.create_error_result(
                    "Filter must be 'all', 'pending', or 'completed'",
                    "INVALID_FILTER"
                )

            # Build query; `== False/True` (not `is`) is required so
            # SQLAlchemy renders a SQL comparison, not a Python identity test.
            statement = select(Task).where(Task.user_id == user_id)

            if filter == 'pending':
                statement = statement.where(Task.completed == False)
            elif filter == 'completed':
                statement = statement.where(Task.completed == True)

            # Newest first, capped at `limit`.
            statement = statement.order_by(Task.created_at.desc()).limit(limit)

            # Execute query
            tasks = db.exec(statement).all()

            logger.info(f"Retrieved {len(tasks)} tasks for user {user_id} (filter: {filter})")

            # Format tasks
            task_list = [
                {
                    "id": task.id,
                    "title": task.title,
                    "description": task.description,
                    "completed": task.completed,
                    "created_at": task.created_at.isoformat(),
                    "updated_at": task.updated_at.isoformat()
                }
                for task in tasks
            ]

            # Generate message
            if not task_list:
                message = "You have no tasks yet. Add one to get started!"
            else:
                noun = "task" if len(task_list) == 1 else "tasks"
                if filter == 'all':
                    # Omit the filter word entirely; the previous
                    # interpolation of '' left a double space
                    # ("You have 3  tasks").
                    message = f"You have {len(task_list)} {noun}"
                else:
                    message = f"You have {len(task_list)} {filter} {noun}"

            return self.create_success_result(
                message=message,
                data={
                    "tasks": task_list,
                    "count": len(task_list),
                    "filter": filter
                }
            )

        except ValueError as e:
            logger.error(f"Validation error in list_tasks: {str(e)}")
            return self.create_error_result(str(e), "VALIDATION_ERROR")
        except Exception as e:
            logger.error(f"Error listing tasks: {str(e)}")
            return self.create_error_result(
                "Failed to retrieve tasks. Please try again.",
                "DATABASE_ERROR"
            )


# Tool instance
list_tasks_tool = ListTasksTool()
diff --git a/src/mcp/tools/update_task.py b/src/mcp/tools/update_task.py
new file mode 100644
index 0000000000000000000000000000000000000000..33fc7354c5142bb94f1c85c235d7e5ebed01cdf7
--- /dev/null
+++ b/src/mcp/tools/update_task.py
@@ -0,0 +1,169 @@
+"""
+Update Task MCP Tool.
+
+This tool allows the AI to modify task titles and descriptions.
+"""
+
+import logging
+from typing import Dict, Any, Optional
+from sqlmodel import Session, select
+from src.mcp.tools.base import MCPToolBase
+from src.models.task import Task
+from src.validation.security_guard import security_guard
+
+logger = logging.getLogger(__name__)
+
+
class UpdateTaskTool(MCPToolBase):
    """MCP tool for updating tasks.

    Identifies the target task by primary key (task_id) or case-insensitive
    title substring (task_title), then applies the requested title and/or
    description changes.
    """

    @property
    def name(self) -> str:
        return "update_task"

    @property
    def description(self) -> str:
        return "Update a task's title and/or description. Accepts either task_id or task_title for identification. Validates that the task belongs to the authenticated user."

    @property
    def parameters(self) -> Dict[str, Any]:
        return {
            "user_id": {
                "description": "ID of the authenticated user (automatically provided by system)",
                "type": "integer",
                "required": True
            },
            "task_id": {
                "description": "ID of the task to update",
                "type": "integer",
                "required": False
            },
            "task_title": {
                "description": "Current title or partial title of the task to update (case-insensitive substring match)",
                "type": "string",
                "required": False
            },
            "new_title": {
                "description": "New title for the task",
                "type": "string",
                "required": False
            },
            "new_description": {
                "description": "New description for the task",
                "type": "string",
                "required": False
            }
        }

    async def execute(
        self,
        user_id: int,
        task_id: Optional[int] = None,
        task_title: Optional[str] = None,
        new_title: Optional[str] = None,
        new_description: Optional[str] = None,
        db: Session = None,
        **kwargs
    ) -> Dict[str, Any]:
        """
        Execute update_task tool.

        Args:
            user_id: Authenticated user ID
            task_id: Task ID to update
            task_title: Current task title for matching
            new_title: New task title
            new_description: New task description ('' clears the description)
            db: Database session

        Returns:
            Dictionary with success status and updated task details, or an
            error result (MISSING_IDENTIFIER, NO_UPDATES, INVALID_TITLE,
            TASK_NOT_FOUND, AMBIGUOUS_MATCH, VALIDATION_ERROR, DATABASE_ERROR).
        """
        # Local import: this module does not import datetime at file level.
        from datetime import datetime

        try:
            # Reject malformed user IDs early; raises ValueError on failure.
            security_guard.validate_user_id(user_id)

            # Must provide either task_id or task_title
            if not task_id and not task_title:
                return self.create_error_result(
                    "Please specify either a task ID or task title",
                    "MISSING_IDENTIFIER"
                )

            # Must provide at least one update (new_description may be '' to
            # clear the description, hence the explicit None check).
            if not new_title and new_description is None:
                return self.create_error_result(
                    "Please specify what you'd like to update",
                    "NO_UPDATES"
                )

            # Validate new_title if provided
            if new_title and not new_title.strip():
                return self.create_error_result(
                    "Task title cannot be empty",
                    "INVALID_TITLE"
                )

            # Find task
            if task_id:
                # Ownership check ensures the task belongs to this user.
                task = await security_guard.validate_task_ownership(db, task_id, user_id)
            else:
                # Search by title (case-insensitive substring match)
                statement = select(Task).where(
                    Task.user_id == user_id,
                    Task.title.ilike(f"%{task_title}%")
                )
                matching_tasks = db.exec(statement).all()

                if not matching_tasks:
                    return self.create_error_result(
                        "Task not found. Use 'show tasks' to see your list",
                        "TASK_NOT_FOUND"
                    )

                if len(matching_tasks) > 1:
                    task_titles = [f"- {t.title} (ID: {t.id})" for t in matching_tasks]
                    return self.create_error_result(
                        "Multiple tasks match that description:\n" + "\n".join(task_titles) + "\nPlease be more specific or use the task ID.",
                        "AMBIGUOUS_MATCH"
                    )

                task = matching_tasks[0]

            # Apply requested changes
            if new_title:
                task.title = new_title.strip()
            if new_description is not None:
                task.description = new_description.strip()

            # The Task model configures no onupdate hook for updated_at, so
            # refresh it explicitly; otherwise the timestamp returned below
            # (and stored) would still be the creation time.
            task.updated_at = datetime.utcnow()

            db.add(task)
            db.commit()
            db.refresh(task)

            logger.info(f"Task updated: ID={task.id}, User={user_id}")

            return self.create_success_result(
                message="Task updated successfully",
                data={
                    "task_id": task.id,
                    "title": task.title,
                    "description": task.description,
                    "completed": task.completed,
                    "updated_at": task.updated_at.isoformat()
                }
            )

        except ValueError as e:
            logger.error(f"Validation error in update_task: {str(e)}")
            return self.create_error_result(str(e), "VALIDATION_ERROR")
        except Exception as e:
            logger.error(f"Error updating task: {str(e)}")
            # db defaults to None; guard so a missing session doesn't mask
            # the original failure with an AttributeError.
            if db is not None:
                db.rollback()
            return self.create_error_result(
                "Failed to update task. Please try again.",
                "DATABASE_ERROR"
            )


# Tool instance
update_task_tool = UpdateTaskTool()
diff --git a/src/middleware/jwt_auth.py b/src/middleware/jwt_auth.py
new file mode 100644
index 0000000000000000000000000000000000000000..ce60d2a43a674fac3e14986972b3fe916139c18d
--- /dev/null
+++ b/src/middleware/jwt_auth.py
@@ -0,0 +1,86 @@
+from fastapi import Request, HTTPException, status, Depends
+from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
+from jose import JWTError, jwt
+from typing import Optional
+import os
+
+# JWT Configuration
+SECRET_KEY = os.getenv("JWT_SECRET_KEY", "your-secret-key-here")
+ALGORITHM = os.getenv("JWT_ALGORITHM", "HS256")
+
+security = HTTPBearer()
+
+
async def verify_jwt_token(credentials: HTTPAuthorizationCredentials) -> dict:
    """
    Decode and verify a JWT bearer token, returning its payload.

    Args:
        credentials: Bearer credentials extracted by FastAPI's HTTPBearer

    Returns:
        dict: Decoded JWT claims; guaranteed to contain a non-null 'user_id'

    Raises:
        HTTPException: 401 if the token is malformed/expired, or if the
            payload carries no 'user_id' claim
    """
    try:
        claims = jwt.decode(credentials.credentials, SECRET_KEY, algorithms=[ALGORITHM])
    except JWTError:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Could not validate credentials",
            headers={"WWW-Authenticate": "Bearer"},
        )

    # A syntactically valid token without a user_id claim is still unusable.
    if claims.get("user_id") is None:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid authentication credentials",
            headers={"WWW-Authenticate": "Bearer"},
        )

    return claims
+
+
async def get_current_user_id(credentials: HTTPAuthorizationCredentials = Depends(security)) -> int:
    """
    FastAPI dependency: resolve the authenticated user's ID from the bearer token.

    Args:
        credentials: HTTP Authorization credentials (injected by FastAPI)

    Returns:
        int: The 'user_id' claim of the verified token

    Raises:
        HTTPException: 401 if the token is invalid or lacks a user_id
    """
    claims = await verify_jwt_token(credentials)
    return claims["user_id"]
+
+
async def get_current_user(credentials: HTTPAuthorizationCredentials = Depends(security)) -> dict:
    """
    FastAPI dependency: resolve the full JWT payload for the authenticated user.

    Args:
        credentials: HTTP Authorization credentials (injected by FastAPI)

    Returns:
        dict: All claims of the verified token

    Raises:
        HTTPException: 401 if the token is invalid
    """
    return await verify_jwt_token(credentials)
diff --git a/src/models/__init__.py b/src/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6fb9af0184a7215df737c7b0fae7c0a2e82bb5eb
--- /dev/null
+++ b/src/models/__init__.py
@@ -0,0 +1,15 @@
+from .user import User
+from .task import Task
+from .subtask import Subtask
+from .password_reset import PasswordResetToken
+from .conversation import Conversation
+from .message import Message
+
+__all__ = [
+ "User",
+ "Task",
+ "Subtask",
+ "PasswordResetToken",
+ "Conversation",
+ "Message"
+]
diff --git a/src/models/conversation.py b/src/models/conversation.py
new file mode 100644
index 0000000000000000000000000000000000000000..59ab29cd103ab0f270621c8e8422fef715879aae
--- /dev/null
+++ b/src/models/conversation.py
@@ -0,0 +1,36 @@
+"""
+Conversation model for AI chatbot.
+
+Represents a chat session between a user and the AI assistant.
+"""
+
+from sqlmodel import SQLModel, Field, Relationship
+from datetime import datetime
+from typing import Optional, List, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from src.models.message import Message
+ from src.models.user import User
+
+
class Conversation(SQLModel, table=True):
    """
    Conversation model representing a chat session.

    Each user can have multiple conversations over time.
    Currently, the system uses the most recent conversation for each user.
    """
    __tablename__ = "conversations"

    # Surrogate primary key; None until the row is inserted.
    id: Optional[int] = Field(default=None, primary_key=True)
    # Owning user; indexed for per-user conversation lookups.
    user_id: int = Field(foreign_key="users.id", index=True, nullable=False)
    # NOTE(review): datetime.utcnow yields naive timestamps — assumes all
    # stored times are UTC; confirm before comparing with aware datetimes.
    created_at: datetime = Field(default_factory=datetime.utcnow, nullable=False)
    # onupdate keeps updated_at current on every UPDATE, so ordering by it
    # selects the most recently active conversation.
    updated_at: datetime = Field(
        default_factory=datetime.utcnow,
        nullable=False,
        sa_column_kwargs={"onupdate": datetime.utcnow}
    )

    # Relationships
    messages: List["Message"] = Relationship(back_populates="conversation")
    user: Optional["User"] = Relationship(back_populates="conversations")
diff --git a/src/models/message.py b/src/models/message.py
new file mode 100644
index 0000000000000000000000000000000000000000..70d49b7fdce6adbe69b08f1679023776139d1a2d
--- /dev/null
+++ b/src/models/message.py
@@ -0,0 +1,54 @@
+"""
+Message model for AI chatbot.
+
+Represents a single message in a conversation (either from user or assistant).
+"""
+
+from sqlmodel import SQLModel, Field, Relationship
+from datetime import datetime
+from typing import Optional, Literal, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from src.models.conversation import Conversation
+ from src.models.user import User
+
+
class Message(SQLModel, table=True):
    """
    Message model representing a single message in a conversation.

    Messages are immutable once created (no editing).
    Role must be either 'user' or 'assistant'.
    """
    __tablename__ = "messages"

    # Surrogate primary key; None until the row is inserted.
    id: Optional[int] = Field(default=None, primary_key=True)
    # Parent conversation; indexed for history loading.
    conversation_id: int = Field(
        foreign_key="conversations.id",
        index=True,
        nullable=False
    )
    # Denormalized owner id (also reachable via the conversation); indexed.
    user_id: int = Field(
        foreign_key="users.id",
        index=True,
        nullable=False
    )
    # Plain str, not an enum/Literal column — the 'user'/'assistant'
    # constraint is enforced at the application layer, not by the schema.
    role: str = Field(
        max_length=20,
        nullable=False,
        description="Message sender role: 'user' or 'assistant'"
    )
    content: str = Field(
        max_length=10000,
        nullable=False,
        description="Message text content"
    )
    # Naive UTC timestamp (datetime.utcnow); indexed for chronological reads.
    created_at: datetime = Field(
        default_factory=datetime.utcnow,
        nullable=False,
        index=True
    )

    # Relationships
    conversation: Optional["Conversation"] = Relationship(back_populates="messages")
    user: Optional["User"] = Relationship()
diff --git a/src/models/password_reset.py b/src/models/password_reset.py
new file mode 100644
index 0000000000000000000000000000000000000000..40b1b2c470ed41d0680e5cecd68804aadf5e71fb
--- /dev/null
+++ b/src/models/password_reset.py
@@ -0,0 +1,30 @@
+from sqlmodel import SQLModel, Field, Relationship
+from datetime import datetime
+from typing import Optional
+
class PasswordResetToken(SQLModel, table=True):
    """Password reset token model for secure password recovery."""

    __tablename__ = "password_reset_tokens"

    # Surrogate primary key; None until the row is inserted.
    id: Optional[int] = Field(default=None, primary_key=True)
    # User the reset was requested for; indexed for per-user lookups.
    user_id: int = Field(foreign_key="users.id", index=True)
    # Opaque token value; unique + indexed so a token lookup is a key lookup.
    token: str = Field(unique=True, index=True, max_length=255)
    # Expiry cutoff; indexed so expired tokens can be filtered/purged cheaply.
    expires_at: datetime = Field(index=True)
    # Single-use flag; flipped after a successful reset (enforced by callers).
    used: bool = Field(default=False)
    # Naive UTC creation timestamp (datetime.utcnow).
    created_at: datetime = Field(default_factory=datetime.utcnow)

    # Relationships
    user: Optional["User"] = Relationship()

    class Config:
        # OpenAPI example payload only; no effect on the table schema.
        json_schema_extra = {
            "example": {
                "id": 1,
                "user_id": 1,
                "token": "abc123def456...",
                "expires_at": "2026-02-07T12:15:00Z",
                "used": False,
                "created_at": "2026-02-07T12:00:00Z"
            }
        }
diff --git a/src/models/subtask.py b/src/models/subtask.py
new file mode 100644
index 0000000000000000000000000000000000000000..737fe86ba217983810fccb67b5f11d0b6f8aefb6
--- /dev/null
+++ b/src/models/subtask.py
@@ -0,0 +1,32 @@
+from sqlmodel import SQLModel, Field, Relationship
+from datetime import datetime
+from typing import Optional
+
class Subtask(SQLModel, table=True):
    """Subtask model representing a checklist item within a task."""

    __tablename__ = "subtasks"

    # Surrogate primary key; None until the row is inserted.
    id: Optional[int] = Field(default=None, primary_key=True)
    # Parent task; indexed for per-task checklist loading.
    task_id: int = Field(foreign_key="tasks.id", index=True)
    title: str = Field(max_length=500)
    completed: bool = Field(default=False)
    order: int = Field(default=0)  # For ordering subtasks
    # Naive UTC timestamps (datetime.utcnow). updated_at has no onupdate
    # hook, so application code must refresh it when a subtask changes.
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)

    # Relationships
    task: "Task" = Relationship(back_populates="subtasks")

    class Config:
        # OpenAPI example payload only; no effect on the table schema.
        json_schema_extra = {
            "example": {
                "id": 1,
                "task_id": 42,
                "title": "Review documentation",
                "completed": False,
                "order": 0,
                "created_at": "2026-02-05T10:00:00Z",
                "updated_at": "2026-02-05T10:00:00Z"
            }
        }
diff --git a/src/models/task.py b/src/models/task.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b0ca404a4739cb1d23df3a75a58d7eec6257f26
--- /dev/null
+++ b/src/models/task.py
@@ -0,0 +1,49 @@
+from sqlmodel import SQLModel, Field, Relationship
+from datetime import datetime
+from typing import Optional, List, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from .subtask import Subtask
+
class Task(SQLModel, table=True):
    """Task model representing a work item belonging to a user."""

    __tablename__ = "tasks"

    # Surrogate primary key; None until the row is inserted.
    id: Optional[int] = Field(default=None, primary_key=True)
    # Owning user; indexed for per-user task queries.
    user_id: int = Field(foreign_key="users.id", index=True)
    title: str = Field(max_length=500)
    description: Optional[str] = Field(default=None)
    completed: bool = Field(default=False)
    # Free-form category label; no enum constraint at the schema level.
    category: Optional[str] = Field(default=None, max_length=50)
    due_date: Optional[datetime] = Field(default=None)
    priority: Optional[str] = Field(default="medium", max_length=20)  # low, medium, high

    # Recurring task fields
    is_recurring: bool = Field(default=False)
    recurrence_type: Optional[str] = Field(default=None, max_length=20)  # daily, weekly, monthly, yearly
    recurrence_interval: Optional[int] = Field(default=1)  # e.g., every 2 days, every 3 weeks
    recurrence_end_date: Optional[datetime] = Field(default=None)
    # Self-referential FK linking a generated instance back to its template.
    parent_task_id: Optional[int] = Field(default=None, foreign_key="tasks.id")  # For recurring instances

    # Naive UTC timestamps (datetime.utcnow). Unlike Conversation, no
    # onupdate hook is configured here — callers must set updated_at
    # themselves when they modify a task.
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)

    # Relationships
    user: "User" = Relationship(back_populates="tasks")
    subtasks: List["Subtask"] = Relationship(back_populates="task")

    class Config:
        # OpenAPI example payload only; no effect on the table schema.
        json_schema_extra = {
            "example": {
                "id": 1,
                "user_id": 42,
                "title": "Buy groceries",
                "description": "Milk, eggs, bread",
                "completed": False,
                "category": "Personal",
                "due_date": "2026-02-10T10:00:00Z",
                "created_at": "2026-02-05T10:00:00Z",
                "updated_at": "2026-02-05T10:00:00Z"
            }
        }
diff --git a/src/models/user.py b/src/models/user.py
new file mode 100644
index 0000000000000000000000000000000000000000..524c57aadb74943c655dde231dcfceb68eac2c88
--- /dev/null
+++ b/src/models/user.py
@@ -0,0 +1,31 @@
+from sqlmodel import SQLModel, Field, Relationship
+from datetime import datetime
+from typing import Optional, List, TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from src.models.conversation import Conversation
+
class User(SQLModel, table=True):
    """User model representing an authenticated user of the application."""

    __tablename__ = "users"

    # Surrogate primary key; None until the row is inserted.
    id: Optional[int] = Field(default=None, primary_key=True)
    # Login identifier; unique + indexed for authentication lookups.
    email: str = Field(unique=True, index=True, max_length=255)
    # Password digest only — never the plaintext password.
    hashed_password: str = Field(max_length=255)
    # Naive UTC timestamps (datetime.utcnow); updated_at has no onupdate hook.
    created_at: datetime = Field(default_factory=datetime.utcnow)
    updated_at: datetime = Field(default_factory=datetime.utcnow)

    # Relationships
    tasks: List["Task"] = Relationship(back_populates="user")
    conversations: List["Conversation"] = Relationship(back_populates="user")

    class Config:
        # OpenAPI example payload only; no effect on the table schema.
        json_schema_extra = {
            "example": {
                "id": 1,
                "email": "user@example.com",
                "created_at": "2026-02-05T10:00:00Z",
                "updated_at": "2026-02-05T10:00:00Z"
            }
        }
diff --git a/src/services/__init__.py b/src/services/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..0557eb635c5522686a57e633065437599726336a
--- /dev/null
+++ b/src/services/__init__.py
@@ -0,0 +1 @@
+# Services module
diff --git a/src/services/auth.py b/src/services/auth.py
new file mode 100644
index 0000000000000000000000000000000000000000..357685e26f758a60a6ccc9f1ef83643583986e74
--- /dev/null
+++ b/src/services/auth.py
@@ -0,0 +1,116 @@
+"""
+Authentication service for password hashing and JWT token management.
+
+This module provides utilities for:
+- Password hashing and verification using bcrypt
+- JWT token creation and decoding
+- User authentication workflows
+"""
+
+import os
+from datetime import datetime, timedelta
+from typing import Optional, Dict, Any
+
+from passlib.context import CryptContext
+from jose import JWTError, jwt
+
+# Password hashing configuration using Argon2 (more secure and no compatibility issues)
+pwd_context = CryptContext(schemes=["argon2"], deprecated="auto")
+
+# JWT configuration from environment variables
+JWT_SECRET_KEY = os.getenv("JWT_SECRET_KEY", "your-secret-key-change-in-production")
+JWT_ALGORITHM = os.getenv("JWT_ALGORITHM", "HS256")
+JWT_EXPIRATION_MINUTES = int(os.getenv("JWT_EXPIRATION_MINUTES", "10080")) # Default: 7 days
+
+
def hash_password(password: str) -> str:
    """
    Hash *password* with Argon2 and return the encoded hash string.

    Args:
        password: Plain text password to hash

    Returns:
        Hashed password string

    Example:
        >>> hashed = hash_password("mypassword123")
        >>> print(hashed)
        $argon2id$...
    """
    # Delegates to the module-level passlib context (argon2 scheme).
    return pwd_context.hash(password)
+
+
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """
    Check *plain_password* against the stored Argon2 *hashed_password*.

    Args:
        plain_password: Plain text password to verify
        hashed_password: Hashed password to compare against

    Returns:
        True if password matches, False otherwise

    Example:
        >>> hashed = hash_password("mypassword123")
        >>> verify_password("mypassword123", hashed)
        True
        >>> verify_password("wrongpassword", hashed)
        False
    """
    # passlib performs the constant-time comparison internally.
    return pwd_context.verify(plain_password, hashed_password)
+
+
def create_access_token(data: Dict[str, Any], expires_delta: Optional[timedelta] = None) -> str:
    """
    Create a JWT access token with user data.

    Args:
        data: Dictionary containing user data to encode in token (must include 'user_id')
        expires_delta: Optional custom expiration time delta

    Returns:
        Encoded JWT token string

    Example:
        >>> token = create_access_token({"user_id": 1, "email": "user@example.com"})
        >>> print(token)
        eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...
    """
    # Local import: the module only imports datetime/timedelta at file level.
    from datetime import timezone

    # Copy so the caller's dict is never mutated.
    to_encode = data.copy()

    # Use timezone-aware UTC instead of the deprecated naive utcnow();
    # jose converts the datetime to a numeric 'exp' claim either way.
    if expires_delta:
        expire = datetime.now(timezone.utc) + expires_delta
    else:
        expire = datetime.now(timezone.utc) + timedelta(minutes=JWT_EXPIRATION_MINUTES)

    to_encode.update({"exp": expire})

    # Sign with the shared secret and configured algorithm (HS256 default).
    encoded_jwt = jwt.encode(to_encode, JWT_SECRET_KEY, algorithm=JWT_ALGORITHM)
    return encoded_jwt
+
+
def decode_token(token: str) -> Optional[Dict[str, Any]]:
    """
    Decode and verify a JWT token.

    Args:
        token: JWT token string to decode

    Returns:
        Dictionary containing decoded token payload, or None if invalid

    Example:
        >>> token = create_access_token({"user_id": 1})
        >>> payload = decode_token(token)
        >>> print(payload["user_id"])
        1
    """
    # Invalid/expired tokens are reported as None rather than raising.
    try:
        return jwt.decode(token, JWT_SECRET_KEY, algorithms=[JWT_ALGORITHM])
    except JWTError:
        return None
diff --git a/src/services/cohere_ai.py b/src/services/cohere_ai.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e932740ebdd1589520bcf8e1bab0041e5e4b17c
--- /dev/null
+++ b/src/services/cohere_ai.py
@@ -0,0 +1,290 @@
+"""
+Cohere AI Service for intelligent task management features.
+
+This module provides AI-powered capabilities using Cohere's API:
+- Task suggestions and auto-completion
+- Smart categorization and tagging
+- Priority recommendations
+- Task description enhancement
+"""
+
import os
import re
from typing import Any, Dict, List, Optional

import cohere
from dotenv import load_dotenv
+
+load_dotenv()
+
+# Initialize Cohere client with v2 API
+COHERE_API_KEY = os.getenv("COHERE_API_KEY")
+if not COHERE_API_KEY:
+ raise ValueError("COHERE_API_KEY not found in environment variables")
+
+co = cohere.ClientV2(COHERE_API_KEY)
+
+
class CohereAIService:
    """Service class for Cohere AI operations.

    All methods call the module-level Cohere v2 client (`co`) and degrade
    gracefully: on any API/parse failure they return a neutral fallback
    instead of raising.
    """

    @staticmethod
    def _extract_text(response) -> str:
        """Return the first text content item from a Cohere v2 chat response.

        The reasoning model returns a list of content items (thinking +
        text); only the text item is of interest. Returns '' if none found.
        """
        for item in response.message.content:
            if hasattr(item, 'text'):
                return item.text
        return ""

    @staticmethod
    def _clean_lines(text: str) -> List[str]:
        """Split *text* into non-empty lines with list markers removed.

        Strips a leading bullet ('-', '•', '*') or numbering ('1.', '2)')
        as an anchored prefix. The previous chained
        lstrip('1234567890. ') treated its argument as a character SET and
        would eat the leading digits of legitimate titles (e.g. '2023 taxes').
        """
        cleaned_lines = []
        for raw in text.strip().split('\n'):
            cleaned = re.sub(r'^\s*(?:[-•*]+|\d+[.)])?\s*', '', raw).strip()
            if cleaned:
                cleaned_lines.append(cleaned)
        return cleaned_lines

    @staticmethod
    def generate_task_suggestions(context: str, count: int = 5) -> List[str]:
        """
        Generate task suggestions based on context.

        Args:
            context: Context or description to base suggestions on
            count: Number of suggestions to generate

        Returns:
            List of suggested task titles (empty list on failure)
        """
        try:
            user_message = f"""Based on the following context, suggest {count} specific, actionable tasks:

Context: {context}

Generate {count} clear, concise task titles (one per line, no numbering):"""

            response = co.chat(
                model='command-a-reasoning-08-2025',
                messages=[{"role": "user", "content": user_message}],
                temperature=0.7,
                max_tokens=1000  # Increased to allow for thinking + response
            )

            result_text = CohereAIService._extract_text(response)
            if not result_text:
                return []

            return CohereAIService._clean_lines(result_text)[:count]

        except Exception as e:
            print(f"Error generating task suggestions: {e}")
            return []

    @staticmethod
    def enhance_task_description(title: str, description: str = "") -> str:
        """
        Enhance a task description with more details and clarity.

        Args:
            title: Task title
            description: Current description (optional)

        Returns:
            Enhanced description, or the original description on failure
        """
        try:
            user_message = f"""Enhance this task description to be more clear and actionable:

Task: {title}
Current Description: {description if description else "None"}

Provide a clear, concise enhanced description (2-3 sentences):"""

            response = co.chat(
                model='command-a-reasoning-08-2025',
                messages=[{"role": "user", "content": user_message}],
                temperature=0.6,
                max_tokens=1000
            )

            enhanced = CohereAIService._extract_text(response)
            return enhanced.strip() if enhanced else description

        except Exception as e:
            print(f"Error enhancing description: {e}")
            return description

    @staticmethod
    def categorize_task(title: str, description: str = "") -> Dict[str, Any]:
        """
        Categorize a task and suggest priority level.

        Args:
            title: Task title
            description: Task description

        Returns:
            Dictionary with category, priority, and tags (defaults
            Other/Medium/[] on failure)
        """
        try:
            user_message = f"""Analyze this task and provide categorization:

Task: {title}
Description: {description}

Respond in this exact format:
Category: [Work/Personal/Health/Finance/Learning/Other]
Priority: [High/Medium/Low]
Tags: [tag1, tag2, tag3]"""

            response = co.chat(
                model='command-a-reasoning-08-2025',
                messages=[{"role": "user", "content": user_message}],
                temperature=0.5,
                max_tokens=1000
            )

            result = CohereAIService._extract_text(response)

            # Parse the "Key: value" lines; unknown lines are ignored.
            category = "Other"
            priority = "Medium"
            tags = []

            for line in result.split('\n'):
                if line.startswith('Category:'):
                    category = line.split(':', 1)[1].strip()
                elif line.startswith('Priority:'):
                    priority = line.split(':', 1)[1].strip()
                elif line.startswith('Tags:'):
                    tags_str = line.split(':', 1)[1].strip()
                    tags = [t.strip() for t in tags_str.strip('[]').split(',')]

            return {
                "category": category,
                "priority": priority,
                "tags": tags
            }

        except Exception as e:
            print(f"Error categorizing task: {e}")
            return {
                "category": "Other",
                "priority": "Medium",
                "tags": []
            }

    @staticmethod
    def smart_complete_task(partial_title: str) -> List[str]:
        """
        Provide smart auto-completion suggestions for task titles.

        Args:
            partial_title: Partial task title typed by user

        Returns:
            List of up to 3 completion suggestions (empty on failure)
        """
        try:
            user_message = f"""Complete this task title with 3 different variations:

Partial task: {partial_title}

Provide 3 complete task titles (one per line):"""

            response = co.chat(
                model='command-a-reasoning-08-2025',
                messages=[{"role": "user", "content": user_message}],
                temperature=0.8,
                max_tokens=1000
            )

            result_text = CohereAIService._extract_text(response)
            return CohereAIService._clean_lines(result_text)[:3]

        except Exception as e:
            print(f"Error completing task: {e}")
            return []

    @staticmethod
    def analyze_task_complexity(title: str, description: str = "") -> Dict[str, Any]:
        """
        Analyze task complexity and provide time estimate.

        Args:
            title: Task title
            description: Task description

        Returns:
            Dictionary with complexity level, estimated time, and a
            needs_subtasks flag (Moderate/Unknown/False on failure)
        """
        try:
            user_message = f"""Analyze the complexity of this task:

Task: {title}
Description: {description}

Respond in this format:
Complexity: [Simple/Moderate/Complex]
Estimated Time: [time estimate]
Subtasks Needed: [Yes/No]"""

            response = co.chat(
                model='command-a-reasoning-08-2025',
                messages=[{"role": "user", "content": user_message}],
                temperature=0.5,
                max_tokens=1000
            )

            result = CohereAIService._extract_text(response)

            # Parse the "Key: value" lines; unknown lines are ignored.
            complexity = "Moderate"
            estimated_time = "Unknown"
            needs_subtasks = False

            for line in result.split('\n'):
                if line.startswith('Complexity:'):
                    complexity = line.split(':', 1)[1].strip()
                elif line.startswith('Estimated Time:'):
                    estimated_time = line.split(':', 1)[1].strip()
                elif line.startswith('Subtasks Needed:'):
                    needs_subtasks = 'yes' in line.lower()

            return {
                "complexity": complexity,
                "estimated_time": estimated_time,
                "needs_subtasks": needs_subtasks
            }

        except Exception as e:
            print(f"Error analyzing complexity: {e}")
            return {
                "complexity": "Moderate",
                "estimated_time": "Unknown",
                "needs_subtasks": False
            }


# Singleton instance
cohere_service = CohereAIService()
diff --git a/src/services/conversation_service.py b/src/services/conversation_service.py
new file mode 100644
index 0000000000000000000000000000000000000000..210d8b3fbb58c68b69d37e0a741a37110bbd6edb
--- /dev/null
+++ b/src/services/conversation_service.py
@@ -0,0 +1,157 @@
+"""
+Conversation service for managing chat conversations.
+
+This service handles:
+- Creating and retrieving conversations
+- Storing and loading messages
+- Maintaining conversation history
+- Stateless conversation management
+"""
+
+import logging
+from typing import List, Optional
+from datetime import datetime
+from sqlmodel import Session, select
+from src.models.conversation import Conversation
+from src.models.message import Message
+
+logger = logging.getLogger(__name__)
+
+
class ConversationService:
    """
    Service for managing AI chatbot conversations.

    Stateless by design: every request loads whatever context it needs
    from the database, so no conversation state is held in memory.
    """

    @staticmethod
    async def get_or_create_conversation(
        db: Session,
        user_id: int
    ) -> Conversation:
        """
        Return the user's most recently updated conversation, creating a
        fresh one when the user has none yet.

        Args:
            db: Database session
            user_id: User ID

        Returns:
            Conversation object
        """
        conversation = db.exec(
            select(Conversation)
            .where(Conversation.user_id == user_id)
            .order_by(Conversation.updated_at.desc())
        ).first()

        if conversation:
            logger.info(f"Using existing conversation {conversation.id} for user {user_id}")
            return conversation

        conversation = Conversation(user_id=user_id)
        db.add(conversation)
        db.commit()
        db.refresh(conversation)
        logger.info(f"Created new conversation {conversation.id} for user {user_id}")
        return conversation

    @staticmethod
    async def store_message(
        db: Session,
        conversation_id: int,
        user_id: int,
        role: str,
        content: str
    ) -> Message:
        """
        Persist one chat message and bump the parent conversation's
        updated_at timestamp.

        Args:
            db: Database session
            conversation_id: Conversation ID
            user_id: User ID
            role: Message role ('user' or 'assistant')
            content: Message content

        Returns:
            Created Message object

        Raises:
            ValueError: If role is neither 'user' nor 'assistant'.
        """
        if role not in ('user', 'assistant'):
            raise ValueError(f"Invalid role: {role}. Must be 'user' or 'assistant'")

        message = Message(
            conversation_id=conversation_id,
            user_id=user_id,
            role=role,
            content=content
        )
        db.add(message)

        # Touch the conversation so it sorts first in "most recent" queries.
        parent = db.exec(
            select(Conversation).where(Conversation.id == conversation_id)
        ).first()
        if parent is not None:
            parent.updated_at = datetime.utcnow()

        db.commit()
        db.refresh(message)

        logger.info(f"Stored {role} message in conversation {conversation_id}")
        return message

    @staticmethod
    async def load_conversation_history(
        db: Session,
        conversation_id: int,
        limit: int = 50
    ) -> List[Message]:
        """
        Fetch up to ``limit`` messages of a conversation, oldest first.

        Args:
            db: Database session
            conversation_id: Conversation ID
            limit: Maximum number of messages to load

        Returns:
            List of Message objects in chronological order
        """
        query = (
            select(Message)
            .where(Message.conversation_id == conversation_id)
            .order_by(Message.created_at.asc())
            .limit(limit)
        )
        messages = db.exec(query).all()
        logger.info(f"Loaded {len(messages)} messages from conversation {conversation_id}")
        return list(messages)

    @staticmethod
    def build_message_array(messages: List[Message]) -> List[dict]:
        """
        Convert Message rows into the dict format expected by the Cohere API.

        Messages whose content is empty or whitespace-only are dropped,
        because Cohere API v2 rejects messages without content or tool calls.

        Args:
            messages: List of Message objects

        Returns:
            List of {"role", "content"} dictionaries
        """
        payload = []
        for msg in messages:
            if msg.content and msg.content.strip():
                payload.append({"role": msg.role, "content": msg.content})
        return payload


# Singleton instance
conversation_service = ConversationService()
diff --git a/src/services/email.py b/src/services/email.py
new file mode 100644
index 0000000000000000000000000000000000000000..4450fc44676be68d3f95354809a6ff38d154485c
--- /dev/null
+++ b/src/services/email.py
@@ -0,0 +1,201 @@
+"""
+Email service for sending password reset emails via Gmail SMTP.
+
+This module provides utilities for:
+- Sending HTML emails via Gmail SMTP
+- Rendering password reset email templates
+- Managing SMTP connection and configuration
+"""
+
+import os
+import smtplib
+from email.mime.text import MIMEText
+from email.mime.multipart import MIMEMultipart
+from typing import Optional
+from pathlib import Path
+
+
# SMTP configuration from environment variables.
# Defaults target Gmail; override for other providers.
SMTP_HOST = os.getenv("SMTP_HOST", "smtp.gmail.com")
SMTP_PORT = int(os.getenv("SMTP_PORT", "587"))  # 587 = SMTP submission with STARTTLS
SMTP_USERNAME = os.getenv("SMTP_USERNAME", "")
SMTP_PASSWORD = os.getenv("SMTP_PASSWORD", "")  # Gmail app-specific password, not the account password
SMTP_USE_TLS = os.getenv("SMTP_USE_TLS", "true").lower() == "true"
EMAIL_FROM = os.getenv("EMAIL_FROM", SMTP_USERNAME)  # falls back to the login address
EMAIL_FROM_NAME = os.getenv("EMAIL_FROM_NAME", "Todo Application")
FRONTEND_URL = os.getenv("FRONTEND_URL", "http://localhost:3000")  # base for reset links
+
+
def send_email(to_email: str, subject: str, html_content: str) -> bool:
    """
    Send an HTML email through the configured SMTP server.

    Args:
        to_email: Recipient email address
        subject: Email subject line
        html_content: HTML content of the email

    Returns:
        True if email sent successfully, False otherwise
    """
    try:
        message = MIMEMultipart("alternative")
        message["Subject"] = subject
        message["From"] = f"{EMAIL_FROM_NAME} <{EMAIL_FROM}>"
        message["To"] = to_email
        message.attach(MIMEText(html_content, "html"))

        with smtplib.SMTP(SMTP_HOST, SMTP_PORT) as server:
            if SMTP_USE_TLS:
                server.starttls()
            # Anonymous relay is allowed when no credentials are configured.
            if SMTP_USERNAME and SMTP_PASSWORD:
                server.login(SMTP_USERNAME, SMTP_PASSWORD)
            server.send_message(message)

        return True

    except Exception as e:
        # Best-effort delivery: report failure to the caller instead of raising.
        print(f"Failed to send email to {to_email}: {str(e)}")
        return False
+
+
def render_password_reset_email(reset_link: str, user_email: str) -> str:
    """
    Render the password reset email HTML.

    Loads ``templates/password_reset_email.html`` when it exists so the
    email can be restyled without code changes; otherwise falls back to a
    built-in template. Both templates use the ``{{RESET_LINK}}`` and
    ``{{USER_EMAIL}}`` placeholders.

    Args:
        reset_link: Full URL for password reset
        user_email: User's email address

    Returns:
        Rendered HTML email content

    Example:
        >>> html = render_password_reset_email(
        ...     "http://localhost:3000/reset-password?token=abc123",
        ...     "user@example.com"
        ... )
    """
    # Try to load template from file first.
    template_path = Path(__file__).parent.parent.parent / "templates" / "password_reset_email.html"

    if template_path.exists():
        template = template_path.read_text(encoding="utf-8")
    else:
        # Fallback inline template. Fix: the previous fallback contained the
        # email copy as plain text with no HTML markup even though it is
        # sent with a text/html MIME type; this version is a self-contained
        # HTML document carrying the same copy.
        # NOTE(review): the copy promises a 15-minute expiry — keep this in
        # sync with PASSWORD_RESET_TOKEN_EXPIRY_MINUTES.
        template = """<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Password Reset</title>
</head>
<body style="font-family: Arial, sans-serif; color: #333333; margin: 0; padding: 24px;">
  <h2 style="margin-top: 0;">Password Reset Request</h2>
  <p>Hello,</p>
  <p>We received a request to reset the password for your account associated with {{USER_EMAIL}}.</p>
  <p>Click the button below to reset your password. This link will expire in 15 minutes.</p>
  <p>
    <a href="{{RESET_LINK}}" style="background-color: #2563eb; color: #ffffff; padding: 12px 24px; text-decoration: none; border-radius: 6px; display: inline-block;">Reset Password</a>
  </p>
  <p>If you didn't request a password reset, you can safely ignore this email. Your password will not be changed.</p>
  <p>If the button doesn't work, copy and paste this link into your browser:</p>
  <p>{{RESET_LINK}}</p>
  <p style="color: #888888; font-size: 12px;">&copy; 2026 Todo Application. All rights reserved.</p>
</body>
</html>
"""

    # Substitute placeholders.
    html = template.replace("{{RESET_LINK}}", reset_link)
    html = html.replace("{{USER_EMAIL}}", user_email)

    return html
+
+
def send_password_reset_email(to_email: str, reset_token: str) -> bool:
    """
    Email a password reset link to the user.

    Builds the reset URL from FRONTEND_URL, renders the HTML body, and
    hands the result to send_email.

    Args:
        to_email: User's email address
        reset_token: Password reset token

    Returns:
        True if email sent successfully, False otherwise
    """
    link = f"{FRONTEND_URL}/reset-password?token={reset_token}"
    body = render_password_reset_email(link, to_email)

    return send_email(
        to_email=to_email,
        subject="Reset Your Password - Todo Application",
        html_content=body
    )
diff --git a/src/services/error_formatter.py b/src/services/error_formatter.py
new file mode 100644
index 0000000000000000000000000000000000000000..30a48fd488bc866f9b3a480b65de6efda187676d
--- /dev/null
+++ b/src/services/error_formatter.py
@@ -0,0 +1,184 @@
+"""
+Error formatter for user-friendly error messages.
+
+This module maps technical errors to safe, user-friendly messages
+without exposing internal system details.
+"""
+
+import logging
+from typing import Dict, Any, Optional
+from enum import Enum
+
logger = logging.getLogger(__name__)


class ErrorSeverity(str, Enum):
    """Error severity levels, ordered from least to most serious."""
    LOW = "low"
    MEDIUM = "medium"
    HIGH = "high"
    CRITICAL = "critical"


class ErrorCategory(str, Enum):
    """Broad categories used to classify failures."""
    VALIDATION = "validation"
    AUTHENTICATION = "authentication"
    AUTHORIZATION = "authorization"
    NOT_FOUND = "not_found"
    DATABASE = "database"
    EXTERNAL_API = "external_api"
    INTERNAL = "internal"


class ErrorFormatter:
    """
    Translates internal errors into safe, user-facing responses.

    Guarantees:
    - Users never see stack traces or technical details
    - Messages are actionable and helpful
    - Internal details go to the log, never into the response
    - Severity drives the log level
    """

    # Error code -> user-facing message.
    ERROR_MESSAGES = {
        # Validation
        "INVALID_TITLE": "Task title cannot be empty. Please provide a title for your task.",
        "INVALID_FILTER": "Invalid filter. Please use 'all', 'pending', or 'completed'.",
        "MISSING_IDENTIFIER": "Please specify either a task ID or task title.",
        "NO_UPDATES": "Please specify what you'd like to update (title or description).",
        "VALIDATION_ERROR": "The information provided is invalid. Please check and try again.",

        # Tasks
        "TASK_NOT_FOUND": "Task not found. Use 'show tasks' to see your list.",
        "AMBIGUOUS_MATCH": "Multiple tasks match that description. Please be more specific or use the task ID.",
        "ALREADY_COMPLETED": "This task is already marked as complete.",

        # Database
        "DATABASE_ERROR": "We're having trouble saving your changes. Please try again in a moment.",
        "CONNECTION_ERROR": "Unable to connect to the database. Please try again later.",

        # AI / external API
        "AI_SERVICE_ERROR": "The AI service is temporarily unavailable. Please try again.",
        "RATE_LIMIT_EXCEEDED": "Too many requests. Please wait a moment and try again.",
        "TIMEOUT_ERROR": "The request took too long. Please try again.",

        # Authentication / authorization
        "UNAUTHORIZED": "You need to be logged in to do that.",
        "FORBIDDEN": "You don't have permission to access that resource.",
        "INVALID_TOKEN": "Your session has expired. Please log in again.",

        # Generic
        "INTERNAL_ERROR": "Something went wrong on our end. We're looking into it.",
        "UNKNOWN_ERROR": "An unexpected error occurred. Please try again."
    }

    @classmethod
    def format_error(
        cls,
        error_code: str,
        error_message: Optional[str] = None,
        severity: ErrorSeverity = ErrorSeverity.MEDIUM,
        category: ErrorCategory = ErrorCategory.INTERNAL,
        internal_details: Optional[str] = None
    ) -> Dict[str, Any]:
        """
        Build a user-friendly error response.

        Args:
            error_code: Error code for classification
            error_message: Optional custom error message (overrides the lookup)
            severity: Error severity level
            category: Error category
            internal_details: Internal details for logging (never shown to user)

        Returns:
            Formatted error dictionary
        """
        if error_message:
            user_message = error_message
        else:
            user_message = cls.ERROR_MESSAGES.get(
                error_code, cls.ERROR_MESSAGES["UNKNOWN_ERROR"]
            )

        # Internal details are logged at a level matching the severity.
        if internal_details:
            logger.log(
                cls._get_log_level(severity),
                f"Error [{error_code}] - Category: {category}, Severity: {severity} - {internal_details}"
            )

        return {
            "success": False,
            "error": user_message,
            "error_code": error_code,
            "severity": severity.value,
            "category": category.value
        }

    @classmethod
    def format_validation_error(cls, message: str, field: Optional[str] = None) -> Dict[str, Any]:
        """Format a validation error with an optional offending field name."""
        details = f"Validation failed for field: {field}" if field else None
        return cls.format_error(
            error_code="VALIDATION_ERROR",
            error_message=message,
            severity=ErrorSeverity.LOW,
            category=ErrorCategory.VALIDATION,
            internal_details=details
        )

    @classmethod
    def format_database_error(cls, exception: Exception) -> Dict[str, Any]:
        """Format a database error; the exception text goes to the log only."""
        return cls.format_error(
            error_code="DATABASE_ERROR",
            severity=ErrorSeverity.HIGH,
            category=ErrorCategory.DATABASE,
            internal_details=str(exception)
        )

    @classmethod
    def format_ai_service_error(cls, exception: Exception) -> Dict[str, Any]:
        """Format an AI service error; the exception text goes to the log only."""
        return cls.format_error(
            error_code="AI_SERVICE_ERROR",
            severity=ErrorSeverity.MEDIUM,
            category=ErrorCategory.EXTERNAL_API,
            internal_details=str(exception)
        )

    @classmethod
    def format_authentication_error(cls) -> Dict[str, Any]:
        """Format a "not logged in" error."""
        return cls.format_error(
            error_code="UNAUTHORIZED",
            severity=ErrorSeverity.MEDIUM,
            category=ErrorCategory.AUTHENTICATION
        )

    @classmethod
    def format_authorization_error(cls) -> Dict[str, Any]:
        """Format a "no permission" error."""
        return cls.format_error(
            error_code="FORBIDDEN",
            severity=ErrorSeverity.MEDIUM,
            category=ErrorCategory.AUTHORIZATION
        )

    @staticmethod
    def _get_log_level(severity: ErrorSeverity) -> int:
        """Map severity to the corresponding stdlib logging level."""
        return {
            ErrorSeverity.LOW: logging.INFO,
            ErrorSeverity.MEDIUM: logging.WARNING,
            ErrorSeverity.HIGH: logging.ERROR,
            ErrorSeverity.CRITICAL: logging.CRITICAL,
        }.get(severity, logging.ERROR)


# Singleton instance
error_formatter = ErrorFormatter()
diff --git a/src/services/password_reset.py b/src/services/password_reset.py
new file mode 100644
index 0000000000000000000000000000000000000000..1312525afe4e42db3ac373f13736e034980c269e
--- /dev/null
+++ b/src/services/password_reset.py
@@ -0,0 +1,272 @@
+"""
+Password reset service for secure token management and validation.
+
+This module provides utilities for:
+- Generating cryptographically secure reset tokens
+- Validating token expiry and usage
+- Rate limiting reset requests
+- Password strength validation
+- Token cleanup
+"""
+
+import os
+import secrets
+from datetime import datetime, timedelta
+from typing import Optional, Dict, List
+from sqlmodel import Session, select
+
+from src.models.password_reset import PasswordResetToken
+from src.models.user import User
+
+
# Configuration from environment variables.
# Lifetime of a reset token in minutes before it is rejected as expired.
TOKEN_EXPIRY_MINUTES = int(os.getenv("PASSWORD_RESET_TOKEN_EXPIRY_MINUTES", "15"))
# Maximum reset-token requests one user may make per rolling hour.
MAX_REQUESTS_PER_HOUR = int(os.getenv("PASSWORD_RESET_MAX_REQUESTS_PER_HOUR", "3"))
+
+
def generate_reset_token() -> str:
    """
    Create a cryptographically secure, URL-safe reset token.

    Returns:
        URL-safe random token string (32 random bytes -> 43 characters)

    Example:
        >>> len(generate_reset_token())
        43
    """
    # secrets (not random) so tokens are unpredictable to attackers.
    return secrets.token_urlsafe(32)
+
+
def create_reset_token(session: Session, user_id: int) -> str:
    """
    Generate and persist a password reset token for a user.

    The stored record expires TOKEN_EXPIRY_MINUTES after creation and
    starts out unused.

    Args:
        session: Database session
        user_id: User ID to create token for

    Returns:
        Generated reset token string
    """
    token = generate_reset_token()
    expires_at = datetime.utcnow() + timedelta(minutes=TOKEN_EXPIRY_MINUTES)

    record = PasswordResetToken(
        user_id=user_id,
        token=token,
        expires_at=expires_at,
        used=False,
        created_at=datetime.utcnow()
    )
    session.add(record)
    session.commit()
    session.refresh(record)

    return token
+
+
def validate_reset_token(session: Session, token: str) -> Optional[PasswordResetToken]:
    """
    Look up a reset token and confirm it is still usable.

    A token is usable only when it exists, has not been marked used,
    and has not passed its expiry time.

    Args:
        session: Database session
        token: Reset token to validate

    Returns:
        PasswordResetToken if valid, None otherwise
    """
    record = session.exec(
        select(PasswordResetToken).where(PasswordResetToken.token == token)
    ).first()

    # Unknown or already-consumed tokens are rejected.
    if record is None or record.used:
        return None

    # Expired tokens are rejected.
    if record.expires_at < datetime.utcnow():
        return None

    return record
+
+
def invalidate_token(session: Session, token: str) -> bool:
    """
    Mark a reset token as consumed so it cannot be reused.

    Args:
        session: Database session
        token: Reset token to invalidate

    Returns:
        True if the token was found and invalidated, False otherwise
    """
    record = session.exec(
        select(PasswordResetToken).where(PasswordResetToken.token == token)
    ).first()

    if record is None:
        return False

    record.used = True
    session.add(record)
    session.commit()
    return True
+
+
def check_rate_limit(session: Session, user_id: int) -> bool:
    """
    Report whether the user may request another reset token.

    A user may create at most MAX_REQUESTS_PER_HOUR tokens per rolling
    hour; this counts tokens created in the last 60 minutes.

    Args:
        session: Database session
        user_id: User ID to check

    Returns:
        True if the user is within the rate limit, False if exceeded
    """
    window_start = datetime.utcnow() - timedelta(hours=1)

    recent = session.exec(
        select(PasswordResetToken).where(
            PasswordResetToken.user_id == user_id,
            PasswordResetToken.created_at >= window_start
        )
    ).all()

    return len(recent) < MAX_REQUESTS_PER_HOUR
+
+
def validate_password_strength(password: str) -> Dict[str, any]:
    """
    Check a candidate password against the strength policy.

    Policy: at least 8 characters, with at least one uppercase letter,
    one lowercase letter, and one digit.

    Args:
        password: Password to validate

    Returns:
        Dictionary with 'valid' boolean and 'errors' list of messages
    """
    # (predicate, message to report when the predicate fails)
    checks = [
        (len(password) >= 8,
         "Password must be at least 8 characters"),
        (any(c.isupper() for c in password),
         "Password must contain at least one uppercase letter"),
        (any(c.islower() for c in password),
         "Password must contain at least one lowercase letter"),
        (any(c.isdigit() for c in password),
         "Password must contain at least one number"),
    ]

    errors = [message for passed, message in checks if not passed]
    return {"valid": not errors, "errors": errors}
+
+
def cleanup_expired_tokens(session: Session) -> int:
    """
    Remove all reset tokens whose expiry time has passed.

    Intended to run periodically as a background job.

    Args:
        session: Database session

    Returns:
        Number of tokens deleted
    """
    expired = session.exec(
        select(PasswordResetToken).where(
            PasswordResetToken.expires_at < datetime.utcnow()
        )
    ).all()

    for record in expired:
        session.delete(record)
    session.commit()

    return len(expired)
+
+
def get_user_by_email(session: Session, email: str) -> Optional[User]:
    """
    Look up a user record by email address.

    Args:
        session: Database session
        email: User email address

    Returns:
        User if found, None otherwise
    """
    return session.exec(select(User).where(User.email == email)).first()
diff --git a/src/services/subtasks.py b/src/services/subtasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c9fe82ef87f602e3970997498ca394770908b63
--- /dev/null
+++ b/src/services/subtasks.py
@@ -0,0 +1,158 @@
+"""
+Subtask service layer for business logic.
+
+This module provides functions for:
+- Creating subtasks
+- Updating subtasks
+- Deleting subtasks
+- Getting subtasks for a task
+"""
+
+from sqlmodel import Session, select
+from typing import List, Optional
+from datetime import datetime
+
+from ..models.subtask import Subtask
+from ..models.task import Task
+
+
def get_task_subtasks(session: Session, task_id: int, user_id: int) -> List[Subtask]:
    """
    Return a task's subtasks in display order, enforcing ownership.

    Args:
        session: Database session
        task_id: Task ID
        user_id: User ID for ownership verification

    Returns:
        Subtasks ordered by their ``order`` field; an empty list when the
        task does not exist or belongs to a different user.
    """
    owner_task = session.get(Task, task_id)
    if owner_task is None or owner_task.user_id != user_id:
        return []

    rows = session.exec(
        select(Subtask).where(Subtask.task_id == task_id).order_by(Subtask.order)
    ).all()
    return list(rows)
+
+
def create_subtask(
    session: Session,
    task_id: int,
    user_id: int,
    title: str,
    order: int = 0
) -> Optional[Subtask]:
    """
    Add a subtask to a task the user owns.

    Args:
        session: Database session
        task_id: Parent task ID
        user_id: User ID for ownership verification
        title: Subtask title
        order: Position of the subtask in the list

    Returns:
        The created subtask, or None when the parent task is missing or
        owned by another user.
    """
    parent = session.get(Task, task_id)
    if parent is None or parent.user_id != user_id:
        return None

    new_subtask = Subtask(
        task_id=task_id,
        title=title,
        order=order,
        completed=False
    )
    session.add(new_subtask)
    session.commit()
    session.refresh(new_subtask)

    return new_subtask
+
+
def update_subtask(
    session: Session,
    subtask_id: int,
    user_id: int,
    title: Optional[str] = None,
    completed: Optional[bool] = None,
    order: Optional[int] = None
) -> Optional[Subtask]:
    """
    Apply partial updates to a subtask the user owns.

    Only the fields passed as non-None are changed; ``updated_at`` is
    refreshed on every successful update.

    Args:
        session: Database session
        subtask_id: Subtask ID
        user_id: User ID for ownership verification
        title: New title (optional)
        completed: New completion status (optional)
        order: New order (optional)

    Returns:
        The updated subtask, or None when it is missing or its parent
        task belongs to another user.
    """
    subtask = session.get(Subtask, subtask_id)
    if subtask is None:
        return None

    # Ownership is verified through the parent task.
    parent = session.get(Task, subtask.task_id)
    if parent is None or parent.user_id != user_id:
        return None

    if title is not None:
        subtask.title = title
    if completed is not None:
        subtask.completed = completed
    if order is not None:
        subtask.order = order
    subtask.updated_at = datetime.utcnow()

    session.add(subtask)
    session.commit()
    session.refresh(subtask)

    return subtask
+
+
def delete_subtask(
    session: Session,
    subtask_id: int,
    user_id: int
) -> bool:
    """
    Delete a subtask the user owns.

    Args:
        session: Database session
        subtask_id: Subtask ID
        user_id: User ID for ownership verification

    Returns:
        True if deleted, False when the subtask is missing or its parent
        task belongs to another user.
    """
    subtask = session.get(Subtask, subtask_id)
    if subtask is None:
        return False

    # Ownership is verified through the parent task.
    parent = session.get(Task, subtask.task_id)
    if parent is None or parent.user_id != user_id:
        return False

    session.delete(subtask)
    session.commit()
    return True
diff --git a/src/services/tasks.py b/src/services/tasks.py
new file mode 100644
index 0000000000000000000000000000000000000000..c55747bd641b5785c476cc2c22db729d344ab4b9
--- /dev/null
+++ b/src/services/tasks.py
@@ -0,0 +1,248 @@
+"""
+Task service for business logic related to task operations.
+
+This module provides:
+- Get user tasks with filtering
+- Create new tasks
+- Update existing tasks
+- Delete tasks
+- User isolation enforcement
+"""
+
+from typing import List, Optional
+from sqlmodel import Session, select
+from ..models.task import Task
+
+
def get_user_tasks(session: Session, user_id: int) -> List[Task]:
    """
    Return every task owned by a user, newest first.

    Args:
        session: Database session
        user_id: ID of the user whose tasks to retrieve

    Returns:
        List of the user's Task objects ordered by creation date descending
    """
    query = (
        select(Task)
        .where(Task.user_id == user_id)
        .order_by(Task.created_at.desc())
    )
    return list(session.exec(query).all())
+
+
def get_task_by_id(session: Session, task_id: int, user_id: int) -> Optional[Task]:
    """
    Fetch a single task, returning it only when it belongs to the user.

    Args:
        session: Database session
        task_id: ID of the task to retrieve
        user_id: ID of the user (for ownership verification)

    Returns:
        Task object if found and owned by the user, None otherwise
    """
    return session.exec(
        select(Task).where(Task.id == task_id, Task.user_id == user_id)
    ).first()
+
+
def create_task(
    session: Session,
    user_id: int,
    title: str,
    description: Optional[str] = None,
    category: Optional[str] = None,
    due_date: Optional[str] = None,
    priority: Optional[str] = "medium",
    is_recurring: bool = False,
    recurrence_type: Optional[str] = None,
    recurrence_interval: Optional[int] = 1,
    recurrence_end_date: Optional[str] = None
) -> Task:
    """
    Create a new task for a user.

    Args:
        session: Database session
        user_id: ID of the user creating the task
        title: Task title
        description: Optional task description
        category: Optional task category/tag
        due_date: Optional due date in ISO format ('Z' suffix accepted)
        priority: Task priority (low, medium, high); None falls back to "medium"
        is_recurring: Whether task is recurring
        recurrence_type: Type of recurrence (daily, weekly, monthly, yearly)
        recurrence_interval: Interval for recurrence (e.g. every 2 days)
        recurrence_end_date: End date for recurrence in ISO format

    Returns:
        Created Task object

    Example:
        >>> task = create_task(session, user_id=1, title="Buy groceries")
        >>> print(task.id)
        1
    """
    from datetime import datetime

    def _parse_iso(value: Optional[str]):
        """Parse an ISO-8601 date string ('Z' suffix allowed); None when invalid.

        Previously this parsing logic was duplicated for each date field;
        it is deliberately lenient: bad input yields None rather than an error.
        """
        if not value:
            return None
        try:
            return datetime.fromisoformat(value.replace('Z', '+00:00'))
        except (ValueError, AttributeError):
            return None

    new_task = Task(
        user_id=user_id,
        title=title,
        description=description,
        completed=False,
        category=category,
        due_date=_parse_iso(due_date),
        priority=priority or "medium",
        is_recurring=is_recurring,
        # Recurrence fields are only meaningful on recurring tasks.
        recurrence_type=recurrence_type if is_recurring else None,
        recurrence_interval=recurrence_interval if is_recurring else None,
        recurrence_end_date=_parse_iso(recurrence_end_date) if is_recurring else None
    )

    session.add(new_task)
    session.commit()
    session.refresh(new_task)

    return new_task
+
+
def update_task(
    session: Session,
    task_id: int,
    user_id: int,
    title: Optional[str] = None,
    description: Optional[str] = None,
    completed: Optional[bool] = None,
    category: Optional[str] = None,
    due_date: Optional[str] = None,
    priority: Optional[str] = None,
    is_recurring: Optional[bool] = None,
    recurrence_type: Optional[str] = None,
    recurrence_interval: Optional[int] = None,
    recurrence_end_date: Optional[str] = None
) -> Optional[Task]:
    """
    Partially update a task the user owns.

    Only parameters passed as non-None are written. Date strings are
    parsed as ISO-8601 (a trailing 'Z' is accepted); unparseable dates
    leave the stored value untouched.

    Args:
        session: Database session
        task_id: ID of the task to update
        user_id: ID of the user (for ownership verification)
        title: New title (optional)
        description: New description (optional)
        completed: New completion status (optional)
        category: New category/tag (optional)
        due_date: New due date in ISO format (optional)
        priority: New priority (optional)
        is_recurring: Whether task is recurring (optional)
        recurrence_type: Type of recurrence (optional)
        recurrence_interval: Interval for recurrence (optional)
        recurrence_end_date: End date for recurrence (optional)

    Returns:
        The updated Task, or None when the task is missing or owned by
        another user.
    """
    from datetime import datetime

    task = get_task_by_id(session, task_id, user_id)
    if task is None:
        return None

    # Plain scalar fields: overwrite whenever a value was supplied.
    if title is not None:
        task.title = title
    if description is not None:
        task.description = description
    if completed is not None:
        task.completed = completed
    if category is not None:
        task.category = category
    if priority is not None:
        task.priority = priority
    if is_recurring is not None:
        task.is_recurring = is_recurring
    if recurrence_type is not None:
        task.recurrence_type = recurrence_type
    if recurrence_interval is not None:
        task.recurrence_interval = recurrence_interval

    # Date fields: best-effort parsing; invalid input keeps the old value.
    if due_date is not None:
        try:
            task.due_date = datetime.fromisoformat(due_date.replace('Z', '+00:00'))
        except (ValueError, AttributeError):
            pass
    if recurrence_end_date is not None:
        try:
            task.recurrence_end_date = datetime.fromisoformat(
                recurrence_end_date.replace('Z', '+00:00')
            )
        except (ValueError, AttributeError):
            pass

    task.updated_at = datetime.utcnow()
    session.add(task)
    session.commit()
    session.refresh(task)

    return task
+
+
def delete_task(session: Session, task_id: int, user_id: int) -> bool:
    """
    Delete a task if it exists and belongs to the user.

    Args:
        session: Database session
        task_id: ID of the task to delete
        user_id: ID of the user (for ownership verification)

    Returns:
        True when the task was deleted, False otherwise
    """
    task = get_task_by_id(session, task_id, user_id)
    if task is None:
        return False

    session.delete(task)
    session.commit()
    return True
diff --git a/src/validation/__init__.py b/src/validation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/validation/security_guard.py b/src/validation/security_guard.py
new file mode 100644
index 0000000000000000000000000000000000000000..d79d31e2a3017130ab18dc05648186c3fae59c8c
--- /dev/null
+++ b/src/validation/security_guard.py
@@ -0,0 +1,106 @@
+"""
+Security guard for user authorization checks.
+
+This module ensures that all tool operations are authorized
+and that users can only access their own data.
+"""
+
+import logging
+from typing import Optional
+from sqlmodel import Session, select
+from src.models.task import Task
+
+logger = logging.getLogger(__name__)
+
+
+class SecurityGuard:
+ """
+ Security guard for validating user authorization.
+
+ All MCP tools must use this guard to verify that:
+ 1. User is authenticated (user_id is valid)
+ 2. User owns the resources they're trying to access
+ 3. No cross-user data access is possible
+ """
+
+ @staticmethod
+ async def validate_task_ownership(
+ db: Session,
+ task_id: int,
+ user_id: int
+ ) -> Optional[Task]:
+ """
+ Validate that a task belongs to the authenticated user.
+
+ Args:
+ db: Database session
+ task_id: Task ID to validate
+ user_id: Authenticated user ID
+
+ Returns:
+ Task object if valid, None if not found or unauthorized
+
+ Raises:
+ ValueError: If task doesn't belong to user
+ """
+ statement = select(Task).where(
+ Task.id == task_id,
+ Task.user_id == user_id
+ )
+ task = db.exec(statement).first()
+
+ if not task:
+ logger.warning(f"Task {task_id} not found or unauthorized for user {user_id}")
+ raise ValueError(f"Task not found or access denied")
+
+ return task
+
+ @staticmethod
+ def validate_user_id(user_id: int) -> None:
+ """
+ Validate that user_id is provided and valid.
+
+ Args:
+ user_id: User ID to validate
+
+ Raises:
+ ValueError: If user_id is invalid
+ """
+ if not user_id or user_id <= 0:
+ logger.error(f"Invalid user_id: {user_id}")
+ raise ValueError("Invalid user_id")
+
+ @staticmethod
+ async def validate_conversation_ownership(
+ db: Session,
+ conversation_id: int,
+ user_id: int
+ ) -> bool:
+ """
+ Validate that a conversation belongs to the authenticated user.
+
+ Args:
+ db: Database session
+ conversation_id: Conversation ID to validate
+ user_id: Authenticated user ID
+
+ Returns:
+ True if valid, False otherwise
+ """
+ from src.models.conversation import Conversation
+
+ statement = select(Conversation).where(
+ Conversation.id == conversation_id,
+ Conversation.user_id == user_id
+ )
+ conversation = db.exec(statement).first()
+
+ if not conversation:
+ logger.warning(f"Conversation {conversation_id} not found or unauthorized for user {user_id}")
+ return False
+
+ return True
+
+
+# Singleton instance
+security_guard = SecurityGuard()
diff --git a/src/validation/tool_validator.py b/src/validation/tool_validator.py
new file mode 100644
index 0000000000000000000000000000000000000000..30ef44dd748f806506154c0b1d3e905bfaca45c0
--- /dev/null
+++ b/src/validation/tool_validator.py
@@ -0,0 +1,122 @@
+"""
+Tool schema validator using Pydantic models.
+
+This module validates tool call parameters against defined schemas
+before execution to prevent errors and security issues.
+"""
+
+import logging
+from typing import Any, Dict, Optional
+from pydantic import BaseModel, Field, ValidationError
+
+logger = logging.getLogger(__name__)
+
+
+# Pydantic models for tool parameters
+
+class AddTaskParams(BaseModel):
+    """Parameters for add_task tool.
+
+    Pydantic enforces the constraints at validation time: a positive
+    user_id and a non-empty title (<=255 chars) are required;
+    description defaults to the empty string.
+    """
+    user_id: int = Field(..., gt=0, description="User ID")
+    title: str = Field(..., min_length=1, max_length=255, description="Task title")
+    description: str = Field(default="", max_length=1000, description="Task description")
+
+
+class ListTasksParams(BaseModel):
+    """Parameters for list_tasks tool.
+
+    filter must be one of "all", "pending", "completed" (regex-enforced);
+    limit is constrained to 1..100 with a default of 50.
+    """
+    user_id: int = Field(..., gt=0, description="User ID")
+    # `filter` shadows the builtin, but renaming it would change the
+    # tool's wire parameter name, so it stays.
+    filter: str = Field(default="all", pattern="^(all|pending|completed)$", description="Task filter")
+    limit: int = Field(default=50, ge=1, le=100, description="Maximum tasks to return")
+
+
+class CompleteTaskParams(BaseModel):
+ """Parameters for complete_task tool."""
+ user_id: int = Field(..., gt=0, description="User ID")
+ task_id: int = Field(default=None, gt=0, description="Task ID")
+ task_title: str = Field(default=None, min_length=1, description="Task title for matching")
+
+
+class DeleteTaskParams(BaseModel):
+ """Parameters for delete_task tool."""
+ user_id: int = Field(..., gt=0, description="User ID")
+ task_id: int = Field(default=None, gt=0, description="Task ID")
+ task_title: str = Field(default=None, min_length=1, description="Task title for matching")
+
+
+class UpdateTaskParams(BaseModel):
+ """Parameters for update_task tool."""
+ user_id: int = Field(..., gt=0, description="User ID")
+ task_id: int = Field(default=None, gt=0, description="Task ID")
+ task_title: str = Field(default=None, min_length=1, description="Current task title for matching")
+ new_title: str = Field(default=None, min_length=1, max_length=255, description="New task title")
+ new_description: str = Field(default=None, max_length=1000, description="New task description")
+
+
+class ToolValidator:
+ """Validates tool call parameters against schemas."""
+
+ # Map tool names to their parameter models
+ TOOL_SCHEMAS = {
+ "add_task": AddTaskParams,
+ "list_tasks": ListTasksParams,
+ "complete_task": CompleteTaskParams,
+ "delete_task": DeleteTaskParams,
+ "update_task": UpdateTaskParams
+ }
+
+ @classmethod
+ def validate(cls, tool_name: str, parameters: Dict[str, Any]) -> Dict[str, Any]:
+ """
+ Validate tool parameters against schema.
+
+ Args:
+ tool_name: Name of the tool
+ parameters: Parameters to validate
+
+ Returns:
+ Validated parameters dictionary
+
+ Raises:
+ ValueError: If tool name is unknown
+ ValidationError: If parameters are invalid
+ """
+ if tool_name not in cls.TOOL_SCHEMAS:
+ raise ValueError(f"Unknown tool: {tool_name}")
+
+ schema = cls.TOOL_SCHEMAS[tool_name]
+
+ try:
+ validated = schema(**parameters)
+ logger.info(f"Tool parameters validated: {tool_name}")
+ return validated.model_dump()
+ except ValidationError as e:
+ logger.error(f"Tool parameter validation failed: {tool_name} - {str(e)}")
+ raise
+
+ @classmethod
+ def validate_tool_call(cls, tool_call: Dict[str, Any]) -> bool:
+ """
+ Validate that a tool call has the required structure.
+
+ Args:
+ tool_call: Tool call dictionary
+
+ Returns:
+ True if valid structure, False otherwise
+ """
+ if not isinstance(tool_call, dict):
+ return False
+
+ if "name" not in tool_call or "parameters" not in tool_call:
+ return False
+
+ if not isinstance(tool_call["name"], str):
+ return False
+
+ if not isinstance(tool_call["parameters"], dict):
+ return False
+
+ return True
+
+
+# Singleton instance
+tool_validator = ToolValidator()
diff --git a/templates/password_reset_email.html b/templates/password_reset_email.html
new file mode 100644
index 0000000000000000000000000000000000000000..b706890318efed4b81e9e4d89a3268d00ded6f3d
--- /dev/null
+++ b/templates/password_reset_email.html
@@ -0,0 +1,75 @@
+
+
+
+
+
+ Password Reset
+
+
+
+
+
+
+
+
+
+ Password Reset Request
+ |
+
+
+
+
+ |
+
+ Hello,
+
+
+ We received a request to reset the password for your account associated with {{USER_EMAIL}}.
+
+
+ Click the button below to reset your password. This link will expire in 15 minutes.
+
+
+
+
+
+
+
+
+ Security Notice: If you didn't request a password reset, you can safely ignore this email. Your password will not be changed.
+
+
+
+
+
+ If the button doesn't work, copy and paste this link into your browser:
+
+
+ {{RESET_LINK}}
+
+ |
+
+
+
+
+ |
+
+ This is an automated message from Todo Application.
+
+
+ © 2026 Todo Application. All rights reserved.
+
+ |
+
+
+ |
+
+
+
+
diff --git a/vercel.json b/vercel.json
new file mode 100644
index 0000000000000000000000000000000000000000..68d6203c7427cc727c1d4348f69cbd32f912f672
--- /dev/null
+++ b/vercel.json
@@ -0,0 +1,18 @@
+{
+ "version": 2,
+ "builds": [
+ {
+ "src": "api/index.py",
+ "use": "@vercel/python"
+ }
+ ],
+ "routes": [
+ {
+ "src": "/(.*)",
+ "dest": "api/index.py"
+ }
+ ],
+ "env": {
+ "PYTHONPATH": "$PYTHONPATH:."
+ }
+}