Gustav2811 commited on
Commit
971a10e
·
1 Parent(s): 8daa8b5

Initial deployment to Hugging Face Spaces

Browse files
.env.example ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # OpenAI Configuration
2
+ OPENAI_API_KEY=your_openai_api_key_here
3
+
4
+ # Google Gemini Configuration
5
+ GOOGLE_API_KEY=your_google_api_key_here
6
+
7
+ # Application Configuration
8
+ DEFAULT_MODEL=gpt-4o
9
+ # Available options: gpt-4.1, gpt-4o, o4-mini, o3, o3-pro, o3-mini, gpt-3.5-turbo, gpt-4, gpt-4-turbo, gemini-2.5-pro, gemini-2.5-flash, gemini-pro
10
+
11
+ # Chainlit Configuration
12
+ CHAINLIT_HOST=0.0.0.0
13
+ CHAINLIT_PORT=8000
14
+
15
+ # Development Settings
16
+ DEBUG=False
17
+ LOG_LEVEL=INFO
.gitignore ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ share/python-wheels/
24
+ *.egg-info/
25
+ .installed.cfg
26
+ *.egg
27
+ *.manifest
28
+ *.spec
29
+
30
+ # PyInstaller
31
+ # Usually these files are written by a python script from a template
32
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
33
+ *.manifest
34
+ *.spec
35
+
36
+ # Installer logs
37
+ pip-log.txt
38
+ pip-delete-this-directory.txt
39
+
40
+ # Unit test / coverage reports
41
+ htmlcov/
42
+ .tox/
43
+ .nox/
44
+ .coverage
45
+ .coverage.*
46
+ .cache
47
+ nosetests.xml
48
+ coverage.xml
49
+ *.cover
50
+ .hypothesis/
51
+ .pytest_cache/
52
+
53
+ # Environments
54
+ .env
55
+ .env.*
56
+ !.env.example
57
+ venv/
58
+ .venv/
59
+ env/
60
+ .env/
61
+ ENV/
62
+ env.bak/
63
+ venv.bak/
64
+
65
+ # IDEs and editors
66
+ .idea/
67
+ .vscode/
68
+ *.sublime-*
69
+
70
+ # Chainlit specific
71
+ .chainlit/db/
72
+ .chainlit/
73
+ !.chainlit/config.toml
74
+
75
+ # OS-specific
76
+ .DS_Store
77
+ Thumbs.db
78
+
79
+ # uv
80
+ .uv/
.vercelignore ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
venv/
.venv/
3
+ ENV/
4
+ .git/
5
+ .idea/
6
+ .vscode/
7
+ *.pyc
8
+ *.pyo
9
+ *.pyd
10
+ tests/
11
+ pytest_cache/
12
+ .coverage
13
+ htmlcov/
14
+ .chainlit/db/
15
+ .chainlit/run.log
16
+ .DS_Store
17
+ Thumbs.db
18
+ .env
19
+ .env.*
20
+ !.env.example
Dockerfile ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use the official Python 3.12 image as requested
2
+ FROM python:3.12
3
+
4
+ # Set the working directory inside the container
5
+ WORKDIR /app
6
+
7
+ # Create a non-root user for better security
8
+ RUN useradd -m -u 1000 user
9
+ USER user
10
+
11
+ # Set environment variables for the new user's path
12
+ ENV HOME=/home/user \
13
+ PATH=/home/user/.local/bin:$PATH
14
+
15
+ # Copy requirements first to leverage Docker's layer caching
16
+ COPY --chown=user./requirements.txt.
17
+ RUN pip install -r requirements.txt
18
+
19
+ # Copy the rest of the application code into the container
20
+ COPY --chown=user..
21
+
22
+ # Expose the port Hugging Face Spaces uses for web apps
23
+ EXPOSE 7860
24
+
25
+ # The command to run the Chainlit application in production
26
+ # -h (headless) is critical and prevents the server from trying to open a browser
27
+ # --host 0.0.0.0 makes the app accessible within the container's network
28
+ CMD ["chainlit", "run", "src/naked_chat/app.py", "--host", "0.0.0.0", "--port", "7860", "-h"]
README_OLD.md ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Naked Insurance - LLM Chat Client POC
2
+
3
+ This repository contains the Proof of Concept (POC) for a Large Language Model (LLM) powered chat client for internal use at Naked Insurance. The application is built using the Chainlit framework for both the frontend and backend.
4
+
5
+ ## 🎯 Project Overview
6
+
7
+ This POC demonstrates a modern, production-ready approach to building an LLM-powered chat interface that can integrate with multiple AI providers (OpenAI, Google Gemini) while maintaining a clean, maintainable codebase.
8
+
9
+ ### Key Features
10
+
11
+ - **Multi-Provider LLM Support**: Seamless integration with OpenAI GPT models (including flagship and reasoning models) and Google Gemini
12
+ - **Latest AI Models**: Support for GPT-4.1, GPT-4o, o3 series reasoning models, and Gemini 2.5 Pro/Flash
13
+ - **Modern Python Architecture**: Uses the `src` layout with proper packaging and dependency management
14
+ - **Production-Ready Foundation**: Comprehensive tooling for linting, testing, and type checking
15
+ - **Chainlit Framework**: Leverages Chainlit for rapid prototyping of chat interfaces
16
+ - **Environment-Based Configuration**: Secure handling of API keys and configuration
17
+
18
+ ## 🏗️ Architecture
19
+
20
+ The project follows modern Python best practices with a clear separation of concerns:
21
+
22
+ ```
23
+ naked-chat-poc/
24
+ ├── .chainlit/ # Chainlit-specific configurations
25
+ ├── .github/ # GitHub templates and workflows
26
+ ├── public/ # Static assets (themes, logos, etc.)
27
+ ├── src/naked_chat/ # Main application package
28
+ ├── tests/ # Test suite
29
+ ├── pyproject.toml # Project configuration and dependencies
30
+ └── README.md # This file
31
+ ```
32
+
33
+ ## 🚀 Quick Start
34
+
35
+ ### Prerequisites
36
+
37
+ - Python 3.9 or higher
38
+ - [uv](https://github.com/astral-sh/uv) (recommended) or pip
39
+
40
+ ### Installation
41
+
42
+ 1. **Clone the repository**:
43
+
44
+ ```bash
45
+ git clone https://github.com/naked-insurance/naked-chat-poc.git
46
+ cd naked-chat-poc
47
+ ```
48
+
49
+ 2. **Set up the environment**:
50
+
51
+ ```bash
52
+ # Using uv (recommended)
53
+ uv venv
54
+ source .venv/bin/activate # On Windows: .venv\Scripts\activate
55
+ uv pip install -e ".[dev]"
56
+
57
+ # Or using pip
58
+ python -m venv .venv
59
+ source .venv/bin/activate # On Windows: .venv\Scripts\activate
60
+ pip install -e ".[dev]"
61
+ ```
62
+
63
+ 3. **Configure environment variables**:
64
+
65
+ ```bash
66
+ cp .env.example .env
67
+ # Edit .env with your API keys and configuration
68
+ ```
69
+
70
+ 4. **Run the application**:
71
+ ```bash
72
+ chainlit run src/naked_chat/app.py
73
+ ```
74
+
75
+ The application will be available at `http://localhost:8000`.
76
+
77
+ ## ⚙️ Configuration
78
+
79
+ ### Environment Variables
80
+
81
+ Copy `.env.example` to `.env` and configure the following variables:
82
+
83
+ - `OPENAI_API_KEY`: Your OpenAI API key
84
+ - `GOOGLE_API_KEY`: Your Google Gemini API key
85
+ - `DEFAULT_MODEL`: Default LLM model to use
86
+
87
+ ### Chainlit Configuration
88
+
89
+ The `.chainlit/config.toml` file contains Chainlit-specific settings for data persistence, file uploads, and UI customization.
90
+
91
+ ## 🧪 Development
92
+
93
+ ### Running Tests
94
+
95
+ ```bash
96
+ # Run all tests
97
+ pytest
98
+
99
+ # Run with coverage
100
+ pytest --cov=naked_chat
101
+ ```
102
+
103
+ ### Code Quality
104
+
105
+ ```bash
106
+ # Linting and formatting with Ruff
107
+ ruff check .
108
+ ruff format .
109
+
110
+ # Type checking with mypy
111
+ mypy src/naked_chat
112
+ ```
113
+
114
+ ### Development Workflow
115
+
116
+ 1. Create a feature branch: `git checkout -b feature/your-feature-name`
117
+ 2. Make your changes
118
+ 3. Run tests and quality checks: `pytest && ruff check . && mypy src/naked_chat`
119
+ 4. Commit your changes: `git commit -m "Add your feature"`
120
+ 5. Push and create a pull request
121
+
122
+ ## 📁 Project Structure Details
123
+
124
+ - **`src/naked_chat/`**: Main application package following the src layout
125
+ - `app.py`: Chainlit application entry point
126
+ - `models/`: LLM integration modules
127
+ - `utils/`: Utility functions and helpers
128
+ - **`tests/`**: Comprehensive test suite
129
+ - Unit tests for core functionality
130
+ - Integration tests for LLM providers
131
+ - **`.chainlit/`**: Chainlit configuration
132
+ - `config.toml`: Framework settings and customizations
133
+ - **`public/`**: Static assets
134
+ - `theme.json`: Custom branding and themes
135
+ - Assets like logos, favicons, etc.
136
+
137
+ ## 🤝 Contributing
138
+
139
+ This is an internal POC project. Please follow the established code style and ensure all tests pass before submitting changes.
140
+
141
+ ## 📄 License
142
+
143
+ This project is proprietary to Naked Insurance.
144
+
145
+ ---
146
+
147
+ **Note**: This is a Proof of Concept intended for internal evaluation and demonstration purposes.
chainlit.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Welcome to Chainlit! 🚀🤖
2
+
3
+ Hi there, Developer! 👋 We're excited to have you on board. Chainlit is a powerful tool designed to help you prototype, debug and share applications built on top of LLMs.
4
+
5
+ ## Useful Links 🔗
6
+
7
+ - **Documentation:** Get started with our comprehensive [Chainlit Documentation](https://docs.chainlit.io) 📚
8
+ - **Discord Community:** Join our friendly [Chainlit Discord](https://discord.gg/k73SQ3FyUh) to ask questions, share your projects, and connect with other developers! 💬
9
+
10
+ We can't wait to see what you create with Chainlit! Happy coding! 💻😊
11
+
12
+ ## Welcome screen
13
+
14
+ To modify the welcome screen, edit the `chainlit.md` file at the root of your project. If you do not want a welcome screen, just leave this file empty.
public/favicon.png ADDED
public/logo_dark.png ADDED
public/logo_light.png ADDED
public/theme.json ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "custom_fonts": [
3
+ "https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&display=swap"
4
+ ],
5
+ "variables": {
6
+ "light": {
7
+ "--font-sans": "'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif",
8
+ "--font-mono": "source-code-pro, Menlo, Monaco, Consolas, 'Courier New', monospace",
9
+ "--background": "0 0% 98%",
10
+ "--foreground": "0 0% 13%",
11
+ "--card": "0 0% 100%",
12
+ "--card-foreground": "0 0% 13%",
13
+ "--popover": "0 0% 100%",
14
+ "--popover-foreground": "0 0% 13%",
15
+ "--primary": "207 90% 54%",
16
+ "--primary-foreground": "0 0% 100%",
17
+ "--secondary": "340 97% 43%",
18
+ "--secondary-foreground": "0 0% 100%",
19
+ "--muted": "0 0% 96%",
20
+ "--muted-foreground": "0 0% 46%",
21
+ "--accent": "0 0% 96%",
22
+ "--accent-foreground": "0 0% 13%",
23
+ "--destructive": "0 84% 60%",
24
+ "--destructive-foreground": "0 0% 98%",
25
+ "--border": "0 0% 90%",
26
+ "--input": "0 0% 90%",
27
+ "--ring": "207 90% 54%",
28
+ "--radius": "0.75rem",
29
+ "--sidebar-background": "0 0% 98%",
30
+ "--sidebar-foreground": "0 0% 46%",
31
+ "--sidebar-primary": "0 0% 13%",
32
+ "--sidebar-primary-foreground": "0 0% 98%",
33
+ "--sidebar-accent": "0 0% 96%",
34
+ "--sidebar-accent-foreground": "0 0% 13%",
35
+ "--sidebar-border": "0 0% 91%",
36
+ "--sidebar-ring": "207 90% 54%"
37
+ },
38
+ "dark": {
39
+ "--font-sans": "'Inter', -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif",
40
+ "--font-mono": "source-code-pro, Menlo, Monaco, Consolas, 'Courier New', monospace",
41
+ "--background": "0 0% 13%",
42
+ "--foreground": "0 0% 93%",
43
+ "--card": "0 0% 18%",
44
+ "--card-foreground": "145 61% 55%",
45
+ "--popover": "0 0% 18%",
46
+ "--popover-foreground": "145 61% 55%",
47
+ "--primary": "145 61% 55%",
48
+ "--primary-foreground": "0 0% 100%",
49
+ "--secondary": "340 97% 43%",
50
+ "--secondary-foreground": "0 0% 100%",
51
+ "--muted": "0 1% 26%",
52
+ "--muted-foreground": "0 0% 71%",
53
+ "--accent": "0 0% 26%",
54
+ "--accent-foreground": "0 0% 98%",
55
+ "--destructive": "0 63% 31%",
56
+ "--destructive-foreground": "0 0% 98%",
57
+ "--border": "0 1% 26%",
58
+ "--input": "0 1% 26%",
59
+ "--ring": "207 90% 54%",
60
+ "--sidebar-background": "0 0% 9%",
61
+ "--sidebar-foreground": "0 0% 96%",
62
+ "--sidebar-primary": "207 90% 54%",
63
+ "--sidebar-primary-foreground": "0 0% 100%",
64
+ "--sidebar-accent": "0 0% 13%",
65
+ "--sidebar-accent-foreground": "0 0% 96%",
66
+ "--sidebar-border": "0 4% 16%",
67
+ "--sidebar-ring": "207 90% 54%"
68
+ }
69
+ }
70
+ }
pyproject.toml ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "naked-chat-poc"
3
+ version = "0.1.0"
4
+ description = "A POC for an LLM-powered chat client for Naked Insurance using Chainlit."
5
+ authors = [
6
+ { name = "Gustav Klingbiel", email = "gustav.klingbiel@naked.insure" },
7
+ ]
8
+ license = { text = "Proprietary" }
9
+ requires-python = ">=3.9"
10
+ readme = "README.md"
11
+ classifiers = [
12
+ "Programming Language :: Python :: 3",
13
+ "Programming Language :: Python :: 3.9",
14
+ "Programming Language :: Python :: 3.10",
15
+ "Programming Language :: Python :: 3.11",
16
+ "Programming Language :: Python :: 3.12",
17
+ ]
18
+
19
+ # Core runtime dependencies
20
+ dependencies = [
21
+ "chainlit>=1.0.0",
22
+ "openai",
23
+ "google-generativeai",
24
+ "python-dotenv",
25
+ "requests",
26
+ ]
27
+
28
+ [project.urls]
29
+ Repository = "https://github.com/naked-insurance/naked-chat-poc"
30
+
31
+ [project.optional-dependencies]
32
+ # Dependencies for local development, testing, and linting
33
+ dev = [
34
+ "pytest",
35
+ "pytest-cov",
36
+ "pytest-asyncio",
37
+ "ruff",
38
+ "mypy",
39
+ ]
40
+
41
+ [build-system]
42
+ requires = ["setuptools>=61.0", "wheel"]
43
+ build-backend = "setuptools.build_meta"
44
+
45
+ [tool.setuptools]
46
+ packages = ["naked_chat"]
47
+ package-dir = {"" = "src"}
48
+
49
+ # Configuration for the Ruff linter and formatter
50
+ # Ruff is an extremely fast, all-in-one tool that replaces flake8, isort, and black.
51
+ [tool.ruff]
52
+ line-length = 88
53
+
54
+ [tool.ruff.lint]
55
+ select = ["E", "F", "I", "N", "W", "UP"]
56
+ ignore = ["E501"] # Handled by the formatter
57
+
58
+ [tool.ruff.format]
59
+ quote-style = "double"
60
+
61
+ # Configuration for the mypy static type checker
62
+ [tool.mypy]
63
+ strict = true
64
+ ignore_missing_imports = true
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ chainlit>=1.0.0
2
+ openai
3
+ google-generativeai
4
+ python-dotenv
5
+ requests
src/naked_chat/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
"""
Naked Insurance LLM Chat Client POC

A Chainlit-powered chat interface for integrating with multiple LLM providers.
"""

# Package metadata. Keep __version__ in sync with the version declared in
# pyproject.toml; tests/test_app.py asserts this exact string.
__version__ = "0.1.0"
__author__ = "Gustav Klingbiel"
__email__ = "gustav.klingbiel@naked.insure"
src/naked_chat/app.py ADDED
@@ -0,0 +1,252 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Main Chainlit application for the Naked Insurance LLM Chat POC.
3
+
4
+ This module serves as the entry point for the Chainlit application and handles
5
+ the chat interface, LLM provider integration, and message processing.
6
+ """
7
+
8
+ import os
9
+ from typing import Optional, Dict
10
+
11
+ import chainlit as cl
12
+ import google.generativeai as genai
13
+ from dotenv import load_dotenv
14
+ from openai import AsyncOpenAI
15
+
16
# Load variables from a local .env file, if present (no-op otherwise).
load_dotenv()

# Configuration read once at import time; DEFAULT_MODEL falls back to "gpt-4o"
# when the env var is unset.
DEFAULT_MODEL = os.getenv("DEFAULT_MODEL", "gpt-4o")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
GOOGLE_API_KEY = os.getenv("GOOGLE_API_KEY")

# Initialize provider clients only when the corresponding key is present, so
# the app still starts when only one provider is configured.
# openai_client is None when no key is set; callers must check before use.
openai_client = AsyncOpenAI(api_key=OPENAI_API_KEY) if OPENAI_API_KEY else None
if GOOGLE_API_KEY:
    genai.configure(api_key=GOOGLE_API_KEY)
28
+
29
+
30
@cl.oauth_callback
def oauth_callback(
    provider_id: str,
    token: str,
    raw_user_data: Dict[str, str],
    default_user: cl.User,
) -> Optional[cl.User]:
    """Post-authentication hook: decide whether the signed-in user gets access.

    Only Google OAuth is accepted, and only for accounts whose hosted-domain
    ("hd") claim belongs to an allow-listed company domain. Returning None
    denies access; returning the user object grants it.
    """
    if provider_id != "google":
        # Any provider other than Google is unexpected -> deny access.
        return None

    # Allow users from both naked.insure and klingbiel.org domains.
    if raw_user_data.get("hd") in {"naked.insure", "klingbiel.org"}:
        return default_user

    return None
54
+
55
+
56
@cl.on_chat_start
async def start():
    """
    Initialize the chat session: render the settings panel and seed defaults.

    Fix: the original hard-coded `initial_index=1` (gpt-4o) in the Select
    widget while seeding the session with DEFAULT_MODEL, so the UI and the
    session disagreed whenever the DEFAULT_MODEL env var was changed. The
    pre-selected entry is now derived from DEFAULT_MODEL itself.
    """
    model_options = [
        # OpenAI Flagship Models
        "gpt-4.1",
        "gpt-4o",
        # OpenAI Reasoning Models
        "o4-mini",
        "o3",
        "o3-pro",
        "o3-mini",
        # Legacy OpenAI Models
        "gpt-3.5-turbo",
        "gpt-4",
        "gpt-4-turbo",
        # Google Gemini Models
        "gemini-2.5-pro",
        "gemini-2.5-flash",
        "gemini-pro",
    ]
    # Keep the widget's pre-selected entry in sync with DEFAULT_MODEL; fall
    # back to gpt-4o if the env var names an unknown model.
    try:
        default_index = model_options.index(DEFAULT_MODEL)
    except ValueError:
        default_index = model_options.index("gpt-4o")

    settings = await cl.ChatSettings(
        [
            cl.input_widget.Select(
                id="Model",
                label="Choose LLM Model",
                values=model_options,
                initial_index=default_index,
            ),
            cl.input_widget.Slider(
                id="Temperature",
                label="Temperature",
                initial=0.7,
                min=0,
                max=2,
                step=0.1,
            ),
            cl.input_widget.Slider(
                id="MaxTokens",
                label="Max Output Tokens",
                initial=500,
                min=50,
                max=2000,
                step=50,
            ),
        ]
    ).send()

    # Seed the session with the initial values shown in the settings panel.
    cl.user_session.set("settings", settings)
    cl.user_session.set("model", DEFAULT_MODEL)
    cl.user_session.set("temperature", 0.7)
    cl.user_session.set("max_tokens", 500)

    # Welcome message
    await cl.Message(
        content="""# Welcome to Naked Insurance Chat POC! 🚀

You can adjust the model and parameters using the settings panel. Let's get started!
""",
        author="Assistant",
    ).send()
119
+
120
+
121
@cl.on_settings_update
async def setup_agent(settings):
    """Persist the updated UI settings into the user session.

    Maps each settings-panel widget id to its session key so later message
    handling reads the values the user just chose.
    """
    print(f"Settings updated: {settings}")
    widget_to_session = (
        ("Model", "model"),
        ("Temperature", "temperature"),
        ("MaxTokens", "max_tokens"),
    )
    for widget_id, session_key in widget_to_session:
        cl.user_session.set(session_key, settings[widget_id])
130
+
131
+
132
async def call_openai_model(
    model: str, messages: list, temperature: float, max_tokens: int
) -> str:
    """
    Call the OpenAI chat-completions API with the specified parameters.

    Args:
        model: OpenAI model name (e.g. "gpt-4o", "o3-mini").
        messages: Chat history as {"role", "content"} dicts.
        temperature: Sampling temperature (ignored for reasoning models).
        max_tokens: Cap on generated output tokens.

    Returns:
        The assistant's reply text.

    Raises:
        ValueError: If no OpenAI API key is configured.
    """
    if not openai_client:
        raise ValueError("OpenAI API key not configured")

    # Reasoning models (o-series, offered in the UI selector) reject the
    # `max_tokens` parameter and non-default temperature; they require
    # `max_completion_tokens` instead — see OpenAI reasoning-model docs.
    request_kwargs = {"model": model, "messages": messages}
    if model.startswith("o"):
        request_kwargs["max_completion_tokens"] = max_tokens
    else:
        request_kwargs["temperature"] = temperature
        request_kwargs["max_tokens"] = max_tokens

    response = await openai_client.chat.completions.create(**request_kwargs)

    return response.choices[0].message.content
149
+
150
+
151
async def call_gemini_model(
    model: str, messages: list, temperature: float, max_tokens: int
) -> str:
    """
    Call the Google Gemini API with the specified parameters.

    Args:
        model: Gemini model name as selected in the UI (e.g. "gemini-2.5-pro").
        messages: Chat history as OpenAI-style {"role", "content"} dicts.
        temperature: Sampling temperature.
        max_tokens: Cap on generated output tokens.

    Returns:
        The generated response text.

    Raises:
        ValueError: If no Google API key is configured.
    """
    if not GOOGLE_API_KEY:
        raise ValueError("Google API key not configured")

    # Flatten the OpenAI-style history into one "role: content" prompt.
    # NOTE(review): this loses the structured multi-turn chat format Gemini
    # also supports — acceptable for a POC, but worth revisiting.
    prompt = "\n".join(f"{msg['role']}: {msg['content']}" for msg in messages)

    # The original carried a model_mapping dict where every key mapped to
    # itself — pure dead code, removed: the UI names are already the API names.
    model_instance = genai.GenerativeModel(model)
    response = await model_instance.generate_content_async(
        prompt,
        generation_config=genai.types.GenerationConfig(
            temperature=temperature,
            max_output_tokens=max_tokens,
        ),
    )

    return response.text
181
+
182
+
183
@cl.on_message
async def main(message: cl.Message):
    """
    Handle an incoming user message: route it to the selected LLM and reply.

    Maintains a rolling window of the last 10 messages in the session so the
    prompt stays within context limits.

    Fix: the placeholder message is now created BEFORE the try block. The
    original created `msg` inside `try` but referenced it in `except`, so any
    failure during message creation/sending raised UnboundLocalError in the
    error handler instead of showing the error to the user.
    """
    # Per-session generation parameters (seeded in start / setup_agent).
    model = cl.user_session.get("model")
    temperature = cl.user_session.get("temperature")
    max_tokens = cl.user_session.get("max_tokens")

    # Append the new user turn to the rolling history.
    message_history = cl.user_session.get("message_history", [])
    message_history.append({"role": "user", "content": message.content})

    # Keep only last 10 messages to manage context length.
    if len(message_history) > 10:
        message_history = message_history[-10:]

    cl.user_session.set("message_history", message_history)

    # Create the empty placeholder reply before the try block (see docstring).
    msg = cl.Message(content="")
    await msg.send()

    try:
        # Route to the provider that owns the selected model; o-series names
        # (o3, o4-mini, ...) are OpenAI reasoning models.
        if model.startswith("gpt") or model.startswith("o"):
            response_content = await call_openai_model(
                model=model,
                messages=message_history,
                temperature=temperature,
                max_tokens=max_tokens,
            )
        elif model.startswith("gemini"):
            response_content = await call_gemini_model(
                model=model,
                messages=message_history,
                temperature=temperature,
                max_tokens=max_tokens,
            )
        else:
            response_content = f"Model '{model}' is not supported yet."

        # Replace the placeholder with the actual response.
        msg.content = response_content
        await msg.update()

        # Add assistant response to history
        message_history.append({"role": "assistant", "content": response_content})
        cl.user_session.set("message_history", message_history)

    except Exception as e:
        error_msg = f"Error generating response: {str(e)}"
        print(f"Error: {error_msg}")

        msg.content = (
            f"❌ **Error**: {error_msg}\n\nPlease check your API keys and try again."
        )
        await msg.update()
245
+
246
+
247
if __name__ == "__main__":
    # Convenience launcher: running `python src/naked_chat/app.py` directly
    # delegates to the Chainlit CLI (equivalent to `chainlit run <this file>`).
    import subprocess
    import sys

    subprocess.run([sys.executable, "-m", "chainlit", "run", __file__])
tests/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Tests package for naked-chat-poc
tests/test_app.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for the main Chainlit application.
3
+ """
4
+
5
+ import os
6
+ import sys
7
+ from unittest.mock import patch
8
+
9
+ import pytest
10
+
11
+ # Add the src directory to the path so we can import our modules
12
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "src"))
13
+
14
+ from naked_chat import __version__
15
+
16
+
17
def test_version():
    """The package must expose the expected semantic version string."""
    expected = "0.1.0"
    assert __version__ == expected
20
+
21
+
22
def test_environment_variables():
    """Module-level config must fall back to defaults when env vars are absent.

    Fix: the original merely imported naked_chat.app under the patched
    environment — but if any earlier test had already imported the module,
    Python returns the cached module and the os.getenv() calls are never
    re-evaluated, so the patch had no effect. Reloading the module forces
    re-evaluation under the empty environment.
    """
    import importlib

    with patch.dict(os.environ, {}, clear=True):
        import naked_chat.app as app_module

        # Re-execute module-level os.getenv() calls under the patched env.
        app_module = importlib.reload(app_module)

        assert app_module.DEFAULT_MODEL == "gpt-4o"
        assert app_module.OPENAI_API_KEY is None
        assert app_module.GOOGLE_API_KEY is None
31
+
32
+
33
def test_openai_model_validation():
    """Calling the OpenAI helper without a configured client must raise."""
    import asyncio

    from naked_chat.app import call_openai_model

    # Simulate a missing API key by nulling out the module-level client.
    with patch("naked_chat.app.openai_client", None):
        with pytest.raises(ValueError, match="OpenAI API key not configured"):
            asyncio.run(call_openai_model("gpt-3.5-turbo", [], 0.7, 100))
43
+
44
+
45
def test_gemini_model_validation():
    """Calling the Gemini helper without a configured key must raise."""
    import asyncio

    from naked_chat.app import call_gemini_model

    # Simulate a missing API key by patching the module-level constant.
    with patch("naked_chat.app.GOOGLE_API_KEY", None):
        with pytest.raises(ValueError, match="Google API key not configured"):
            asyncio.run(call_gemini_model("gemini-pro", [], 0.7, 100))
55
+
56
+
57
class TestAppConstants:
    """Tests for application constants and configuration."""

    def test_default_model_is_valid(self):
        """DEFAULT_MODEL must be one of the models offered in the UI selector.

        Fix: the original list omitted the legacy OpenAI models and
        "gemini-pro", all of which the app's Select widget offers and
        .env.example documents as legal DEFAULT_MODEL values — so a perfectly
        valid configuration would have failed this test.
        """
        from naked_chat.app import DEFAULT_MODEL

        # Keep this list in sync with the Select widget in app.start().
        valid_models = [
            # OpenAI Flagship Models
            "gpt-4.1",
            "gpt-4o",
            # OpenAI Reasoning Models
            "o4-mini",
            "o3",
            "o3-pro",
            "o3-mini",
            # Legacy OpenAI Models
            "gpt-3.5-turbo",
            "gpt-4",
            "gpt-4-turbo",
            # Google Gemini Models
            "gemini-2.5-pro",
            "gemini-2.5-flash",
            "gemini-pro",
        ]
        assert DEFAULT_MODEL in valid_models

    def test_import_structure(self):
        """All required modules can be imported without errors."""
        try:
            import naked_chat
            import naked_chat.app

            assert True
        except ImportError as e:
            pytest.fail(f"Import failed: {e}")
88
+
89
+
90
+ # Example of integration test structure (would need mocking for actual API calls)
91
class TestChatIntegration:
    """Integration tests for chat functionality."""

    @pytest.mark.asyncio
    async def test_message_history_management(self):
        """Rolling history must keep only the 10 most recent messages.

        This mirrors the truncation rule in app.main(); proper coverage would
        mock the Chainlit session, but this pins down the windowing logic.
        """
        history = [{"role": "user", "content": f"Message {n}"} for n in range(15)]

        # Apply the same truncation rule the app uses.
        if len(history) > 10:
            history = history[-10:]

        assert len(history) == 10
        assert history[0]["content"] == "Message 5"
        assert history[-1]["content"] == "Message 14"
vercel.json ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "builds": [
3
+ {
4
+ "src": "src/naked_chat/app.py",
5
+ "use": "@vercel/python"
6
+ }
7
+ ],
8
+ "routes": [
9
+ {
10
+ "src": "/(.*)",
11
+ "dest": "src/naked_chat/app.py"
12
+ }
13
+ ]
14
+ }