yuvrajsingh6 commited on
Commit
c5b5cc8
·
0 Parent(s):

Initial commit: Analytical Finance Chatbot with Next.js frontend and FastAPI backend

Browse files
.DS_Store ADDED
Binary file (8.2 kB). View file
 
.gitignore ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Python-generated files
2
+ __pycache__/
3
+ *.py[oc]
4
+ build/
5
+ dist/
6
+ wheels/
7
+ *.egg-info
8
+
9
+ # Virtual environments
10
+ .venv
11
+
12
+ # Environment variables
13
+ .env
14
+ *.env
15
+
16
+ # Node modules
17
+ node_modules/
18
+ .next/
19
+
20
+ # OS files
21
+ .DS_Store
README.md ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Analytical Finance Chatbot
2
+
3
+ A full-stack AI-powered chatbot for financial data analysis with an OpenAI-inspired interface. The system analyzes CSV data (holdings and trades) and provides intelligent insights through natural language queries with real-time streaming responses.
4
+
5
+ ![Tech Stack](https://img.shields.io/badge/Next.js-15+-black?style=flat-square&logo=next.js)
6
+ ![FastAPI](https://img.shields.io/badge/FastAPI-0.100+-009688?style=flat-square&logo=fastapi)
7
+ ![Python](https://img.shields.io/badge/Python-3.10+-3776AB?style=flat-square&logo=python)
8
+ ![Tailwind](https://img.shields.io/badge/Tailwind-4.0-38B2AC?style=flat-square&logo=tailwind-css)
9
+
10
+ ## 🌟 Features
11
+
12
+ ### Backend
13
+ - **Intelligent Data Analysis**: Processes financial CSV data (holdings & trades)
14
+ - **Contextual Facts Engine**: Pre-computes analytical facts for accurate responses
15
+ - **LLM-Powered Insights**: Uses Groq's Llama 3.1 for natural language understanding
16
+ - **Streaming Responses**: Real-time Server-Sent Events (SSE) for smooth UX
17
+ - **Conversation Management**: Persistent conversation history and context
18
+
19
+ ### Frontend
20
+ - **OpenAI-Inspired UI**: Clean, minimalist black & white design
21
+ - **Real-Time Streaming**: Live message streaming with visual feedback
22
+ - **Rich Markdown Support**: Tables, code blocks, lists with proper formatting
23
+ - **Responsive Design**: Mobile-friendly with collapsible sidebar
24
+ - **Dark Mode**: Automatic light/dark theme support
25
+ - **Conversation History**: Sidebar with all past conversations
26
+
27
+ ## 🏗️ Architecture
28
+
29
+ ```
30
+ project-proojectloop/
31
+ ├── backend/ # FastAPI backend
32
+ │ ├── main.py # API endpoints & streaming logic
33
+ │ ├── csv_engine.py # Data processing & fact extraction
34
+ │ ├── llm_client.py # Groq LLM integration
35
+ │ └── data/
36
+ │ ├── holdings.csv # Financial holdings data
37
+ │ └── trades.csv # Trading data
38
+
39
+ └── frontend-next/ # Next.js frontend
40
+ ├── src/
41
+ │ ├── app/
42
+ │ │ ├── layout.js # Root layout with sidebar
43
+ │ │ ├── page.js # Home page
44
+ │ │ ├── globals.css # Theme & styling
45
+ │ │ └── c/[id]/ # Dynamic chat routes
46
+ │ ├── components/
47
+ │ │ ├── Sidebar.js # Conversation list
48
+ │ │ └── ChatInterface.js # Main chat UI
49
+ │ └── lib/
50
+ │ └── api.js # API client
51
+ └── package.json
52
+ ```
53
+
54
+ ## 🚀 Quick Start
55
+
56
+ ### Prerequisites
57
+ - Python 3.10+
58
+ - Node.js 18+
59
+ - Groq API Key ([Get one here](https://console.groq.com))
60
+
61
+ ### Backend Setup
62
+
63
+ 1. **Navigate to backend directory**:
64
+ ```bash
65
+ cd backend
66
+ ```
67
+
68
+ 2. **Create virtual environment** (optional but recommended):
69
+ ```bash
70
+ python -m venv .venv
71
+ source .venv/bin/activate # On Windows: .venv\Scripts\activate
72
+ ```
73
+
74
+ 3. **Install dependencies**:
75
+ ```bash
76
+ pip install fastapi uvicorn pandas groq python-dotenv
77
+ ```
78
+
79
+ 4. **Configure environment**:
80
+ Create a `.env` file in the `backend/` directory:
81
+ ```env
82
+ GROQ_API_KEY=your_groq_api_key_here
83
+ DATA_DIR=data
84
+ ```
85
+
86
+ 5. **Run the server**:
87
+ ```bash
88
+ uvicorn main:app --host 0.0.0.0 --port 8000 --reload
89
+ ```
90
+
91
+ Backend will be available at: `http://localhost:8000`
92
+
93
+ ### Frontend Setup
94
+
95
+ 1. **Navigate to frontend directory**:
96
+ ```bash
97
+ cd frontend-next
98
+ ```
99
+
100
+ 2. **Install dependencies**:
101
+ ```bash
102
+ npm install
103
+ ```
104
+
105
+ 3. **Run the development server**:
106
+ ```bash
107
+ npm run dev
108
+ ```
109
+
110
+ Frontend will be available at: `http://localhost:3000` (or next available port)
111
+
112
+ ## 📡 API Endpoints
113
+
114
+ ### Core Endpoints
115
+
116
+ | Method | Endpoint | Description |
117
+ |--------|----------|-------------|
118
+ | `GET` | `/auth/status` | Authentication status |
119
+ | `GET` | `/chat_models` | Available chat models |
120
+ | `POST` | `/chat/new_conversation` | Create new conversation |
121
+ | `GET` | `/conversation/all` | List all conversations |
122
+ | `GET` | `/conversation/{id}` | Get specific conversation |
123
+ | `POST` | `/chat/conversation` | Send message (streaming) |
124
+ | `POST` | `/chat/get_alias` | Generate conversation title |
125
+
126
+ ### Example Request
127
+
128
+ ```bash
129
+ curl -X POST http://localhost:8000/chat/conversation \
130
+ -H "Content-Type: application/json" \
131
+ -d '{
132
+ "conversation_id": "123e4567-e89b-12d3-a456-426614174000",
133
+ "user_message": [{"type": "text", "text": "Which funds performed better?"}]
134
+ }'
135
+ ```
136
+
137
+ ## 💡 Example Queries
138
+
139
+ Try these questions with your data:
140
+
141
+ - "Which funds performed better depending on the yearly Profit and Loss?"
142
+ - "Show me the top 5 portfolios by total holdings"
143
+ - "What is the total quantity for YTUM fund?"
144
+ - "Which portfolio has the most records?"
145
+ - "Compare the performance of Garfield vs Heather funds"
146
+
147
+ ## 🎨 Tech Stack
148
+
149
+ ### Backend
150
+ - **FastAPI**: Modern Python web framework
151
+ - **Pandas**: Data processing and analysis
152
+ - **Groq**: LLM API (Llama 3.1 8B Instant)
153
+ - **Uvicorn**: ASGI server
154
+
155
+ ### Frontend
156
+ - **Next.js 15+**: React framework with App Router
157
+ - **Tailwind CSS v4**: Utility-first styling
158
+ - **React Markdown**: Rich text rendering
159
+ - **Lucide React**: Icon library
160
+ - **Inter Font**: Clean typography
161
+
162
+ ## 🔧 Configuration
163
+
164
+ ### Backend Environment Variables
165
+
166
+ ```env
167
+ GROQ_API_KEY=your_api_key # Required: Groq API key
168
+ DATA_DIR=data # Optional: Data directory path
169
+ ```
170
+
171
+ ### Frontend API Configuration
172
+
173
+ Update `src/lib/api.js` if your backend runs on a different port:
174
+
175
+ ```javascript
176
+ const API_BASE_URL = 'http://localhost:8000';
177
+ ```
178
+
179
+ ## 📊 Data Format
180
+
181
+ The system expects CSV files with the following structure:
182
+
183
+ ### Holdings CSV
184
+ - `PortfolioName`: Fund/portfolio identifier
185
+ - `PL_YTD`: Year-to-date profit/loss
186
+ - `Qty`: Quantity
187
+ - `MV_Base`: Market value
188
+ - Other financial metrics...
189
+
190
+ ### Trades CSV
191
+ - Similar structure with trade-specific fields
192
+
193
+ ## 🛠️ Development
194
+
195
+ ### Running Tests
196
+
197
+ ```bash
198
+ # Backend tests
199
+ cd backend
200
+ python test_api.py
201
+
202
+ # Frontend (if tests are added)
203
+ cd frontend-next
204
+ npm test
205
+ ```
206
+
207
+ ### Building for Production
208
+
209
+ ```bash
210
+ # Frontend production build
211
+ cd frontend-next
212
+ npm run build
213
+ npm start
214
+ ```
215
+
216
+ ## 🐛 Troubleshooting
217
+
218
+ ### Backend Issues
219
+
220
+ **Problem**: `ModuleNotFoundError: No module named 'csv_engine'`
221
+ - **Solution**: Run uvicorn from the `backend/` directory
222
+
223
+ **Problem**: No CSV files found
224
+ - **Solution**: Ensure CSV files are in `backend/data/` directory
225
+
226
+ ### Frontend Issues
227
+
228
+ **Problem**: Grey/blurry text in responses
229
+ - **Solution**: Already fixed with `prose-neutral dark:prose-invert` classes
230
+
231
+ **Problem**: Page reloads on new chat
232
+ - **Solution**: Using `window.history.pushState()` instead of router navigation
233
+
234
+ **Problem**: Tables not rendering
235
+ - **Solution**: Ensure `react-markdown` and `remark-gfm` are installed
236
+
237
+ ## 📝 License
238
+
239
+ This project is open source and available under the MIT License.
240
+
241
+ ## 🤝 Contributing
242
+
243
+ Contributions are welcome! Please feel free to submit a Pull Request.
244
+
245
+ ## 📧 Support
246
+
247
+ For issues and questions, please open an issue on the repository.
248
+
249
+ ---
250
+
251
+ **Built with ❤️ using Next.js, FastAPI, and Groq AI**
backend/.DS_Store ADDED
Binary file (6.15 kB). View file
 
backend/.python-version ADDED
@@ -0,0 +1 @@
 
 
1
+ 3.10
backend/csv_engine.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import os
3
+ import re
4
+ from typing import Dict, List, Any
5
+
6
class CSVEngine:
    """Loads CSV files from a data directory and pre-computes analytical facts.

    On load, text columns are stripped and lower-cased and financial columns
    (qty/pl_/mv_/price/principal) are coerced to numbers, so all downstream
    matching is case-insensitive and arithmetic-safe.
    """

    # Lower-cased column names that can identify a fund/portfolio grouping.
    _GROUP_CANDIDATES = ('portfolioname', 'portfolio_name', 'shortname', 'name')

    def __init__(self, data_directory: str):
        self.data_directory = data_directory
        # filename -> normalized DataFrame
        self.dataframes: Dict[str, pd.DataFrame] = {}

    @staticmethod
    def _find_group_col(df, candidates):
        """Return the first column whose lower-cased name is in `candidates`, else None."""
        return next((c for c in df.columns if c.lower() in candidates), None)

    def load_all_csvs(self) -> bool:
        """Load every *.csv file in the data directory.

        Returns:
            True if at least one non-empty CSV was loaded, False otherwise.
        """
        if not os.path.isdir(self.data_directory):
            return False

        loaded = False
        for file in os.listdir(self.data_directory):
            if not file.lower().endswith(".csv"):
                continue
            path = os.path.join(self.data_directory, file)
            try:
                df = pd.read_csv(path)
                for col in df.columns:
                    # Normalize: strip and lowercase all string values.
                    if df[col].dtype == 'object':
                        df[col] = df[col].astype(str).str.strip().str.lower()
                    # Coerce financial columns to numbers; placeholders such as
                    # 'null'/'nan' become NaN under errors='coerce' and then 0.
                    # (The old replace('NULL', 0) was dead code: values had
                    # already been lower-cased before it ran.)
                    if any(x in col.lower() for x in ['qty', 'pl_', 'mv_', 'price', 'principal']):
                        df[col] = pd.to_numeric(df[col], errors='coerce').fillna(0)

                if not df.empty:
                    self.dataframes[file] = df
                    loaded = True
            except Exception as e:
                print(f"Error loading {file}: {e}")

        return loaded

    def get_analytical_facts(self, query: str) -> str:
        """Consolidated analytical engine. Detects intent in `query` and
        computes verifiable facts (rankings, record counts, per-fund totals).

        Returns:
            Newline-joined "Fact: ..." strings, or a fallback sentence when
            nothing could be computed.
        """
        q = query.lower()
        facts: List[str] = []

        # 1. Identify potential entities (words that aren't common stopwords).
        words = set(re.findall(r'\b\w{3,}\b', q))
        stop_words = {
            'the', 'what', 'which', 'many', 'total', 'how', 'for', 'present',
            'data', 'provided', 'based', 'perform', 'better', 'than', 'number',
            'trades', 'holdings', 'and', 'with', 'that', 'fund', 'depends',
            'depending', 'yearly', 'loss', 'profit', 'show', 'tell', 'me', 'about',
            'most', 'least', 'highest', 'lowest', 'records', 'portfolio'
        }
        entities = words - stop_words

        # Collect every fund/portfolio name present in the data for cross-referencing.
        all_known_funds = set()
        for df in self.dataframes.values():
            group_col = self._find_group_col(df, self._GROUP_CANDIDATES)
            if group_col:
                all_known_funds.update(df[group_col].astype(str).unique())

        # 2. Performance / ranking facts.
        if any(k in q for k in ["profit", "loss", "performed", "performance", "p&l", "ranking", "better", "best"]):
            for filename, df in self.dataframes.items():
                pl_col = next((c for c in df.columns if c.lower() == 'pl_ytd'), None)
                group_col = self._find_group_col(df, self._GROUP_CANDIDATES)

                if pl_col and group_col:
                    stats = df.groupby(group_col)[pl_col].sum().sort_values(ascending=False).to_dict()
                    if stats:
                        ranked_names = [name.capitalize() for name in stats.keys()]
                        # Fixed: fact text now names the source file instead of
                        # emitting the literal '(unknown)'.
                        facts.append(f"Fact: In {filename}, funds ranked by performance (Best to Worst): {', '.join(ranked_names)}")
                        for name, val in stats.items():
                            facts.append(f"Fact: Fund {name.capitalize()} in {filename} has total PL_YTD of {val:.4f}")

        # 3. Global stats / generic record counting.
        if any(k in q for k in ["most", "least", "count", "record", "portfolio", "records", "highest", "lowest"]):
            for filename, df in self.dataframes.items():
                group_col = self._find_group_col(df, self._GROUP_CANDIDATES)
                if group_col:
                    counts = df[group_col].value_counts().sort_values(ascending=False).to_dict()
                    if counts:
                        top_portfolio = next(iter(counts))
                        facts.append(f"Fact: In {filename}, portfolio '{top_portfolio.capitalize()}' has the absolute highest number of records ({counts[top_portfolio]}).")
                        # Detailed counts for every portfolio.
                        details = [f"{k.capitalize()}: {v} records" for k, v in counts.items()]
                        facts.append(f"Fact: Summary of record counts in {filename}: {', '.join(details)}.")

        # 4. Entity-specific investigation (dynamic).
        detected_entities = entities & all_known_funds

        # If no exact fund name appears in the query, fall back to partial
        # (substring) matching against the known fund names.
        if not detected_entities:
            for entity in entities:
                detected_entities.update(fund for fund in all_known_funds if entity in fund)

        for entity in detected_entities:
            for filename, df in self.dataframes.items():
                group_col = self._find_group_col(df, self._GROUP_CANDIDATES + ('strategyname',))
                if group_col:
                    mask = df[group_col].astype(str).str.contains(entity, case=False, na=False)
                    count = int(mask.sum())
                    # Known demo funds always get a count fact, even when zero.
                    if count > 0 or entity in ["ytum", "garfield", "heather"]:
                        facts.append(f"Fact: {entity.capitalize()} has {count} records in {filename}.")

                    if count > 0:
                        pl_col = next((c for c in df.columns if c.lower() == 'pl_ytd'), None)
                        if pl_col:
                            total_pl = df[mask][pl_col].sum()
                            facts.append(f"Fact: {entity.capitalize()} has a total PL_YTD of {total_pl:.4f} in {filename}.")

                        qty_col = next((c for c in df.columns if c.lower() == 'qty'), None)
                        if qty_col:
                            total_qty = df[mask][qty_col].sum()
                            facts.append(f"Fact: {entity.capitalize()} has a total Quantity of {total_qty:.4f} in {filename}.")

        # Deduplicate while preserving insertion order.
        facts = list(dict.fromkeys(facts))
        return "\n".join(facts) if facts else "No specific numerical facts computed for the given entities/metrics."

    def get_schema_sample(self, row_limit: int = 3) -> str:
        """Return, per file, the column list and the first `row_limit` rows as text."""
        output = []
        for filename, df in self.dataframes.items():
            # Fixed: include the actual filename (was the literal '(unknown)').
            output.append(f"### Dataset: {filename}")
            output.append(f"Columns: {', '.join(df.columns)}")
            output.append(df.head(row_limit).to_string(index=False))
            output.append("")
        return "\n".join(output)

    def validate_schema(self) -> bool:
        """True only when at least one DataFrame is loaded and none is empty or column-less."""
        if not self.dataframes:
            return False
        for df in self.dataframes.values():
            if df.empty or len(df.columns) == 0:
                return False
        return True
backend/data/holdings.csv ADDED
The diff for this file is too large to render. See raw diff
 
backend/data/trades.csv ADDED
The diff for this file is too large to render. See raw diff
 
backend/llm_client.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from groq import Groq
from dotenv import load_dotenv

# Pull GROQ_API_KEY (and any other settings) from a local .env file into the
# process environment before LLMClient reads it.
load_dotenv()

# System prompt sent on every request. Placeholders filled by LLMClient.get_answer:
#   {FACTS}         - pre-computed facts from CSVEngine.get_analytical_facts
#   {CSV_DATA}      - schema + sample rows from CSVEngine.get_schema_sample
#   {USER_QUESTION} - the raw user question
SYSTEM_PROMPT_TEMPLATE = """You are a helpful and professional Financial Portfolio Assistant.

Your objective is to provide clear, insightful, and accurate answers based on the provided data.

CONTEXT:
1. DATA INSIGHTS: These are pre-verified facts computed from the entire dataset. Use these as your primary source of truth for counts, sums, and rankings.
2. SCHEMA SAMPLE: This shows the structure and a few example rows for context.

HOW TO ANSWER:
- Be conversational yet professional. Do NOT mention "Pre-computed facts" or "technical data" to the user. Simply present the numbers as part of your helpful response.
- Use bold text, bullet points, and tables to make the data easy to read.
- **Provide a comprehensive list**: If the DATA INSIGHTS contain many funds or portfolios, include all of them in your answer (using a table or list) instead of just the top few, unless the user specifically asks for a "top X".
- If the data is available, answer directly and clearly.
- If you don't have enough data to answer, say: "I'm sorry, I couldn't find specific information for that in the current data."
- Treat technical terms like 'PL_YTD' as 'Profit and Loss' and 'MV_Base' as 'Market Value'.

STRICT CONSTRAINTS:
- Use only the provided information. No external knowledge.
- Be precise with numbers—do not round them unless requested.

DATA INSIGHTS:
{FACTS}

SCHEMA SAMPLE:
{CSV_DATA}

User Question:
{USER_QUESTION}
"""
42
class LLMClient:
    """Thin wrapper around the Groq chat-completions API.

    Formats SYSTEM_PROMPT_TEMPLATE with the pre-computed facts, schema sample
    and user question, then requests a single non-streaming completion.
    """

    def __init__(self):
        """Build the Groq client from GROQ_API_KEY.

        Raises:
            ValueError: if GROQ_API_KEY is not set in the environment.
        """
        key = os.getenv("GROQ_API_KEY")
        if not key:
            raise ValueError("GROQ_API_KEY not found in environment variables.")

        self.client = Groq(api_key=key)
        self.model = "llama-3.1-8b-instant"

    def get_answer(self, context: str, question: str, facts: str = "") -> str:
        """Ask the LLM for an answer grounded in `facts` and `context`.

        Args:
            context: schema + sample rows text (CSVEngine.get_schema_sample).
            question: the raw user question.
            facts: pre-computed fact lines; a placeholder is used when empty.

        Returns:
            The model's answer, or an error string on any API failure
            (deliberate best-effort: callers never see an exception).
        """
        insights = facts if facts else "No specific facts computed."
        prompt = SYSTEM_PROMPT_TEMPLATE.format(
            FACTS=insights,
            CSV_DATA=context,
            USER_QUESTION=question,
        )

        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=[{"role": "system", "content": prompt}],
                temperature=0,
                max_tokens=1000,
            )
            return completion.choices[0].message.content.strip()
        except Exception as exc:
            return f"Error communicating with LLM: {str(exc)}"
backend/main.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException, Request
2
+ from fastapi.middleware.cors import CORSMiddleware
3
+ from fastapi.responses import StreamingResponse
4
+ from pydantic import BaseModel
5
+ from csv_engine import CSVEngine
6
+ from llm_client import LLMClient
7
+ import os
8
+ import json
9
+ import uuid
10
+ from datetime import datetime
11
+ from contextlib import asynccontextmanager
12
+
13
+ # Memory storage for conversations
14
+ conversations = {}
15
+
16
+ @asynccontextmanager
17
+ async def lifespan(app: FastAPI):
18
+ # Startup
19
+ if not engine.load_all_csvs():
20
+ print("Warning: No CSV files found.")
21
+ if not engine.validate_schema():
22
+ print("Warning: CSV files are invalid or empty.")
23
+ yield
24
+ # Shutdown
25
+
26
+ app = FastAPI(title="Analytical Chatbot API", lifespan=lifespan)
27
+
28
+ # Enable CORS for the frontend
29
+ app.add_middleware(
30
+ CORSMiddleware,
31
+ allow_origins=["*"], # In production, restrict this to your frontend URL
32
+ allow_credentials=True,
33
+ allow_methods=["*"],
34
+ allow_headers=["*"],
35
+ )
36
+
37
+ class ChatRequest(BaseModel):
38
+ question: str
39
+
40
+ class OldChatResponse(BaseModel):
41
+ answer: str
42
+
43
+ DATA_DIR = os.getenv("DATA_DIR", "data")
44
+ engine = CSVEngine(DATA_DIR)
45
+ llm = LLMClient()
46
+
47
+ # --- Compatibility Endpoints for DevoChat UI ---
48
+
49
@app.get("/auth/status")
async def auth_status():
    """Report a stubbed logged-in admin user so the DevoChat UI skips its login flow."""
    return {"logged_in": True, "user": {"id": "user_123", "username": "admin", "role": "admin"}}

@app.get("/auth/user")
async def auth_user():
    """Return the same stubbed user record as /auth/status."""
    return {"id": "user_123", "username": "admin", "role": "admin"}

@app.get("/chat_models")
async def chat_models():
    """Advertise the single 'analytical' model with the capability flags the UI expects."""
    return {
        "default": "analytical",
        "models": [{
            "model_name": "analytical",
            "model_alias": "Analytical Engine",
            "capabilities": {"stream": True, "inference": False, "search": False, "deep_research": False, "image": False, "mcp": False},
            "controls": {"temperature": True, "reason": True, "verbosity": True, "system_message": True},
            "billing": {"in_billing": 0, "out_billing": 0},
            "description": "Financial Analysis Engine",
            "endpoint": "/chat/conversation"
        }]
    }

@app.get("/image_models")
async def image_models():
    """No image models are offered by this backend."""
    return {"default": None, "models": []}

@app.get("/realtime_models")
async def realtime_models():
    """No realtime models are offered by this backend."""
    return {"default": None, "models": []}

@app.get("/notice")
async def get_notice():
    """Static welcome notice; the 'hash' lets the client detect notice changes."""
    return {"message": "Welcome to the Analytical Finance Chatbot!", "hash": "v1"}

@app.post("/chat/new_conversation")
async def new_conversation():
    """Create an empty in-memory conversation and return its full record."""
    conversation_id = str(uuid.uuid4())
    now = datetime.now().isoformat()
    conversations[conversation_id] = {
        "conversation_id": conversation_id,
        "messages": [],
        "created_at": now,
        "updated_at": now,
        "model": "analytical"
    }
    return conversations[conversation_id]

@app.get("/conversation/all")
async def get_all_conversations():
    """List every stored conversation (no pagination, in-memory only)."""
    return list(conversations.values())

@app.get("/conversation/{conversation_id}")
async def get_conversation(conversation_id: str):
    """Fetch a single conversation by id; 404 if unknown."""
    if conversation_id not in conversations:
        raise HTTPException(status_code=404, detail="Conversation not found")
    return conversations[conversation_id]

@app.post("/chat/get_alias")
async def get_alias(request: dict):
    """Generate a short sidebar title for a conversation."""
    # Simple alias generator: take first 20 chars of the text
    text = request.get("text", "New Conversation")
    alias = text[:20] + "..." if len(text) > 20 else text
    return {"alias": alias}
113
+
114
+ # --- Core Analytical Chat Endpoint (Streaming version for DevoChat) ---
115
+
116
class DevoChatRequest(BaseModel):
    # Body schema for the streaming /chat/conversation endpoint.
    conversation_id: str
    user_message: list  # List of dicts with 'text' or 'image'

async def stream_analytical_response(question: str, conversation_id: str):
    """Yield an SSE stream answering `question`, recording the exchange in memory.

    The LLM call itself is blocking and non-streaming; the finished answer is
    chunked afterwards purely so the UI can render it incrementally.
    Wire format per chunk: 'data: {"content": "..."}\\n\\n', terminated by
    'data: [DONE]\\n\\n'; errors are reported in-stream as {"error": ...}.
    """
    try:
        # Get facts and schema
        facts_text = engine.get_analytical_facts(question)
        sample = engine.get_schema_sample()

        # Get answer from LLM
        # Note: Our current LLMClient doesn't support streaming from Groq yet,
        # so we'll simulate streaming for the UI.
        full_answer = llm.get_answer(sample, question, facts_text)

        # Update history (only for conversations created via /chat/new_conversation;
        # unknown ids are answered but not recorded).
        if conversation_id in conversations:
            conversations[conversation_id]["messages"].append({"role": "user", "content": question})
            conversations[conversation_id]["messages"].append({"role": "assistant", "content": full_answer})
            conversations[conversation_id]["updated_at"] = datetime.now().isoformat()

        # Stream chunks to the UI
        # DevoChat expects "data: {\"content\": \"...\"}\n\n"
        chunk_size = 20
        for i in range(0, len(full_answer), chunk_size):
            chunk = full_answer[i:i+chunk_size]
            yield f"data: {json.dumps({'content': chunk})}\n\n"

        yield "data: [DONE]\n\n"
    except Exception as e:
        # Report the failure inside the stream — the HTTP status is already 200.
        yield f"data: {json.dumps({'error': str(e)})}\n\n"
147
+
148
@app.post("/chat/conversation")
async def chat_conversation(request: DevoChatRequest):
    """Stream an analytical answer (SSE) for the first text part of the message.

    Raises:
        HTTPException(400): when the payload contains no non-empty text part.
    """
    # Pull the first 'text'-typed segment out of the multi-part message list.
    user_text = next(
        (part.get("text") for part in request.user_message if part.get("type") == "text"),
        "",
    )

    if not user_text:
        raise HTTPException(status_code=400, detail="No text message found")

    return StreamingResponse(
        stream_analytical_response(user_text, request.conversation_id),
        media_type="text/event-stream",
    )
165
+
166
# --- Keep original endpoint for compatibility ---
@app.post("/chat", response_model=OldChatResponse)
async def chat_legacy(request: ChatRequest):
    """Non-streaming variant: compute facts + schema and return one JSON answer."""
    facts_text = engine.get_analytical_facts(request.question)
    sample = engine.get_schema_sample()
    answer = llm.get_answer(sample, request.question, facts_text)
    return OldChatResponse(answer=answer)

if __name__ == "__main__":
    # Allow `python main.py` as an alternative to invoking uvicorn directly.
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
backend/pyproject.toml ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "project-proojectloop"
3
+ version = "0.1.0"
4
+ description = "FastAPI backend for the Analytical Finance Chatbot (CSV analysis + Groq LLM)"
5
+ readme = "README.md"
6
+ requires-python = ">=3.10"
7
+ dependencies = [
8
+ "fastapi",
9
+ "uvicorn",
10
+ "pandas",
11
+ "groq",
12
+ "python-dotenv",
13
+ ]
14
+
backend/test_api.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import requests
2
+ import json
3
+
4
+ BASE_URL = "http://127.0.0.1:8000"
5
+
6
def test_chat(question):
    """POST `question` to the local /chat endpoint and print the result."""
    resp = requests.post(f"{BASE_URL}/chat", json={"question": question})
    print(f"Question: {question}")
    if resp.status_code == 200:
        print(f"Answer: {resp.json()['answer']}")
    else:
        print(f"Error: {resp.status_code} - {resp.text}")
    print("-" * 30)
15
+
16
if __name__ == "__main__":
    # Manual smoke tests: requires the backend already running on port 8000.
    # Test 1: Performance Ranking
    test_chat("Which funds performed better depending on the yearly Profit and Loss of that fund?")

    # Test 2: Specific Fund (Ytum)
    test_chat("How many holdings does the Ytum fund have?")

    # Test 3: Specific Fund (Garfield)
    test_chat("Can you tell me about the performance of the Garfield fund?")

    # Test 4: Cross-file check
    test_chat("Does Heather have any trades?")

    # Test 5: Generic query
    test_chat("Which portfolio has the most records?")
31
+
frontend-next/.gitignore ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2
+
3
+ # dependencies
4
+ /node_modules
5
+ /.pnp
6
+ .pnp.*
7
+ .yarn/*
8
+ !.yarn/patches
9
+ !.yarn/plugins
10
+ !.yarn/releases
11
+ !.yarn/versions
12
+
13
+ # testing
14
+ /coverage
15
+
16
+ # next.js
17
+ /.next/
18
+ /out/
19
+
20
+ # production
21
+ /build
22
+
23
+ # misc
24
+ .DS_Store
25
+ *.pem
26
+
27
+ # debug
28
+ npm-debug.log*
29
+ yarn-debug.log*
30
+ yarn-error.log*
31
+ .pnpm-debug.log*
32
+
33
+ # env files (can opt-in for committing if needed)
34
+ .env*
35
+
36
+ # vercel
37
+ .vercel
38
+
39
+ # typescript
40
+ *.tsbuildinfo
41
+ next-env.d.ts
frontend-next/README.md ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ This is a [Next.js](https://nextjs.org) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app).
2
+
3
+ ## Getting Started
4
+
5
+ First, run the development server:
6
+
7
+ ```bash
8
+ npm run dev
9
+ # or
10
+ yarn dev
11
+ # or
12
+ pnpm dev
13
+ # or
14
+ bun dev
15
+ ```
16
+
17
+ Open [http://localhost:3000](http://localhost:3000) with your browser to see the result.
18
+
19
+ You can start editing the page by modifying `app/page.js`. The page auto-updates as you edit the file.
20
+
21
+ This project uses [`next/font`](https://nextjs.org/docs/app/building-your-application/optimizing/fonts) to automatically optimize and load [Geist](https://vercel.com/font), a new font family for Vercel.
22
+
23
+ ## Learn More
24
+
25
+ To learn more about Next.js, take a look at the following resources:
26
+
27
+ - [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API.
28
+ - [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial.
29
+
30
+ You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js) - your feedback and contributions are welcome!
31
+
32
+ ## Deploy on Vercel
33
+
34
+ The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js.
35
+
36
+ Check out our [Next.js deployment documentation](https://nextjs.org/docs/app/building-your-application/deploying) for more details.
frontend-next/eslint.config.mjs ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { defineConfig, globalIgnores } from "eslint/config";
2
+ import nextVitals from "eslint-config-next/core-web-vitals";
3
+
4
+ const eslintConfig = defineConfig([
5
+ ...nextVitals,
6
+ // Override default ignores of eslint-config-next.
7
+ globalIgnores([
8
+ // Default ignores of eslint-config-next:
9
+ ".next/**",
10
+ "out/**",
11
+ "build/**",
12
+ "next-env.d.ts",
13
+ ]),
14
+ ]);
15
+
16
+ export default eslintConfig;
frontend-next/jsconfig.json ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ {
2
+ "compilerOptions": {
3
+ "paths": {
4
+ "@/*": ["./src/*"]
5
+ }
6
+ }
7
+ }
frontend-next/next.config.mjs ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ /** @type {import('next').NextConfig} */
2
+ const nextConfig = {
3
+ /* config options here */
4
+ };
5
+
6
+ export default nextConfig;
frontend-next/package-lock.json ADDED
The diff for this file is too large to render. See raw diff
 
frontend-next/package.json ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "frontend-next",
3
+ "version": "0.1.0",
4
+ "private": true,
5
+ "scripts": {
6
+ "dev": "next dev",
7
+ "build": "next build",
8
+ "start": "next start",
9
+ "lint": "eslint"
10
+ },
11
+ "dependencies": {
12
+ "@tailwindcss/typography": "^0.5.19",
13
+ "lucide-react": "^0.562.0",
14
+ "next": "16.1.4",
15
+ "react": "19.2.3",
16
+ "react-dom": "19.2.3",
17
+ "react-markdown": "^10.1.0",
18
+ "remark-gfm": "^4.0.1"
19
+ },
20
+ "devDependencies": {
21
+ "@tailwindcss/postcss": "^4",
22
+ "eslint": "^9",
23
+ "eslint-config-next": "16.1.4",
24
+ "tailwindcss": "^4"
25
+ }
26
+ }
frontend-next/postcss.config.mjs ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ const config = {
2
+ plugins: {
3
+ "@tailwindcss/postcss": {},
4
+ },
5
+ };
6
+
7
+ export default config;
frontend-next/public/file.svg ADDED
frontend-next/public/globe.svg ADDED
frontend-next/public/next.svg ADDED
frontend-next/public/vercel.svg ADDED
frontend-next/public/window.svg ADDED
frontend-next/src/app/c/[id]/page.js ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ChatInterface from '@/components/ChatInterface';
2
+ import { api } from '@/lib/api';
3
+ import { redirect } from 'next/navigation';
4
+
5
+ // This is a Server Component fetching initial data
6
+ export default async function ChatPage({ params }) {
7
+ const { id } = params;
8
+
9
+ // Fetch conversation history server-side for initial render
10
+ // Note: We need to handle this gracefully if it fails
11
+ let initialMessages = [];
12
+ try {
13
+ const data = await api.getConversation(id);
14
+ if (!data) {
15
+ // Conversation not found, redirect to new chat
16
+ redirect('/');
17
+ }
18
+ if (data && data.messages) {
19
+ initialMessages = data.messages;
20
+ }
21
+ } catch (e) {
22
+ // If redirect throws (which it does in Next.js), let it bubble up
23
+ if (e.message === 'NEXT_REDIRECT') throw e;
24
+ console.error("Failed to load conversation", e);
25
+ }
26
+
27
+ return (
28
+ <div className="h-full flex-1">
29
+ <ChatInterface conversationId={id} initialMessages={initialMessages} />
30
+ </div>
31
+ );
32
+ }
frontend-next/src/app/favicon.ico ADDED
frontend-next/src/app/globals.css ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
@import "tailwindcss";
@plugin "@tailwindcss/typography";

/* Tailwind v4 theme tokens. */
@theme {
  --font-sans: 'Inter', sans-serif;
}

/* Light-mode palette. The *-rgb variables hold bare RGB triplets
   ("r, g, b") and must be wrapped in rgb() at the point of use;
   the sidebar/message variables are complete color values. */
:root {
  --foreground-rgb: 0, 0, 0;
  --background-start-rgb: 255, 255, 255;
  --background-end-rgb: 255, 255, 255;

  --sidebar-bg: #000000;
  --sidebar-fg: #ffffff;
  --sidebar-hover: #202123;

  --user-msg-bg: #ffffff;
  --ai-msg-bg: #f7f7f8;
  --border-color: #e5e5e5;
}

/* Dark-mode overrides driven by the OS preference (no manual toggle). */
@media (prefers-color-scheme: dark) {
  :root {
    --foreground-rgb: 255, 255, 255;
    --background-start-rgb: 52, 53, 65;
    --background-end-rgb: 52, 53, 65;

    --sidebar-bg: #202123;
    --sidebar-fg: #ececf1;
    --sidebar-hover: #2a2b32;

    --user-msg-bg: #343541;
    --ai-msg-bg: #444654;
    --border-color: #2a2b32;
  }
}

body {
  color: rgb(var(--foreground-rgb));
  background: rgb(var(--background-start-rgb));
  font-family: var(--font-sans);
}

/* Custom Scrollbar */

/* Fully hides the scrollbar (WebKit/Blink only). */
.scrollbar-hide::-webkit-scrollbar {
  display: none;
}

/* Slim translucent scrollbar used in the sidebar conversation list. */
.custom-scrollbar::-webkit-scrollbar {
  width: 6px;
}

.custom-scrollbar::-webkit-scrollbar-track {
  background: transparent;
}

.custom-scrollbar::-webkit-scrollbar-thumb {
  background-color: rgba(255, 255, 255, 0.2);
  border-radius: 3px;
}
frontend-next/src/app/layout.js ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import { Inter } from 'next/font/google';
2
+ import './globals.css';
3
+ import Sidebar from '@/components/Sidebar';
4
+
5
+ const inter = Inter({ subsets: ['latin'] });
6
+
7
+ export const metadata = {
8
+ title: 'Analytical Chat',
9
+ description: 'AI-powered analytical assistant',
10
+ };
11
+
12
+ export default function RootLayout({ children }) {
13
+ return (
14
+ <html lang="en">
15
+ <body className={inter.className}>
16
+ <div className="flex h-screen overflow-hidden">
17
+ {/* Sidebar */}
18
+ <div className="hidden md:flex flex-shrink-0 bg-black">
19
+ <Sidebar />
20
+ </div>
21
+
22
+ {/* Main Content */}
23
+ <main className="flex-1 relative h-full max-w-full overflow-hidden flex flex-col bg-[var(--background-start-rgb)]">
24
+ {children}
25
+ </main>
26
+ </div>
27
+ </body>
28
+ </html>
29
+ );
30
+ }
frontend-next/src/app/page.js ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import ChatInterface from '@/components/ChatInterface';
2
+
3
+ export default function Home() {
4
+ return (
5
+ <div className="h-full flex-1">
6
+ <ChatInterface />
7
+ </div>
8
+ );
9
+ }
frontend-next/src/components/ChatInterface.js ADDED
@@ -0,0 +1,237 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "use client";
2
+
3
+ import { useState, useRef, useEffect } from 'react';
4
+ import { Send, User, Bot, Loader2 } from 'lucide-react';
5
+ import { api } from '@/lib/api';
6
+ import { useRouter } from 'next/navigation';
7
+ import ReactMarkdown from 'react-markdown';
8
+ import remarkGfm from 'remark-gfm';
9
+
10
+ export default function ChatInterface({ conversationId, initialMessages = [] }) {
11
+ const [messages, setMessages] = useState(initialMessages);
12
+ const [input, setInput] = useState('');
13
+ const [isLoading, setIsLoading] = useState(false);
14
+ const messagesEndRef = useRef(null);
15
+ const router = useRouter();
16
+ const [streamingContent, setStreamingContent] = useState('');
17
+
18
+ const scrollToBottom = () => {
19
+ messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
20
+ };
21
+
22
+ useEffect(() => {
23
+ scrollToBottom();
24
+ }, [messages, streamingContent]);
25
+
26
+ const handleSubmit = async (e) => {
27
+ e.preventDefault();
28
+ if (!input.trim() || isLoading) return;
29
+
30
+ const userMessage = input.trim();
31
+ setInput('');
32
+ setMessages(prev => [...prev, { role: 'user', content: userMessage }]);
33
+ setIsLoading(true);
34
+ setStreamingContent('');
35
+
36
+ let currentId = conversationId;
37
+
38
+ try {
39
+ // If no conversation ID, create one first
40
+ if (!currentId) {
41
+ const newConv = await api.createConversation();
42
+ if (!newConv) throw new Error("Failed to create conversation");
43
+ currentId = newConv.conversation_id;
44
+ // Update URL without reloading
45
+ window.history.pushState({}, '', `/c/${currentId}`);
46
+ // Or use router.replace if prefer Next.js way, but we want to stay mounted
47
+ // We might need to handle this carefully.
48
+ // For now, let's just proceed with the currentId for the request.
49
+ }
50
+
51
+ // Start streaming request
52
+ const response = await fetch(api.getChatEndpoint(), {
53
+ method: 'POST',
54
+ headers: {
55
+ 'Content-Type': 'application/json',
56
+ },
57
+ body: JSON.stringify({
58
+ conversation_id: currentId,
59
+ user_message: [{ type: 'text', text: userMessage }]
60
+ }),
61
+ });
62
+
63
+ if (!response.ok) throw new Error('Network response was not ok');
64
+
65
+ const reader = response.body.getReader();
66
+ const decoder = new TextDecoder();
67
+ let done = false;
68
+ let fullAssistantMessage = '';
69
+
70
+ while (!done) {
71
+ const { value, done: doneReading } = await reader.read();
72
+ done = doneReading;
73
+ const chunkValue = decoder.decode(value, { stream: !done });
74
+
75
+ // Parse SSE format: "data: {...}\n\n"
76
+ const lines = chunkValue.split('\n\n');
77
+ for (const line of lines) {
78
+ if (line.startsWith('data: ')) {
79
+ const dataStr = line.slice(6);
80
+ if (dataStr === '[DONE]') {
81
+ done = true;
82
+ break;
83
+ }
84
+ try {
85
+ const data = JSON.parse(dataStr);
86
+ if (data.content) {
87
+ fullAssistantMessage += data.content;
88
+ setStreamingContent(fullAssistantMessage);
89
+ }
90
+ if (data.error) {
91
+ console.error("Stream error:", data.error);
92
+ }
93
+ } catch (e) {
94
+ // ignore partial json
95
+ }
96
+ }
97
+ }
98
+ }
99
+
100
+ // Finalize message
101
+ setMessages(prev => [...prev, { role: 'assistant', content: fullAssistantMessage }]);
102
+ setStreamingContent('');
103
+ setIsLoading(false);
104
+
105
+ // If we created a new chat, update the sidebar without reloading the chat component
106
+ if (!conversationId && currentId) {
107
+ window.dispatchEvent(new Event('chat-update'));
108
+ // Ensure the router knows about the new path for future navigations,
109
+ // but do it silently if possible or just rely on pushState.
110
+ // We already did pushState.
111
+ } else {
112
+ // Triggers sidebar update for existing chats too (timestamp update)
113
+ window.dispatchEvent(new Event('chat-update'));
114
+ }
115
+
116
+ } catch (error) {
117
+ console.error("Error sending message:", error);
118
+ setMessages(prev => [...prev, { role: 'system', content: "Error sending message. Please try again." }]);
119
+ setIsLoading(false);
120
+ }
121
+ };
122
+
123
+ return (
124
+ <div className="flex flex-col h-full max-w-3xl mx-auto w-full">
125
+ {/* Messages Area */}
126
+ <div className="flex-1 overflow-y-auto w-full p-4 md:p-6 pb-32">
127
+ {messages.length === 0 && (
128
+ <div className="h-full flex flex-col items-center justify-center text-center opacity-50">
129
+ <h1 className="text-4xl font-semibold mb-8">Analytical Chat</h1>
130
+ <p className="max-w-md text-sm">Ask anything about your data. The AI will analyze and provide insights.</p>
131
+ </div>
132
+ )}
133
+
134
+ {messages.map((msg, idx) => (
135
+ <div key={idx} className={`group w-full text-gray-800 dark:text-gray-100 border-b border-black/5 dark:border-white/5 ${msg.role === 'assistant' ? 'bg-[var(--ai-msg-bg)]' : 'bg-[var(--user-msg-bg)]'
136
+ }`}>
137
+ <div className="text-base gap-4 md:gap-6 md:max-w-2xl lg:max-w-[38rem] xl:max-w-3xl flex lg:px-0 m-auto p-4 md:py-6">
138
+ <div className="w-[30px] flex flex-col relative items-end">
139
+ <div className={`relative h-[30px] w-[30px] p-1 rounded-sm flex items-center justify-center ${msg.role === 'user' ? 'bg-black text-white dark:bg-white dark:text-black' : 'bg-green-500 text-white'}`}>
140
+ {msg.role === 'user' ? <User size={18} /> : <Bot size={18} />}
141
+ </div>
142
+ </div>
143
+ <div className="relative flex w-[calc(100%-50px)] flex-col gap-1 md:gap-3 lg:w-[calc(100%-115px)]">
144
+ <div className="flex flex-grow flex-col gap-3">
145
+ <div className="min-h-[20px] flex flex-col items-start gap-4 whitespace-pre-wrap break-words prose prose-neutral dark:prose-invert max-w-none text-black dark:text-gray-100">
146
+ <ReactMarkdown
147
+ remarkPlugins={[remarkGfm]}
148
+ components={{
149
+ table: ({ node, ...props }) => <div className="overflow-x-auto my-4 w-full"><table className="border-collapse table-auto w-full text-sm" {...props} /></div>,
150
+ th: ({ node, ...props }) => <th className="border-b dark:border-white/20 border-black/10 px-4 py-2 text-left font-semibold" {...props} />,
151
+ td: ({ node, ...props }) => <td className="border-b dark:border-white/10 border-black/5 px-4 py-2" {...props} />,
152
+ p: ({ node, ...props }) => <p className="mb-2 last:mb-0" {...props} />,
153
+ ul: ({ node, ...props }) => <ul className="list-disc pl-4 mb-4" {...props} />,
154
+ ol: ({ node, ...props }) => <ol className="list-decimal pl-4 mb-4" {...props} />,
155
+ li: ({ node, ...props }) => <li className="mb-1" {...props} />,
156
+ code: ({ node, inline, className, children, ...props }) => {
157
+ return inline ?
158
+ <code className="bg-black/10 dark:bg-white/10 rounded-sm px-1 py-0.5 font-mono text-sm" {...props}>{children}</code> :
159
+ <code className="block bg-black/10 dark:bg-white/10 rounded-md p-4 font-mono text-sm overflow-x-auto my-2" {...props}>{children}</code>
160
+ }
161
+ }}
162
+ >
163
+ {msg.content}
164
+ </ReactMarkdown>
165
+ </div>
166
+ </div>
167
+ </div>
168
+ </div>
169
+ </div>
170
+ ))}
171
+
172
+ {/* Streaming Message Bubble */}
173
+ {(isLoading && streamingContent) && (
174
+ <div className="group w-full text-gray-800 dark:text-gray-100 border-b border-black/5 dark:border-white/5 bg-[var(--ai-msg-bg)]">
175
+ <div className="text-base gap-4 md:gap-6 md:max-w-2xl lg:max-w-[38rem] xl:max-w-3xl flex lg:px-0 m-auto p-4 md:py-6">
176
+ <div className="w-[30px] flex flex-col relative items-end">
177
+ <div className="relative h-[30px] w-[30px] p-1 rounded-sm flex items-center justify-center bg-green-500 text-white">
178
+ <Bot size={18} />
179
+ </div>
180
+ </div>
181
+ <div className="relative flex w-[calc(100%-50px)] flex-col gap-1 md:gap-3 lg:w-[calc(100%-115px)]">
182
+ <div className="flex flex-grow flex-col gap-3">
183
+ <div className="min-h-[20px] flex flex-col items-start gap-4 whitespace-pre-wrap break-words prose prose-neutral dark:prose-invert max-w-none text-black dark:text-gray-100">
184
+ <ReactMarkdown
185
+ remarkPlugins={[remarkGfm]}
186
+ components={{
187
+ table: ({ node, ...props }) => <div className="overflow-x-auto my-4 w-full"><table className="border-collapse table-auto w-full text-sm" {...props} /></div>,
188
+ th: ({ node, ...props }) => <th className="border-b dark:border-white/20 border-black/10 px-4 py-2 text-left font-semibold" {...props} />,
189
+ td: ({ node, ...props }) => <td className="border-b dark:border-white/10 border-black/5 px-4 py-2" {...props} />,
190
+ }}
191
+ >
192
+ {streamingContent}
193
+ </ReactMarkdown>
194
+ <span className="w-2 h-4 bg-gray-500 inline-block animate-pulse" />
195
+ </div>
196
+ </div>
197
+ </div>
198
+ </div>
199
+ </div>
200
+ )}
201
+ {(isLoading && !streamingContent) && (
202
+ <div className="flex justify-center p-4"><Loader2 className="animate-spin text-gray-400" /></div>
203
+ )}
204
+ <div ref={messagesEndRef} />
205
+ </div>
206
+
207
+ {/* Input Area */}
208
+ <div className="absolute bottom-0 left-0 w-full border-t md:border-t-0 dark:border-white/20 md:border-transparent md:dark:border-transparent md:bg-gradient-to-t from-white dark:from-[var(--background-start-rgb)] to-transparent pt-0 md:pt-2">
209
+ <div className="stretch mx-2 flex flex-row gap-3 md:mx-4 md:last:mb-6 lg:mx-auto lg:max-w-2xl xl:max-w-3xl">
210
+ <div className="relative flex h-full flex-1 items-stretch md:flex-col">
211
+ <div className="flex flex-col w-full py-2 flex-grow md:py-3 md:pl-4 relative border border-black/10 dark:border-gray-900/50 text-black dark:text-white bg-white dark:bg-gray-700 rounded-md shadow-[0_0_10px_rgba(0,0,0,0.10)] dark:shadow-[0_0_15px_rgba(0,0,0,0.10)]">
212
+ <form onSubmit={handleSubmit} className="flex flex-row w-full items-center">
213
+ <input
214
+ className="m-0 w-full resize-none border-0 bg-transparent p-0 pr-7 focus:ring-0 focus-visible:ring-0 dark:bg-transparent pl-2 md:pl-0 outline-none overflow-y-hidden h-[24px]"
215
+ placeholder="Send a message..."
216
+ value={input}
217
+ onChange={(e) => setInput(e.target.value)}
218
+ autoFocus
219
+ />
220
+ <button
221
+ type="submit"
222
+ disabled={isLoading || input.length === 0}
223
+ className="absolute p-1 rounded-md text-gray-500 bottom-1.5 right-1 md:bottom-2.5 md:right-2 hover:bg-gray-100 dark:hover:bg-gray-900 disabled:hover:bg-transparent disabled:opacity-40 transition-colors"
224
+ >
225
+ <Send size={16} />
226
+ </button>
227
+ </form>
228
+ </div>
229
+ <div className="px-2 py-2 text-center text-xs text-gray-600 dark:text-gray-300 md:px-[60px]">
230
+ <span>AI can make mistakes. Consider checking important information.</span>
231
+ </div>
232
+ </div>
233
+ </div>
234
+ </div>
235
+ </div>
236
+ );
237
+ }
frontend-next/src/components/Sidebar.js ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ "use client";
2
+
3
+ import { useEffect, useState } from 'react';
4
+ import Link from 'next/link';
5
+ import { usePathname, useRouter } from 'next/navigation';
6
+ import { Plus, MessageSquare, ExternalLink, LogOut, User } from 'lucide-react';
7
+ import { api } from '@/lib/api';
8
+
9
+ export default function Sidebar() {
10
+ const [conversations, setConversations] = useState([]);
11
+ const [loading, setLoading] = useState(true);
12
+ const router = useRouter();
13
+ const pathname = usePathname();
14
+
15
+ useEffect(() => {
16
+ loadConversations();
17
+
18
+ // Listen for updates from ChatInterface
19
+ const handleUpdate = () => loadConversations();
20
+ window.addEventListener('chat-update', handleUpdate);
21
+ return () => window.removeEventListener('chat-update', handleUpdate);
22
+ }, [pathname]); // Reload when path changes (e.g. new chat created)
23
+
24
+ const loadConversations = async () => {
25
+ try {
26
+ const data = await api.getConversations();
27
+ // Sort by updated_at desc
28
+ const sorted = (data || []).sort((a, b) =>
29
+ new Date(b.updated_at) - new Date(a.updated_at)
30
+ );
31
+ setConversations(sorted);
32
+ } catch (error) {
33
+ console.error('Failed to load conversations', error);
34
+ } finally {
35
+ setLoading(false);
36
+ }
37
+ };
38
+
39
+ const handleNewChat = async () => {
40
+ try {
41
+ const newConv = await api.createConversation();
42
+ if (newConv && newConv.conversation_id) {
43
+ router.push(`/c/${newConv.conversation_id}`);
44
+ // loadConversations will trigger via useEffect
45
+ }
46
+ } catch (error) {
47
+ console.error('Failed to create new chat', error);
48
+ }
49
+ };
50
+
51
+ return (
52
+ <div className="flex flex-col h-full w-[260px] bg-[var(--sidebar-bg)] text-[var(--sidebar-fg)] transition-all">
53
+ {/* New Chat Button */}
54
+ <div className="p-3">
55
+ <button
56
+ onClick={handleNewChat}
57
+ className="flex items-center gap-3 w-full px-3 py-3 rounded-md border border-white/20 transition-colors duration-200 hover:bg-[var(--sidebar-hover)] text-sm text-white mb-1"
58
+ >
59
+ <Plus size={16} />
60
+ <span>New chat</span>
61
+ </button>
62
+ </div>
63
+
64
+ {/* Conversation List */}
65
+ <div className="flex-1 overflow-y-auto custom-scrollbar px-2">
66
+ <div className="flex flex-col gap-2 pb-2 text-sm text-gray-100">
67
+ {!loading && conversations.length === 0 && (
68
+ <div className="px-3 text-gray-400 text-xs">No conversations yet.</div>
69
+ )}
70
+
71
+ {conversations.map((conv) => {
72
+ const isActive = pathname === `/c/${conv.conversation_id}`;
73
+ // If alias is missing, show ID or fallback
74
+ const label = conv.alias || conv.messages?.[0]?.content || "New conversation";
75
+ const truncatedLabel = label.length > 25 ? label.substring(0, 25) + '...' : label;
76
+
77
+ return (
78
+ <Link
79
+ key={conv.conversation_id}
80
+ href={`/c/${conv.conversation_id}`}
81
+ className={`group flex items-center gap-3 px-3 py-3 rounded-md transition-colors duration-200 hover:bg-[var(--sidebar-hover)] ${isActive ? 'bg-[var(--sidebar-hover)]' : ''
82
+ }`}
83
+ >
84
+ <MessageSquare size={16} className="text-gray-300" />
85
+ <div className="flex-1 overflow-hidden relative truncate">
86
+ {truncatedLabel}
87
+ </div>
88
+ </Link>
89
+ );
90
+ })}
91
+ </div>
92
+ </div>
93
+
94
+ {/* Footer / User Profile section */}
95
+ <div className="border-t border-white/20 p-3">
96
+ <button className="flex items-center gap-3 w-full px-3 py-3 rounded-md hover:bg-[var(--sidebar-hover)] transition-colors duration-200 text-sm text-white">
97
+ <User size={16} />
98
+ <div className="font-bold">Upgrade to Plus</div>
99
+ </button>
100
+ <div className="flex items-center gap-3 w-full px-3 py-3 rounded-md hover:bg-[var(--sidebar-hover)] transition-colors duration-200 text-sm text-white mt-1 cursor-pointer">
101
+ <div className="w-5 h-5 rounded-sm bg-purple-600 flex items-center justify-center text-xs">U</div>
102
+ <span>User</span>
103
+ </div>
104
+ </div>
105
+ </div>
106
+ );
107
+ }
frontend-next/src/lib/api.js ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Basic API client for the analytical backend

// Backend origin. Override at build time with NEXT_PUBLIC_API_BASE_URL
// (Next.js inlines NEXT_PUBLIC_* vars into client bundles); falls back
// to the local dev server.
const API_BASE_URL = process.env.NEXT_PUBLIC_API_BASE_URL ?? 'http://localhost:8000';

export const api = {
  /**
   * Fetch all conversation summaries.
   * @returns {Promise<Array>} conversation list; [] on any failure.
   */
  getConversations: async () => {
    try {
      const res = await fetch(`${API_BASE_URL}/conversation/all`);
      if (!res.ok) throw new Error('Failed to fetch conversations');
      return await res.json();
    } catch (error) {
      console.error(error);
      return [];
    }
  },

  /**
   * Create a new conversation.
   * @returns {Promise<Object|null>} the new conversation record
   *   (contains conversation_id); null on failure.
   */
  createConversation: async () => {
    try {
      const res = await fetch(`${API_BASE_URL}/chat/new_conversation`, {
        method: 'POST',
      });
      if (!res.ok) throw new Error('Failed to create conversation');
      return await res.json();
    } catch (error) {
      console.error(error);
      return null;
    }
  },

  /**
   * Fetch one conversation with its message history.
   * @param {string} id - conversation id.
   * @returns {Promise<Object|null>} the conversation; null when it does
   *   not exist (404) or on any other failure.
   */
  getConversation: async (id) => {
    try {
      const res = await fetch(`${API_BASE_URL}/conversation/${id}`);
      if (res.status === 404) return null; // Gracefully handle not found
      if (!res.ok) throw new Error('Failed to fetch conversation');
      return await res.json();
    } catch (error) {
      console.error('API Error:', error);
      return null;
    }
  },

  // Streaming chat endpoint URL — consumed directly by fetch/EventSource.
  getChatEndpoint: () => `${API_BASE_URL}/chat/conversation`,
};