Commit: 03e8305
Parent(s): 859216d

app/services/ folder was completed
Files changed:
- app/services/__init__.py         +1   -0
- app/services/chat_service.py     +231 -0
- app/services/groq_service.py     +228 -0
- app/services/realtime_service.py +168 -0
- app/services/vector_store.py     +142 -0
app/services/__init__.py
ADDED
@@ -0,0 +1 @@
# Services Package
app/services/chat_service.py
ADDED
@@ -0,0 +1,231 @@
"""
CHAT SERVICE MODULE
===================

This service owns all chat session and conversation logic. It is used by the
/chat and /chat/realtime endpoints. Designed for single-user use: the server
has one ChatService and one in-memory session store; the user can have many
sessions (each identified by session_id).

RESPONSIBILITIES:
- get_or_create_session(session_id): Return an existing session or create a new one.
  If the user sends a session_id that was used before (e.g. before a restart),
  we try to load it from disk so the conversation continues.
- add_message / get_chat_history: Keep messages in memory per session.
- format_history_for_llm: Turn the message list into (user, assistant) pairs
  and trim to MAX_CHAT_HISTORY_TURNS so we don't overflow the prompt.
- process_message / process_realtime_message: Add the user message, call Groq
  (or RealtimeGroq), add the assistant reply, return the reply.
- save_chat_session: Write the session to database/chats_data/*.json so it persists
  and can be loaded on next startup (and used by the vector store for retrieval).
"""

import json
import logging
from pathlib import Path
from typing import List, Optional, Dict
import uuid

from config import CHATS_DATA_DIR, MAX_CHAT_HISTORY_TURNS
from app.models import ChatMessage, ChatHistory
from app.services.groq_service import GroqService
from app.services.realtime_service import RealtimeGroqService


logger = logging.getLogger("J.A.R.V.I.S")


# ============================================================================
# CHAT SERVICE
# ============================================================================

class ChatService:
    """
    Manages chat sessions: in-memory message lists, load/save to disk, and
    calling Groq (or Realtime) to get replies. All state for active sessions
    is in self.sessions; saving to disk is done after each message so
    conversations survive restarts.
    """

    def __init__(self, groq_service: GroqService, realtime_service: RealtimeGroqService = None):
        """Store references to the Groq and Realtime services; keep sessions in memory."""
        self.groq_service = groq_service
        self.realtime_service = realtime_service
        # Map: session_id -> list of ChatMessage (user and assistant messages in order).
        self.sessions: Dict[str, List[ChatMessage]] = {}

    # -------------------------------------------------------------------------
    # SESSION LOAD / VALIDATE / GET-OR-CREATE
    # -------------------------------------------------------------------------

    def load_session_from_disk(self, session_id: str) -> bool:
        """
        Load a session from database/chats_data/ if a file for this session_id exists.

        The file name is chat_{safe_session_id}.json where safe_session_id has
        dashes/spaces replaced with underscores. On success we put the messages into
        self.sessions[session_id] so later requests use them.
        Returns True if loaded, False if the file is missing or unreadable.
        """
        # Sanitize the ID for use in a filename (no dashes or spaces).
        safe_session_id = session_id.replace("-", "_").replace(" ", "_")
        filename = f"chat_{safe_session_id}.json"
        filepath = CHATS_DATA_DIR / filename

        if not filepath.exists():
            return False

        try:
            with open(filepath, "r", encoding="utf-8") as f:
                chat_dict = json.load(f)
            # Convert stored dicts back to ChatMessage objects.
            messages = [
                ChatMessage(role=msg.get("role"), content=msg.get("content"))
                for msg in chat_dict.get("messages", [])
            ]
            self.sessions[session_id] = messages
            return True
        except Exception as e:
            logger.warning("Failed to load session %s from disk: %s", session_id, e)
            return False

    def validate_session_id(self, session_id: str) -> bool:
        """
        Return True if session_id is safe to use (non-empty, no path traversal, length <= 255).
        Used to reject malicious or invalid IDs before we use them in file paths.
        """
        if not session_id or not session_id.strip():
            return False
        # Block path traversal and path separators.
        if ".." in session_id or "/" in session_id or "\\" in session_id:
            return False
        if len(session_id) > 255:
            return False
        return True

    def get_or_create_session(self, session_id: Optional[str] = None) -> str:
        """
        Return a session ID and ensure that session exists in memory.

        - If session_id is None: create a new session with a new UUID and return it.
        - If session_id is provided: validate it; if it's in self.sessions return it;
          else try to load from disk; if not found, create a new session with that ID.
        Raises ValueError if session_id is invalid (empty, path traversal, or too long).
        """
        if not session_id:
            new_session_id = str(uuid.uuid4())
            self.sessions[new_session_id] = []
            return new_session_id

        if not self.validate_session_id(session_id):
            raise ValueError(
                f"Invalid session_id format: {session_id}. Session ID must be non-empty, "
                "not contain path traversal characters, and be under 255 characters."
            )

        if session_id in self.sessions:
            return session_id

        if self.load_session_from_disk(session_id):
            return session_id

        # New session with this ID (e.g. the client sent an ID that was never saved).
        self.sessions[session_id] = []
        return session_id

    # ---------------------------------------------------------------
    # MESSAGES AND HISTORY FORMATTING
    # ---------------------------------------------------------------

    def add_message(self, session_id: str, role: str, content: str):
        """Append one message (user or assistant) to the session's message list. Creates the session if missing."""
        if session_id not in self.sessions:
            self.sessions[session_id] = []
        self.sessions[session_id].append(ChatMessage(role=role, content=content))

    def get_chat_history(self, session_id: str) -> List[ChatMessage]:
        """Return the list of messages for this session (chronological). Empty list if the session is unknown."""
        return self.sessions.get(session_id, [])

    def format_history_for_llm(self, session_id: str, exclude_last: bool = False) -> List[tuple]:
        """
        Build a list of (user_text, assistant_text) pairs for the LLM prompt.

        We only include complete pairs and cap at MAX_CHAT_HISTORY_TURNS (e.g. 20)
        so the prompt does not grow unbounded. If exclude_last is True we drop the
        last message (the current user message that we are about to reply to).
        """
        messages = self.get_chat_history(session_id)
        history = []
        # If exclude_last, skip the last message (the current user message we are about to reply to).
        messages_to_process = messages[:-1] if exclude_last and messages else messages
        i = 0
        while i < len(messages_to_process) - 1:
            user_msg = messages_to_process[i]
            ai_msg = messages_to_process[i + 1]
            if user_msg.role == "user" and ai_msg.role == "assistant":
                history.append((user_msg.content, ai_msg.content))
                i += 2
            else:
                i += 1
        # Keep only the most recent turns so the prompt does not exceed the token limit.
        if len(history) > MAX_CHAT_HISTORY_TURNS:
            history = history[-MAX_CHAT_HISTORY_TURNS:]
        return history

    # --------------------------------------------------------------------------
    # PROCESS MESSAGE (GENERAL AND REALTIME)
    # --------------------------------------------------------------------------

    def process_message(self, session_id: str, user_message: str) -> str:
        """
        Handle one general-chat message: add the user message, call Groq (no web search),
        add the reply, return it.
        """
        self.add_message(session_id, "user", user_message)
        chat_history = self.format_history_for_llm(session_id, exclude_last=True)
        response = self.groq_service.get_response(question=user_message, chat_history=chat_history)
        self.add_message(session_id, "assistant", response)
        return response

    def process_realtime_message(self, session_id: str, user_message: str) -> str:
        """
        Handle one realtime message: add the user message, call the realtime service (Tavily + Groq),
        add the reply, return it. Uses the same session as process_message so history is shared.
        Raises ValueError if realtime_service is None.
        """
        if not self.realtime_service:
            raise ValueError("Realtime service is not initialized. Cannot process realtime queries.")
        self.add_message(session_id, "user", user_message)
        chat_history = self.format_history_for_llm(session_id, exclude_last=True)
        response = self.realtime_service.get_response(question=user_message, chat_history=chat_history)
        self.add_message(session_id, "assistant", response)
        return response

    # -------------------------------------------------------------
    # PERSIST SESSION TO DISK
    # -------------------------------------------------------------

    def save_chat_session(self, session_id: str):
        """
        Write this session's messages to database/chats_data/chat_{safe_session_id}.json.

        Called after each message so the conversation is persisted. The vector store
        is rebuilt on startup from these files, so new chats are included after restart.
        If the session is missing or empty we do nothing. On write error we only log.
        """
        if session_id not in self.sessions or not self.sessions[session_id]:
            return

        messages = self.sessions[session_id]
        safe_session_id = session_id.replace("-", "_").replace(" ", "_")
        filename = f"chat_{safe_session_id}.json"
        filepath = CHATS_DATA_DIR / filename
        chat_dict = {
            "session_id": session_id,
            "messages": [{"role": msg.role, "content": msg.content} for msg in messages]
        }

        try:
            with open(filepath, "w", encoding="utf-8") as f:
                json.dump(chat_dict, f, indent=2, ensure_ascii=False)
        except Exception as e:
            logger.error("Failed to save chat session %s to disk: %s", session_id, e)
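For reference, a minimal sketch of how these pieces are meant to fit together from a caller's point of view. The wiring below is an assumption based on the docstrings above (this commit does not include the endpoint code); variable names are illustrative.

from app.services.vector_store import VectorStoreService
from app.services.groq_service import GroqService
from app.services.realtime_service import RealtimeGroqService
from app.services.chat_service import ChatService

# Build the shared services once at startup (hypothetical wiring).
vector_store = VectorStoreService()
vector_store.create_vector_store()
chat = ChatService(GroqService(vector_store), RealtimeGroqService(vector_store))

# One general-chat turn: resolve the session, get a reply, persist to disk.
session_id = chat.get_or_create_session(None)       # None -> new UUID session
reply = chat.process_message(session_id, "Hello!")
chat.save_chat_session(session_id)                  # written to database/chats_data/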
app/services/groq_service.py
ADDED
@@ -0,0 +1,228 @@
"""
GROQ SERVICE MODULE
===================

This module handles general chat: no web search, only the Groq LLM plus context
from the vector store (learning data + past chats). Used by ChatService for
POST /chat.

MULTIPLE API KEYS (round-robin and fallback):
- You can set multiple Groq API keys in .env: GROQ_API_KEY, GROQ_API_KEY_2,
  GROQ_API_KEY_3, ... (no limit).
- Each request uses one key in rotation: 1st request -> 1st key, 2nd request ->
  2nd key, 3rd request -> 3rd key, then back to the 1st key, and so on. Every key
  is used one-by-one so usage is spread across keys.
- The round-robin counter is shared across all instances (GroqService and
  RealtimeGroqService), so both the /chat and /chat/realtime endpoints use the
  same rotation sequence.
- If the chosen key fails (rate limit 429 or any error), we try the next key,
  then the next, until one succeeds or all have been tried.
- All API key usage is logged with masked keys (first 8 and last 4 chars visible)
  for security and debugging purposes.

FLOW:
1. get_response(question, chat_history) is called.
2. We ask the vector store for the top-k chunks most similar to the question (retrieval).
3. We build a system message: JARVIS_SYSTEM_PROMPT + current time + retrieved context.
4. We send it to Groq using the next key in rotation (falling back to the next key on failure).
5. We return the assistant's reply.

Context is only what we retrieve (no full dump of learning data), so token usage stays bounded.
"""

from typing import List, Optional
from langchain_groq import ChatGroq
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage, AIMessage

import logging

from config import GROQ_API_KEYS, GROQ_MODEL, JARVIS_SYSTEM_PROMPT
from app.services.vector_store import VectorStoreService
from app.utils.time_info import get_time_information

logger = logging.getLogger("J.A.R.V.I.S")


# ==============================================================================
# HELPER: ESCAPE CURLY BRACES FOR LANGCHAIN
# ==============================================================================
# LangChain prompt templates use {variable_name}. If learning data or chat
# content contains { or }, the template engine can break. Doubling them
# makes them literal in the final string.

def escape_curly_braces(text: str) -> str:
    """
    Double every { and } so LangChain does not treat them as template variables.
    Learning data or chat content might contain { or }; without escaping, invoke() can fail.
    """
    if not text:
        return text
    return text.replace("{", "{{").replace("}", "}}")


def _is_rate_limit_error(exc: BaseException) -> bool:
    """
    Return True if the exception indicates a Groq rate limit (e.g. 429, tokens per day).
    Used for logging; the actual fallback tries the next key on any failure when multiple keys exist.
    """
    msg = str(exc).lower()
    return "429" in str(exc) or "rate limit" in msg or "tokens per day" in msg


def _mask_api_key(key: str) -> str:
    """
    Mask an API key for safe logging. Shows the first 8 and last 4 characters, masks the middle.
    Example: gsk_1234567890abcdef -> gsk_1234...cdef
    """
    if not key or len(key) <= 12:
        return "***masked***"
    return f"{key[:8]}...{key[-4:]}"


# =============================================================
# GROQ SERVICE CLASS
# =============================================================

class GroqService:
    """
    General chat: retrieves context from the vector store and calls the Groq LLM.
    Supports multiple API keys: each request uses the next key in rotation (one-by-one),
    and if that key fails, the server tries the next key until one succeeds or all fail.

    ROUND-ROBIN BEHAVIOR:
    - Request 1 uses key 0 (first key)
    - Request 2 uses key 1 (second key)
    - Request 3 uses key 2 (third key)
    - After all keys are used, cycles back to key 0
    - If a key fails (rate limit, error), tries the next key in sequence
    - All requests share the same round-robin counter (class-level)
    """

    # Class-level counter shared across all instances (GroqService and RealtimeGroqService).
    # This ensures round-robin works across both the /chat and /chat/realtime endpoints.
    _shared_key_index = 0
    _lock = None  # Will be set to a threading.Lock if threading is needed (currently single-threaded).

    def __init__(self, vector_store_service: VectorStoreService):
        """
        Create one Groq LLM client per API key and store the vector store for retrieval.
        self.llms[i] corresponds to GROQ_API_KEYS[i]; request N uses the key at index (N % len(keys)).
        """
        if not GROQ_API_KEYS:
            raise ValueError(
                "No Groq API keys configured. Set GROQ_API_KEY (and optionally GROQ_API_KEY_2, GROQ_API_KEY_3, ...) in .env"
            )
        # One ChatGroq instance per key: each request will use one of these in rotation.
        self.llms = [
            ChatGroq(
                groq_api_key=key,
                model_name=GROQ_MODEL,
                temperature=0.8,
            )
            for key in GROQ_API_KEYS
        ]
        self.vector_store_service = vector_store_service
        logger.info(f"Initialized GroqService with {len(GROQ_API_KEYS)} API key(s)")

    def _invoke_llm(
        self,
        prompt: ChatPromptTemplate,
        messages: list,
        question: str,
    ) -> str:
        """
        Call the LLM using the next key in rotation; on failure, try the next key until one succeeds.

        - Round-robin: the request uses the key at index (_shared_key_index % n), then we increment
          _shared_key_index so the next request uses the next key. All instances share the same counter.
        - Fallback: if the chosen key raises (e.g. 429 rate limit), we try the next key, then the next,
          until one returns successfully or we have tried all keys.
        Returns response.content. Raises if all keys fail.
        """
        n = len(self.llms)
        # Which key to try first for this request (round-robin: request 1 -> key 0, request 2 -> key 1, ...).
        # Use the class-level counter so all instances (GroqService and RealtimeGroqService) share the same rotation.
        start_i = GroqService._shared_key_index % n
        current_key_index = GroqService._shared_key_index
        GroqService._shared_key_index += 1  # The next request will use the next key.

        # Log which key we're using (masked for security).
        masked_key = _mask_api_key(GROQ_API_KEYS[start_i])
        logger.info(f"Using API key {start_i + 1}/{n} (round-robin index: {current_key_index}): {masked_key}")

        last_exc = None
        keys_tried = []
        # Try each key in order starting from start_i (wrap around with % n).
        for j in range(n):
            i = (start_i + j) % n
            keys_tried.append(i)
            try:
                # Build a chain with this key's LLM and invoke once.
                chain = prompt | self.llms[i]
                response = chain.invoke({"history": messages, "question": question})
                # Log success if we had to fall back to a different key.
                if j > 0:
                    masked_success_key = _mask_api_key(GROQ_API_KEYS[i])
                    logger.info(f"Fallback successful: API key #{i + 1}/{n} succeeded: {masked_success_key}")
                return response.content
            except Exception as e:
                last_exc = e
                masked_failed_key = _mask_api_key(GROQ_API_KEYS[i])
                if _is_rate_limit_error(e):
                    logger.warning(f"API key #{i + 1}/{n} rate limited: {masked_failed_key} - {str(e)[:100]}")
                else:
                    logger.warning(f"API key #{i + 1}/{n} failed: {masked_failed_key} - {str(e)[:100]}")
                # If we have more than one key, try the next one; otherwise raise immediately.
                if n > 1:
                    continue
                raise Exception(f"Error getting response from Groq: {str(e)}") from e
        # All keys were tried and all failed; raise the last exception.
        masked_all_keys = ", ".join([_mask_api_key(GROQ_API_KEYS[i]) for i in keys_tried])
        logger.error(f"All API keys failed. Tried keys: {masked_all_keys}")
        raise Exception(f"Error getting response from Groq: {str(last_exc)}") from last_exc

    def get_response(
        self,
        question: str,
        chat_history: Optional[List[tuple]] = None
    ) -> str:
        """
        Return the assistant's reply for this question (general chat, no web search).
        Retrieves context from the vector store, builds the prompt, then calls _invoke_llm,
        which uses the next API key in rotation and falls back to other keys on failure.
        """
        try:
            # Get relevant chunks from the learning data and past chats (bounded token usage).
            # If retrieval fails (e.g. vector store not ready), use empty context so the LLM still answers.
            context = ""
            try:
                retriever = self.vector_store_service.get_retriever(k=10)
                context_docs = retriever.invoke(question)
                context = "\n".join([doc.page_content for doc in context_docs]) if context_docs else ""
            except Exception as retrieval_err:
                logger.warning("Vector store retrieval failed, using empty context: %s", retrieval_err)

            # Build the system message: personality + current time + retrieved context.
            time_info = get_time_information()
            system_message = JARVIS_SYSTEM_PROMPT + f"\n\nCurrent time and date: {time_info}"
            if context:
                system_message += f"\n\nRelevant context from your learning data and past conversations:\n{escape_curly_braces(context)}"

            # Prompt template: system message, chat history placeholder, current question.
            prompt = ChatPromptTemplate.from_messages([
                ("system", system_message),
                MessagesPlaceholder(variable_name="history"),
                ("human", "{question}"),
            ])
            # Convert (user, assistant) pairs to LangChain message objects.
            messages = []
            if chat_history:
                for human_msg, ai_msg in chat_history:
                    messages.append(HumanMessage(content=human_msg))
                    messages.append(AIMessage(content=ai_msg))

            # Use the next key in rotation; on failure, try the remaining keys until one succeeds or all fail.
            return self._invoke_llm(prompt, messages, question)
        except Exception as e:
            raise Exception(f"Error getting response from Groq: {str(e)}") from e
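The round-robin-with-fallback behavior in _invoke_llm reduces to modular arithmetic over the key list. A standalone sketch of just that index logic (illustrative only; no Groq calls, stand-in key names):

keys = ["key_A", "key_B", "key_C"]   # stand-in for GROQ_API_KEYS
shared_index = 0                     # stand-in for GroqService._shared_key_index

def key_order_for_next_request():
    """Return the order in which keys would be tried for one request."""
    global shared_index
    start = shared_index % len(keys)  # round-robin starting key
    shared_index += 1                 # the next request starts one key later
    return [(start + j) % len(keys) for j in range(len(keys))]

print(key_order_for_next_request())  # [0, 1, 2]: request 1 starts at key_A
print(key_order_for_next_request())  # [1, 2, 0]: request 2 starts at key_B, falls back to C, then A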
app/services/realtime_service.py
ADDED
@@ -0,0 +1,168 @@
"""
REALTIME GROQ SERVICE MODULE
=============================

Extends GroqService to add a Tavily web search before calling the LLM. Used by
ChatService for POST /chat/realtime. Same session and history as general chat;
the only difference is we run a Tavily search for the user's question, add
the results to the system message, then call Groq.

ROUND-ROBIN API KEYS:
- Shares the same round-robin counter as GroqService (class-level _shared_key_index)
- This means /chat and /chat/realtime requests use the same rotation sequence
- Example: if /chat uses key 1, the next /chat/realtime request will use key 2
- All API key usage is logged with masked keys for security and debugging

FLOW:
1. search_tavily(question): call the Tavily API, format the results as text (or "" on failure).
2. get_response(question, chat_history): add the search results to the system message,
   then same as the parent: retrieve context from the vector store, build the prompt, call Groq.

If TAVILY_API_KEY is not set, tavily_client is None and search_tavily returns "";
the user still gets an answer from Groq with no search results.
"""

from typing import List, Optional
from tavily import TavilyClient
import logging
import os

from app.services.groq_service import GroqService, escape_curly_braces
from app.services.vector_store import VectorStoreService
from app.utils.time_info import get_time_information
from app.utils.retry import with_retry
from config import JARVIS_SYSTEM_PROMPT
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage, AIMessage


logger = logging.getLogger("J.A.R.V.I.S")


# ==============================================================================
# REALTIME GROQ SERVICE CLASS (extends GroqService)
# ==============================================================================

class RealtimeGroqService(GroqService):
    """
    Same as GroqService but runs a Tavily web search first and adds the results
    to the system message. If Tavily is missing or fails, we still call Groq with
    no search results (the user gets an answer without real-time data).
    """

    def __init__(self, vector_store_service: VectorStoreService):
        """Call the parent init (Groq LLM + vector store); then create the Tavily client if a key is set."""
        super().__init__(vector_store_service)
        tavily_api_key = os.getenv("TAVILY_API_KEY", "")
        if tavily_api_key:
            self.tavily_client = TavilyClient(api_key=tavily_api_key)
            logger.info("Tavily search client initialized successfully")
        else:
            self.tavily_client = None
            logger.warning("TAVILY_API_KEY not set. Realtime search will be unavailable.")

    def search_tavily(self, query: str, num_results: int = 5) -> str:
        """
        Call the Tavily API with the given query and return formatted result text for the prompt.
        On any failure (no key, rate limit, network) we return "" so the user still gets a reply.
        """
        if not self.tavily_client:
            logger.warning("Tavily client not initialized. TAVILY_API_KEY not set.")
            return ""

        try:
            # Perform the Tavily search with retries for rate limits and transient errors.
            response = with_retry(
                lambda: self.tavily_client.search(
                    query=query,
                    search_depth="basic",
                    max_results=num_results,
                    include_answer=False,
                    include_raw_content=False,
                ),
                max_retries=3,
                initial_delay=1.0,
            )

            results = response.get('results', [])

            if not results:
                logger.warning(f"No Tavily search results found for query: {query}")
                return ""

            # Format search results as text for the system message.
            formatted_results = f"Search results for '{query}':\n[start]\n"

            for i, result in enumerate(results[:num_results], 1):
                title = result.get('title', 'No title')
                content = result.get('content', 'No description')
                url = result.get('url', '')

                formatted_results += f"Title: {title}\n"
                formatted_results += f"Description: {content}\n"
                if url:
                    formatted_results += f"URL: {url}\n"
                formatted_results += "\n"

            formatted_results += "[end]"

            logger.info(f"Tavily search completed for query: {query} with {len(results)} results")
            return formatted_results

        except Exception as e:
            # If the search fails (network error, rate limit, etc.), log and return empty.
            # The AI will still respond using its knowledge, just without real-time data.
            logger.error(f"Error performing Tavily search: {e}")
            return ""

    def get_response(self, question: str, chat_history: Optional[List[tuple]] = None) -> str:
        """
        Run a Tavily search for the question, add the results to the system message, then call Groq
        via the parent's _invoke_llm (same multi-key round-robin and fallback as general chat).
        """
        try:
            logger.info(f"Searching Tavily for: {question}")
            search_results = self.search_tavily(question, num_results=5)

            # Retrieve context from the vector store (learning data + past chats).
            # If retrieval fails, use empty context so the LLM still answers (e.g. with Tavily results).
            context = ""
            try:
                retriever = self.vector_store_service.get_retriever(k=10)
                context_docs = retriever.invoke(question)
                context = "\n\n".join([doc.page_content for doc in context_docs]) if context_docs else ""
            except Exception as retrieval_err:
                logger.warning("Vector store retrieval failed, using empty context: %s", retrieval_err)

            # Build the system message: personality + time + Tavily results + retrieved context.
            time_info = get_time_information()
            system_message = JARVIS_SYSTEM_PROMPT + f"\n\nCurrent time and date: {time_info}"

            if search_results:
                escaped_search_results = escape_curly_braces(search_results)
                system_message += f"\n\nRecent search results:\n{escaped_search_results}"

            if context:
                escaped_context = escape_curly_braces(context)
                system_message += f"\n\nRelevant context from your learning data and past conversations:\n{escaped_context}"

            prompt = ChatPromptTemplate.from_messages([
                ("system", system_message),
                MessagesPlaceholder(variable_name="history"),
                ("human", "{question}"),
            ])
            messages = []
            if chat_history:
                for human_msg, ai_msg in chat_history:
                    messages.append(HumanMessage(content=human_msg))
                    messages.append(AIMessage(content=ai_msg))

            # Uses the same round-robin and fallback as general chat: next key one-by-one, try the next on failure.
            response_content = self._invoke_llm(prompt, messages, question)
            logger.info(f"Realtime response generated for: {question}")
            return response_content

        except Exception as e:
            logger.error(f"Error in realtime get_response: {e}", exc_info=True)
            # Re-raise so main.py can return 429 (rate limit) or 500 consistently with general chat.
            raise
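app/utils/retry.py is not part of this commit, so the exact behavior of with_retry is not shown here. From the call site above (a zero-argument callable plus max_retries and initial_delay keywords), a plausible exponential-backoff sketch would be:

import time

def with_retry(fn, max_retries: int = 3, initial_delay: float = 1.0):
    """Hypothetical sketch: call fn(); on exception, sleep and retry with
    exponential backoff, re-raising the last error when retries run out."""
    delay = initial_delay
    for attempt in range(max_retries):
        try:
            return fn()
        except Exception:
            if attempt == max_retries - 1:
                raise  # out of retries: propagate the last error
            time.sleep(delay)
            delay *= 2  # back off: 1s, 2s, 4s, ...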
app/services/vector_store.py
ADDED
@@ -0,0 +1,142 @@
"""
VECTOR STORE SERVICE MODULE
===========================

This service builds and queries the FAISS vector index used for context retrieval.
Learning data (database/learning_data/*.txt) and past chats (database/chats_data/*.json)
are loaded at startup, split into chunks, embedded with HuggingFace, and stored in FAISS.
When the user asks a question we embed it and retrieve the k most similar chunks; only
those chunks are sent to the LLM, so token usage is bounded.

LIFECYCLE:
- create_vector_store(): Load all .txt and .json files, chunk, embed, build FAISS, save to disk.
  Called once at startup. Restart the server after adding new .txt files so they are included.
- get_retriever(k): Return a retriever that fetches the k nearest chunks for a query string.
- save_vector_store(): Write the current FAISS index to database/vector_store/ (called after create).

Embeddings run locally (sentence-transformers); no extra API key. The Groq and Realtime services
call get_retriever() for every request to get context.
"""

import json
import logging
from pathlib import Path
from typing import List, Optional
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document

from config import (
    LEARNING_DATA_DIR,
    CHATS_DATA_DIR,
    VECTOR_STORE_DIR,
    EMBEDDING_MODEL,
    CHUNK_SIZE,
    CHUNK_OVERLAP,
)


logger = logging.getLogger("J.A.R.V.I.S")


# =========================================================
# VECTOR STORE SERVICE CLASS
# =========================================================

class VectorStoreService:
    """
    Builds a FAISS index from learning_data .txt files and chats_data .json files,
    and provides a retriever to fetch the k most relevant chunks for a query.
    """

    def __init__(self):
        """Create the embedding model (local) and text splitter; vector_store is set in create_vector_store()."""
        # Embeddings run locally (no API key); used to convert text into vectors for similarity search.
        self.embeddings = HuggingFaceEmbeddings(
            model_name=EMBEDDING_MODEL,
            model_kwargs={"device": "cpu"},
        )
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=CHUNK_SIZE,
            chunk_overlap=CHUNK_OVERLAP,
        )
        self.vector_store: Optional[FAISS] = None

    # ----------------------------------------------------------------------
    # LOAD DOCUMENTS FROM DISK
    # ----------------------------------------------------------------------

    def load_learning_data(self) -> List[Document]:
        """Read all .txt files in database/learning_data/ and return one Document per file (content + source name)."""
        documents = []
        for file_path in list(LEARNING_DATA_DIR.glob("*.txt")):
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    content = f.read().strip()
                if content:
                    documents.append(Document(page_content=content, metadata={"source": str(file_path.name)}))
            except Exception as e:
                logger.warning("Could not load learning data file %s: %s", file_path, e)
        return documents

    def load_chat_history(self) -> List[Document]:
        """Load all .json files in database/chats_data/; turn each into one Document (User:/Assistant: lines)."""
        documents = []
        for file_path in list(CHATS_DATA_DIR.glob("*.json")):
            try:
                with open(file_path, "r", encoding="utf-8") as f:
                    chat_data = json.load(f)
                messages = chat_data.get("messages", [])
                # Format as "User: ..." / "Assistant: ..." so the retriever can match past conversations.
                chat_content = "\n".join([
                    f"User: {msg.get('content', '')}" if msg.get('role') == 'user'
                    else f"Assistant: {msg.get('content', '')}"
                    for msg in messages
                ])
                if chat_content.strip():
                    documents.append(Document(page_content=chat_content, metadata={"source": f"chat_{file_path.stem}"}))
            except Exception as e:
                logger.warning("Could not load chat history file %s: %s", file_path, e)
        return documents

    # -------------------------------------------------------
    # BUILD AND SAVE FAISS INDEX
    # -------------------------------------------------------

    def create_vector_store(self) -> FAISS:
        """
        Load learning_data + chats_data, embed, build the FAISS index, save to disk.
        Called once at startup. If there are no documents we create a tiny placeholder index.
        """
        learning_docs = self.load_learning_data()
        chat_docs = self.load_chat_history()
        all_documents = learning_docs + chat_docs

        if not all_documents:
            # Placeholder so get_retriever() never fails; returns this single chunk for any query.
            self.vector_store = FAISS.from_texts(["No data available yet."], self.embeddings)
        else:
            chunks = self.text_splitter.split_documents(all_documents)
            self.vector_store = FAISS.from_documents(chunks, self.embeddings)

        self.save_vector_store()
        return self.vector_store

    def save_vector_store(self):
        """Write the current FAISS index to database/vector_store/. On error we only log."""
        if self.vector_store:
            try:
                self.vector_store.save_local(str(VECTOR_STORE_DIR))
            except Exception as e:
                logger.error("Failed to save vector store to disk: %s", e)

    # ---------------------------------------------------------------------------
    # RETRIEVER FOR CONTEXT
    # ---------------------------------------------------------------------------

    def get_retriever(self, k: int = 10):
        """Return a retriever that fetches the k most similar chunks for a query string."""
        if not self.vector_store:
            raise RuntimeError("Vector store not initialized. This should not happen.")
        return self.vector_store.as_retriever(search_kwargs={"k": k})
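A minimal usage sketch of this service in isolation (the query string and k value are illustrative):

from app.services.vector_store import VectorStoreService

store = VectorStoreService()
store.create_vector_store()            # load, chunk, embed, build and save the FAISS index

retriever = store.get_retriever(k=5)
docs = retriever.invoke("what do you know about FAISS?")
for doc in docs:
    # Each chunk carries the file it came from in metadata["source"].
    print(doc.metadata.get("source"), "->", doc.page_content[:80])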