cryogenic22 committed on
Commit
292eb86
·
verified ·
1 Parent(s): 10c877a

Update utils/database.py

Browse files
Files changed (1) hide show
  1. utils/database.py +137 -136
utils/database.py CHANGED
@@ -1,4 +1,5 @@
1
  # utils/database.py
 
2
  # Update the imports first
3
  from langchain_community.chat_models import ChatOpenAI
4
  from langchain_core.messages import (
@@ -25,12 +26,18 @@ import traceback
25
  import time
26
  import io
27
  import tempfile
28
- from langchain_community.document_loaders import PyPDFLoader
29
-
30
  from sqlite3 import Error
31
 
 
32
  def create_connection(db_file):
33
- """Create a database connection to the SQLite database."""
 
 
 
 
 
 
 
34
  conn = None
35
  try:
36
  conn = sqlite3.connect(db_file)
@@ -39,8 +46,14 @@ def create_connection(db_file):
39
  st.error("Failed to connect to database. Please try again or contact support.")
40
  return None
41
 
 
42
  def create_tables(conn):
43
- """Create necessary tables in the database."""
 
 
 
 
 
44
  try:
45
  sql_create_documents_table = '''
46
  CREATE TABLE IF NOT EXISTS documents (
@@ -81,11 +94,18 @@ def create_tables(conn):
81
 
82
 
83
  def process_document(file_path):
84
- """Process a PDF document with proper chunking."""
 
 
 
 
 
 
 
85
  # Load PDF
86
  loader = PyPDFLoader(file_path)
87
  documents = loader.load()
88
-
89
  # Create text splitter
90
  text_splitter = RecursiveCharacterTextSplitter(
91
  chunk_size=1000,
@@ -93,52 +113,54 @@ def process_document(file_path):
93
  length_function=len,
94
  separators=["\n\n", "\n", " ", ""]
95
  )
96
-
97
  # Split documents into chunks
98
  chunks = text_splitter.split_documents(documents)
99
-
100
  # Extract text content for database storage
101
  full_content = "\n".join(doc.page_content for doc in documents)
102
-
103
  return chunks, full_content
104
 
 
105
  def get_documents(conn):
106
- """Retrieve all documents from the database.
107
-
 
108
  Args:
109
- conn: SQLite database connection
110
-
111
  Returns:
112
- tuple: (list of document contents, list of document names)
113
  """
114
  try:
115
  cursor = conn.cursor()
116
  cursor.execute("SELECT content, name FROM documents")
117
  results = cursor.fetchall()
118
-
119
  if not results:
120
  return [], []
121
-
122
  # Separate contents and names
123
  document_contents = [row[0] for row in results]
124
  document_names = [row[1] for row in results]
125
-
126
  return document_contents, document_names
127
-
128
  except Error as e:
129
  st.error(f"Error retrieving documents: {e}")
130
  return [], []
131
 
 
132
  def insert_document(conn, name, content):
133
- """Insert a new document into the database.
134
-
 
135
  Args:
136
- conn: SQLite database connection
137
- name (str): Name of the document
138
- content (str): Content of the document
139
-
140
  Returns:
141
- int: ID of the inserted document, or None if insertion failed
142
  """
143
  try:
144
  cursor = conn.cursor()
@@ -147,19 +169,20 @@ def insert_document(conn, name, content):
147
  cursor.execute(sql, (name, content))
148
  conn.commit()
149
  return cursor.lastrowid
150
-
151
  except Error as e:
152
  st.error(f"Error inserting document: {e}")
153
  return None
154
 
 
155
  def verify_vector_store(vector_store):
156
- """Verify that the vector store has documents loaded.
157
-
 
158
  Args:
159
- vector_store: FAISS vector store instance
160
-
161
  Returns:
162
- bool: True if vector store is properly initialized with documents
163
  """
164
  try:
165
  # Try to perform a simple similarity search
@@ -170,31 +193,35 @@ def verify_vector_store(vector_store):
170
  return False
171
 
172
 
173
-
174
  def handle_document_upload(uploaded_files):
175
- """Handle document upload with progress tracking."""
 
 
 
 
 
176
  try:
177
  # Initialize session state variables if they don't exist
178
  if 'qa_system' not in st.session_state:
179
  st.session_state.qa_system = None
180
  if 'vector_store' not in st.session_state:
181
  st.session_state.vector_store = None
182
-
183
  # Create a progress container
184
  progress_container = st.empty()
185
  status_container = st.empty()
186
  details_container = st.empty()
187
-
188
  # Initialize progress bar
189
  progress_bar = progress_container.progress(0)
190
  status_container.info("πŸ”„ Initializing document processing...")
191
-
192
  # Reset existing states
193
  if st.session_state.vector_store is not None:
194
  st.session_state.vector_store = None
195
  if st.session_state.qa_system is not None:
196
  st.session_state.qa_system = None
197
-
198
  # Initialize embeddings (10% progress)
199
  status_container.info("πŸ”„ Initializing embeddings model...")
200
  embeddings = get_embeddings_model()
@@ -202,142 +229,92 @@ def handle_document_upload(uploaded_files):
202
  status_container.error("❌ Failed to initialize embeddings model")
203
  return
204
  progress_bar.progress(10)
205
-
206
- # Process documents
207
  all_chunks = []
208
  documents = []
209
  document_names = []
210
-
211
  progress_per_file = 70 / len(uploaded_files)
212
  current_progress = 10
213
-
214
  for idx, uploaded_file in enumerate(uploaded_files):
215
  file_name = uploaded_file.name
216
  status_container.info(f"πŸ”„ Processing document {idx + 1}/{len(uploaded_files)}: {file_name}")
217
-
218
  # Create temporary file
219
  with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
220
  tmp_file.write(uploaded_file.getvalue())
221
  tmp_file.flush()
222
-
223
  # Process document with chunking
224
  chunks, content = process_document(tmp_file.name)
225
-
226
  # Store in database
227
  doc_id = insert_document(st.session_state.db_conn, file_name, content)
228
  if not doc_id:
229
  status_container.error(f"❌ Failed to store document: {file_name}")
230
  continue
231
-
232
  # Add chunks with metadata
233
  for chunk in chunks:
234
  chunk.metadata["source"] = file_name
235
  all_chunks.extend(chunks)
236
-
237
  documents.append(content)
238
  document_names.append(file_name)
239
-
240
  current_progress += progress_per_file
241
  progress_bar.progress(int(current_progress))
242
-
243
  # Initialize vector store with chunks instead of full documents
244
  status_container.info("πŸ”„ Initializing vector store...")
245
  vector_store = FAISS.from_documents(
246
  all_chunks,
247
  embeddings
248
  )
249
-
250
- # Calculate progress steps per file
251
- progress_per_file = 70 / len(uploaded_files) # 70% of progress for file processing
252
- current_progress = 10
253
-
254
- for idx, uploaded_file in enumerate(uploaded_files):
255
- file_name = uploaded_file.name
256
- status_container.info(f"πŸ”„ Processing document {idx + 1}/{len(uploaded_files)}: {file_name}")
257
- details_container.text(f"πŸ“„ Current file: {file_name}")
258
-
259
- # Create a temporary file to save the PDF
260
- with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
261
- # Write the uploaded file content to the temporary file
262
- tmp_file.write(uploaded_file.getvalue())
263
- tmp_file.flush()
264
-
265
- # Use PyPDFLoader to load the PDF
266
- loader = PyPDFLoader(tmp_file.name)
267
- pdf_documents = loader.load()
268
-
269
- # Extract text content from the PDF
270
- content = "\n".join(doc.page_content for doc in pdf_documents)
271
-
272
- # Store in database
273
- details_container.text(f"πŸ’Ύ Storing {file_name} in database...")
274
- doc_id = insert_document(st.session_state.db_conn, file_name, content)
275
- if not doc_id:
276
- status_container.error(f"❌ Failed to store document: {file_name}")
277
- continue
278
-
279
- documents.append(content)
280
- document_names.append(file_name)
281
-
282
- # Update progress
283
- current_progress += progress_per_file
284
- progress_bar.progress(int(current_progress))
285
-
286
- if not documents:
287
- status_container.error("❌ No documents were successfully processed")
288
- return
289
-
290
- # Initialize vector store (80-90% progress)
291
- status_container.info("πŸ”„ Initializing vector store...")
292
- details_container.text("πŸ” Creating vector embeddings...")
293
- vector_store = initialize_faiss(embeddings, documents, document_names)
294
- if not vector_store:
295
- status_container.error("❌ Failed to initialize vector store")
296
- return
297
-
298
- # Store vector store in session state
299
- st.session_state.vector_store = vector_store
300
- progress_bar.progress(90)
301
-
302
  # Verify vector store
303
  status_container.info("πŸ”„ Verifying document indexing...")
304
  details_container.text("✨ Performing final checks...")
305
  if not verify_vector_store(vector_store):
306
  status_container.error("❌ Vector store verification failed")
307
  return
308
-
309
  # Initialize QA system (90-100% progress)
310
  status_container.info("πŸ”„ Setting up QA system...")
311
  qa_system = initialize_qa_system(vector_store)
312
  if not qa_system:
313
  status_container.error("❌ Failed to initialize QA system")
314
  return
315
-
316
  # Store QA system in session state
317
  st.session_state.qa_system = qa_system
318
-
319
  # Complete!
320
  progress_bar.progress(100)
321
  status_container.success("βœ… Documents processed successfully!")
322
- details_container.markdown("""
323
- πŸŽ‰ **Ready to chat!**
324
- - Documents loaded: {}
325
- - Total content size: {:.2f} KB
326
- - Vector store initialized
327
- - QA system ready
328
-
329
- You can now start asking questions about your documents!
330
- """.format(
331
- len(documents),
332
- sum(len(doc) for doc in documents) / 1024
333
- ))
334
-
 
 
335
  # Add notification
336
  st.balloons()
337
-
338
  # Set chat ready flag
339
  st.session_state.chat_ready = True
340
-
341
  except Exception as e:
342
  status_container.error(f"❌ Error processing documents: {e}")
343
  details_container.error(traceback.format_exc())
@@ -345,44 +322,46 @@ def handle_document_upload(uploaded_files):
345
  st.session_state.vector_store = None
346
  st.session_state.qa_system = None
347
  st.session_state.chat_ready = False
348
- except Exception as e:
349
-
350
  finally:
351
  # Clean up progress display after 5 seconds if successful
352
  if st.session_state.get('qa_system') is not None:
353
  time.sleep(5)
354
  progress_container.empty()
355
 
 
356
  def display_vector_store_info():
357
- """Display information about the current vector store state."""
 
 
358
  if 'vector_store' not in st.session_state:
359
  st.info("ℹ️ No documents loaded yet.")
360
  return
361
-
362
  try:
363
  # Get the vector store from session state
364
  vector_store = st.session_state.vector_store
365
-
366
  # Get basic stats
367
  test_query = vector_store.similarity_search("test", k=1)
368
  doc_count = len(test_query)
369
-
370
  # Create an expander for detailed info
371
  with st.expander("πŸ“Š Knowledge Base Status"):
372
  col1, col2 = st.columns(2)
373
-
374
  with col1:
375
  st.metric(
376
  label="Documents Loaded",
377
  value=doc_count
378
  )
379
-
380
  with col2:
381
  st.metric(
382
  label="System Status",
383
  value="Ready" if verify_vector_store(vector_store) else "Not Ready"
384
  )
385
-
386
  # Display sample queries
387
  if verify_vector_store(vector_store):
388
  st.markdown("### πŸ” Sample Document Snippets")
@@ -391,14 +370,21 @@ def display_vector_store_info():
391
  with st.container():
392
  st.markdown(f"**Snippet {i}:**")
393
  st.text(doc.page_content[:200] + "...")
394
-
395
  except Exception as e:
396
  st.error(f"Error displaying vector store info: {e}")
397
  st.error(traceback.format_exc())
398
 
399
 
400
  def initialize_qa_system(vector_store):
401
- """Initialize QA system with optimized retrieval."""
 
 
 
 
 
 
 
402
  try:
403
  llm = ChatOpenAI(
404
  temperature=0.5,
@@ -439,8 +425,7 @@ Tone and Language: Use formal and professional language, ensuring clarity and pr
439
 
440
  Accuracy: Double-check all information for accuracy and completeness before providing it to the user.
441
 
442
-
443
- """),
444
  MessagesPlaceholder(variable_name="chat_history"),
445
  ("human", "{input}\n\nContext: {context}")
446
  ])
@@ -474,10 +459,20 @@ Accuracy: Double-check all information for accuracy and completeness before prov
474
  except Exception as e:
475
  st.error(f"Error initializing QA system: {e}")
476
  return None
477
-
 
478
  # FAISS vector store initialization
479
  def initialize_faiss(embeddings, documents, document_names):
480
- """Initialize FAISS vector store."""
 
 
 
 
 
 
 
 
 
481
  try:
482
  from langchain.vectorstores import FAISS
483
 
@@ -491,10 +486,16 @@ def initialize_faiss(embeddings, documents, document_names):
491
  st.error(f"Error initializing FAISS: {e}")
492
  return None
493
 
 
494
  # Embeddings model retrieval
495
  @st.cache_resource
496
  def get_embeddings_model():
497
- """Get the embeddings model."""
 
 
 
 
 
498
  try:
499
  from langchain.embeddings import HuggingFaceEmbeddings
500
 
@@ -503,4 +504,4 @@ def get_embeddings_model():
503
  return embeddings
504
  except Exception as e:
505
  st.error(f"Error loading embeddings model: {e}")
506
- return None
 
1
  # utils/database.py
2
+
3
  # Update the imports first
4
  from langchain_community.chat_models import ChatOpenAI
5
  from langchain_core.messages import (
 
26
  import time
27
  import io
28
  import tempfile
 
 
29
  from sqlite3 import Error
30
 
31
+
32
  def create_connection(db_file):
33
+ """
34
+ Create a database connection to the SQLite database.
35
+
36
+ Args:
37
+ db_file (str): Path to the SQLite database file.
38
+ Returns:
39
+ sqlite3.Connection: Database connection object or None if an error occurs.
40
+ """
41
  conn = None
42
  try:
43
  conn = sqlite3.connect(db_file)
 
46
  st.error("Failed to connect to database. Please try again or contact support.")
47
  return None
48
 
49
+
50
  def create_tables(conn):
51
+ """
52
+ Create necessary tables in the database.
53
+
54
+ Args:
55
+ conn (sqlite3.Connection): SQLite database connection.
56
+ """
57
  try:
58
  sql_create_documents_table = '''
59
  CREATE TABLE IF NOT EXISTS documents (
 
94
 
95
 
96
  def process_document(file_path):
97
+ """
98
+ Process a PDF document with proper chunking.
99
+
100
+ Args:
101
+ file_path (str): Path to the PDF file.
102
+ Returns:
103
+ tuple: (list of document chunks, full content of the document).
104
+ """
105
  # Load PDF
106
  loader = PyPDFLoader(file_path)
107
  documents = loader.load()
108
+
109
  # Create text splitter
110
  text_splitter = RecursiveCharacterTextSplitter(
111
  chunk_size=1000,
 
113
  length_function=len,
114
  separators=["\n\n", "\n", " ", ""]
115
  )
116
+
117
  # Split documents into chunks
118
  chunks = text_splitter.split_documents(documents)
119
+
120
  # Extract text content for database storage
121
  full_content = "\n".join(doc.page_content for doc in documents)
122
+
123
  return chunks, full_content
124
 
125
+
126
def get_documents(conn):
    """
    Retrieve all documents from the database.

    Args:
        conn (sqlite3.Connection): SQLite database connection.
    Returns:
        tuple: (list of document contents, list of document names).
        Both lists are empty when the table has no rows or on a database error.
    """
    try:
        rows = conn.cursor().execute("SELECT content, name FROM documents").fetchall()

        # No documents stored yet.
        if not rows:
            return [], []

        # Unzip the (content, name) row pairs into two parallel lists.
        contents, names = zip(*rows)
        return list(contents), list(names)

    except Error as e:
        # Surface the failure in the Streamlit UI and degrade to empty results.
        st.error(f"Error retrieving documents: {e}")
        return [], []
152
 
153
+
154
  def insert_document(conn, name, content):
155
+ """
156
+ Insert a new document into the database.
157
+
158
  Args:
159
+ conn (sqlite3.Connection): SQLite database connection.
160
+ name (str): Name of the document.
161
+ content (str): Content of the document.
 
162
  Returns:
163
+ int: ID of the inserted document, or None if insertion failed.
164
  """
165
  try:
166
  cursor = conn.cursor()
 
169
  cursor.execute(sql, (name, content))
170
  conn.commit()
171
  return cursor.lastrowid
172
+
173
  except Error as e:
174
  st.error(f"Error inserting document: {e}")
175
  return None
176
 
177
+
178
  def verify_vector_store(vector_store):
179
+ """
180
+ Verify that the vector store has documents loaded.
181
+
182
  Args:
183
+ vector_store (FAISS): FAISS vector store instance.
 
184
  Returns:
185
+ bool: True if vector store is properly initialized with documents.
186
  """
187
  try:
188
  # Try to perform a simple similarity search
 
193
  return False
194
 
195
 
 
196
  def handle_document_upload(uploaded_files):
197
+ """
198
+ Handle document upload with progress tracking.
199
+
200
+ Args:
201
+ uploaded_files (list): List of uploaded files.
202
+ """
203
  try:
204
  # Initialize session state variables if they don't exist
205
  if 'qa_system' not in st.session_state:
206
  st.session_state.qa_system = None
207
  if 'vector_store' not in st.session_state:
208
  st.session_state.vector_store = None
209
+
210
  # Create a progress container
211
  progress_container = st.empty()
212
  status_container = st.empty()
213
  details_container = st.empty()
214
+
215
  # Initialize progress bar
216
  progress_bar = progress_container.progress(0)
217
  status_container.info("πŸ”„ Initializing document processing...")
218
+
219
  # Reset existing states
220
  if st.session_state.vector_store is not None:
221
  st.session_state.vector_store = None
222
  if st.session_state.qa_system is not None:
223
  st.session_state.qa_system = None
224
+
225
  # Initialize embeddings (10% progress)
226
  status_container.info("πŸ”„ Initializing embeddings model...")
227
  embeddings = get_embeddings_model()
 
229
  status_container.error("❌ Failed to initialize embeddings model")
230
  return
231
  progress_bar.progress(10)
232
+
233
+ # Process documents
234
  all_chunks = []
235
  documents = []
236
  document_names = []
237
+
238
  progress_per_file = 70 / len(uploaded_files)
239
  current_progress = 10
240
+
241
  for idx, uploaded_file in enumerate(uploaded_files):
242
  file_name = uploaded_file.name
243
  status_container.info(f"πŸ”„ Processing document {idx + 1}/{len(uploaded_files)}: {file_name}")
244
+
245
  # Create temporary file
246
  with tempfile.NamedTemporaryFile(delete=False, suffix='.pdf') as tmp_file:
247
  tmp_file.write(uploaded_file.getvalue())
248
  tmp_file.flush()
249
+
250
  # Process document with chunking
251
  chunks, content = process_document(tmp_file.name)
252
+
253
  # Store in database
254
  doc_id = insert_document(st.session_state.db_conn, file_name, content)
255
  if not doc_id:
256
  status_container.error(f"❌ Failed to store document: {file_name}")
257
  continue
258
+
259
  # Add chunks with metadata
260
  for chunk in chunks:
261
  chunk.metadata["source"] = file_name
262
  all_chunks.extend(chunks)
263
+
264
  documents.append(content)
265
  document_names.append(file_name)
266
+
267
  current_progress += progress_per_file
268
  progress_bar.progress(int(current_progress))
269
+
270
  # Initialize vector store with chunks instead of full documents
271
  status_container.info("πŸ”„ Initializing vector store...")
272
  vector_store = FAISS.from_documents(
273
  all_chunks,
274
  embeddings
275
  )
276
+
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
277
  # Verify vector store
278
  status_container.info("πŸ”„ Verifying document indexing...")
279
  details_container.text("✨ Performing final checks...")
280
  if not verify_vector_store(vector_store):
281
  status_container.error("❌ Vector store verification failed")
282
  return
283
+
284
  # Initialize QA system (90-100% progress)
285
  status_container.info("πŸ”„ Setting up QA system...")
286
  qa_system = initialize_qa_system(vector_store)
287
  if not qa_system:
288
  status_container.error("❌ Failed to initialize QA system")
289
  return
290
+
291
  # Store QA system in session state
292
  st.session_state.qa_system = qa_system
293
+
294
  # Complete!
295
  progress_bar.progress(100)
296
  status_container.success("βœ… Documents processed successfully!")
297
+ details_container.markdown(
298
+ """
299
+ πŸŽ‰ **Ready to chat!**
300
+ - Documents loaded: {}
301
+ - Total content size: {:.2f} KB
302
+ - Vector store initialized
303
+ - QA system ready
304
+
305
+ You can now start asking questions about your documents!
306
+ """.format(
307
+ len(documents),
308
+ sum(len(doc) for doc in documents) / 1024
309
+ )
310
+ )
311
+
312
  # Add notification
313
  st.balloons()
314
+
315
  # Set chat ready flag
316
  st.session_state.chat_ready = True
317
+
318
  except Exception as e:
319
  status_container.error(f"❌ Error processing documents: {e}")
320
  details_container.error(traceback.format_exc())
 
322
  st.session_state.vector_store = None
323
  st.session_state.qa_system = None
324
  st.session_state.chat_ready = False
325
+
 
326
  finally:
327
  # Clean up progress display after 5 seconds if successful
328
  if st.session_state.get('qa_system') is not None:
329
  time.sleep(5)
330
  progress_container.empty()
331
 
332
+
333
  def display_vector_store_info():
334
+ """
335
+ Display information about the current vector store state.
336
+ """
337
  if 'vector_store' not in st.session_state:
338
  st.info("ℹ️ No documents loaded yet.")
339
  return
340
+
341
  try:
342
  # Get the vector store from session state
343
  vector_store = st.session_state.vector_store
344
+
345
  # Get basic stats
346
  test_query = vector_store.similarity_search("test", k=1)
347
  doc_count = len(test_query)
348
+
349
  # Create an expander for detailed info
350
  with st.expander("πŸ“Š Knowledge Base Status"):
351
  col1, col2 = st.columns(2)
352
+
353
  with col1:
354
  st.metric(
355
  label="Documents Loaded",
356
  value=doc_count
357
  )
358
+
359
  with col2:
360
  st.metric(
361
  label="System Status",
362
  value="Ready" if verify_vector_store(vector_store) else "Not Ready"
363
  )
364
+
365
  # Display sample queries
366
  if verify_vector_store(vector_store):
367
  st.markdown("### πŸ” Sample Document Snippets")
 
370
  with st.container():
371
  st.markdown(f"**Snippet {i}:**")
372
  st.text(doc.page_content[:200] + "...")
373
+
374
  except Exception as e:
375
  st.error(f"Error displaying vector store info: {e}")
376
  st.error(traceback.format_exc())
377
 
378
 
379
  def initialize_qa_system(vector_store):
380
+ """
381
+ Initialize QA system with optimized retrieval.
382
+
383
+ Args:
384
+ vector_store (FAISS): FAISS vector store instance.
385
+ Returns:
386
+ dict: QA system chain or None if initialization fails.
387
+ """
388
  try:
389
  llm = ChatOpenAI(
390
  temperature=0.5,
 
425
 
426
  Accuracy: Double-check all information for accuracy and completeness before providing it to the user.
427
 
428
+ """),
 
429
  MessagesPlaceholder(variable_name="chat_history"),
430
  ("human", "{input}\n\nContext: {context}")
431
  ])
 
459
  except Exception as e:
460
  st.error(f"Error initializing QA system: {e}")
461
  return None
462
+
463
+
464
  # FAISS vector store initialization
465
  def initialize_faiss(embeddings, documents, document_names):
466
+ """
467
+ Initialize FAISS vector store.
468
+
469
+ Args:
470
+ embeddings (Embeddings): Embeddings model to use.
471
+ documents (list): List of document contents.
472
+ document_names (list): List of document names.
473
+ Returns:
474
+ FAISS: FAISS vector store instance or None if initialization fails.
475
+ """
476
  try:
477
  from langchain.vectorstores import FAISS
478
 
 
486
  st.error(f"Error initializing FAISS: {e}")
487
  return None
488
 
489
+
490
  # Embeddings model retrieval
491
  @st.cache_resource
492
  def get_embeddings_model():
493
+ """
494
+ Get the embeddings model.
495
+
496
+ Returns:
497
+ Embeddings: Embeddings model instance or None if loading fails.
498
+ """
499
  try:
500
  from langchain.embeddings import HuggingFaceEmbeddings
501
 
 
504
  return embeddings
505
  except Exception as e:
506
  st.error(f"Error loading embeddings model: {e}")
507
+ return None