Update app.py
app.py CHANGED
@@ -686,7 +686,7 @@ def create_embeddings(chunks):
 def retrieve_relevant_chunks(question, chunks, embeddings, top_k=3):
     """Retrieve most relevant chunks for a question"""
     if embedding_model is None or embeddings is None:
-        return chunks[:3]
+        return chunks[:3]  # Fallback to first 3 chunks
 
     try:
         question_embedding = embedding_model.encode([question], show_progress_bar=False)
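The hunk above only touches the fallback path. For context, a minimal sketch of how a retrieval helper with this signature can work, assuming a sentence-transformers model in the module-level embedding_model and cosine similarity over precomputed chunk embeddings; the similarity logic is an assumption, only the signature, the fallback, and the encode call are visible in the diff:

import numpy as np

# Assumption: embedding_model is the app's module-level SentenceTransformer,
# set to None when the model fails to load.

def retrieve_relevant_chunks(question, chunks, embeddings, top_k=3):
    """Retrieve the chunks most relevant to a question."""
    if embedding_model is None or embeddings is None:
        return chunks[:3]  # Fallback to first 3 chunks

    try:
        question_embedding = embedding_model.encode([question], show_progress_bar=False)
        # Cosine similarity between the question and each precomputed chunk embedding.
        q = question_embedding / np.linalg.norm(question_embedding, axis=1, keepdims=True)
        e = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
        scores = (e @ q.T).ravel()
        top_idx = np.argsort(scores)[::-1][:top_k]
        return [chunks[i] for i in top_idx]
    except Exception:
        return chunks[:3]  # Same fallback if encoding or scoring fails

Returning the first three chunks when no embedding model is available keeps the chat usable in a degraded mode instead of failing outright.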
@@ -975,7 +975,7 @@ with gr.Blocks(
             return history
 
         if not processed_markdown:
-            return history + [
+            return history + [{"role": "user", "content": message}, {"role": "assistant", "content": "❌ Please process a PDF document first before asking questions."}]
 
         try:
             # Use RAG to get relevant chunks from markdown
@@ -1003,14 +1003,14 @@ Please provide a clear and helpful answer based on the context provided."""
 
             # Generate response using local Gemma 3n
             response_text = gemma_model.chat(prompt)
-            return history + [
+            return history + [{"role": "user", "content": message}, {"role": "assistant", "content": response_text}]
 
         except Exception as e:
             error_msg = f"❌ Error generating response: {str(e)}"
             print(f"Full error: {e}")
             import traceback
             traceback.print_exc()
-            return history + [
+            return history + [{"role": "user", "content": message}, {"role": "assistant", "content": error_msg}]
 
     send_btn.click(
         fn=chatbot_response,