Spaces:
Sleeping
Sleeping
Update chatbot_rag.py
Browse files — chatbot_rag.py (+3 −1)
chatbot_rag.py
CHANGED
|
@@ -1,9 +1,9 @@
|
|
| 1 |
-
|
| 2 |
from langchain_community.vectorstores import Chroma
|
| 3 |
from langchain_community.embeddings import HuggingFaceEmbeddings
|
| 4 |
from langchain_community.llms import HuggingFacePipeline
|
| 5 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
| 6 |
from langchain.chains import RetrievalQA
|
|
|
|
| 7 |
|
| 8 |
|
| 9 |
def build_qa():
|
|
@@ -55,12 +55,14 @@ def build_qa():
|
|
| 55 |
print("✅ QA pipeline ready.")
|
| 56 |
return qa
|
| 57 |
|
|
|
|
| 58 |
# Build at import time (so it's ready when app runs)
|
| 59 |
try:
|
| 60 |
qa_pipeline = build_qa()
|
| 61 |
except Exception as e:
|
| 62 |
qa_pipeline = None
|
| 63 |
print("❌ Failed to build QA pipeline:", e)
|
|
|
|
| 64 |
|
| 65 |
|
| 66 |
def get_answer(query: str) -> str:
|
|
|
|
|
|
|
| 1 |
from langchain_community.vectorstores import Chroma
|
| 2 |
from langchain_community.embeddings import HuggingFaceEmbeddings
|
| 3 |
from langchain_community.llms import HuggingFacePipeline
|
| 4 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
| 5 |
from langchain.chains import RetrievalQA
|
| 6 |
+
import traceback # ✅ added
|
| 7 |
|
| 8 |
|
| 9 |
def build_qa():
|
|
|
|
| 55 |
print("✅ QA pipeline ready.")
|
| 56 |
return qa
|
| 57 |
|
| 58 |
+
|
| 59 |
# Build at import time (so it's ready when app runs)
|
| 60 |
try:
|
| 61 |
qa_pipeline = build_qa()
|
| 62 |
except Exception as e:
|
| 63 |
qa_pipeline = None
|
| 64 |
print("❌ Failed to build QA pipeline:", e)
|
| 65 |
+
traceback.print_exc() # ✅ added: full error details
|
| 66 |
|
| 67 |
|
| 68 |
def get_answer(query: str) -> str:
|