clean output

Files changed:
- README.md +4 -0
- src/streamlit_app.py +14 -4
README.md
CHANGED

@@ -7,6 +7,10 @@ sdk: docker
 app_port: 8501
 tags:
 - streamlit
+- RAG
+- Ollama
+- FAISS
+
 pinned: false
 short_description: LLM-powered question-answering system using RAG
 ---
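For reference, the tags block in the Space's YAML front matter after this hunk is applied (reconstructed directly from the diff above):

tags:
- streamlit
- RAG
- Ollama
- FAISS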
src/streamlit_app.py
CHANGED

@@ -12,6 +12,7 @@ import os
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 from huggingface_hub import InferenceClient
+import re


 HF_TOKEN = os.environ.get("HF_TOKEN")

@@ -92,8 +93,17 @@ if query:
     result = qa.invoke({"query":query})
     raw = result["result"]
     answer = raw.split("<answer - put answer after this tag>", 1)
-
-
-
-
+    raw_answer = result["result"]  # from your RAG pipeline
+
+    # Extract text from href
+    match = re.search(r'href="([^"]+)"', raw_answer)
+    if match:
+        clean_answer = match.group(1)
+    else:
+        clean_answer = raw_answer
+
+    st.success(clean_answer)
+    # st.success(answer[-1])
+    # st.success(answer)
+    # st.success(result)

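For reference, the cleanup logic in isolation: a minimal, self-contained sketch (not the committed module) of the same href-extraction step, with invented sample strings to show the behavior. If the RAG answer embeds its payload in an href attribute, the regex pulls out the link target; otherwise the raw answer passes through unchanged.

import re

def clean_rag_answer(raw_answer: str) -> str:
    """Return the href target if the answer wraps it in a link,
    otherwise return the raw answer unchanged."""
    match = re.search(r'href="([^"]+)"', raw_answer)
    return match.group(1) if match else raw_answer

# Invented sample outputs, for illustration only
print(clean_rag_answer('<a href="https://example.com/doc">source</a>'))  # -> https://example.com/doc
print(clean_rag_answer("plain text answer"))                             # -> plain text answer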