Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,61 +1,63 @@
|
|
| 1 |
-
import streamlit as st
|
| 2 |
-
import logging
|
| 3 |
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
from langchain_community.vectorstores import FAISS
|
| 5 |
-
from langchain_community.document_loaders import CSVLoader
|
| 6 |
from langchain_community.embeddings import HuggingFaceEmbeddings
|
| 7 |
from langchain.prompts import PromptTemplate
|
| 8 |
from langchain.llms import HuggingFaceHub
|
| 9 |
-
import dotenv
|
| 10 |
-
import yaml
|
| 11 |
-
import os
|
| 12 |
-
import zipfile
|
| 13 |
-
|
| 14 |
-
zip_file = "faiss_index.zip"
|
| 15 |
-
with zipfile.ZipFile(zip_file, 'r') as zip_ref:
|
| 16 |
-
zip_ref.extractall(".") # Extract to the current directory
|
| 17 |
-
|
| 18 |
-
print("Unzipping completed successfully.")
|
| 19 |
|
|
|
|
| 20 |
dotenv.load_dotenv()
|
| 21 |
|
|
|
|
| 22 |
def load_config():
|
| 23 |
-
with open("
|
| 24 |
-
|
| 25 |
-
return config
|
| 26 |
|
| 27 |
-
hf_token = os.getenv("HUGGING")
|
| 28 |
config = load_config()
|
|
|
|
|
|
|
|
|
|
| 29 |
logging.basicConfig(level=logging.INFO)
|
| 30 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
embeddings_model = HuggingFaceEmbeddings(model_name=config["embedding_model"])
|
| 32 |
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
logging.info("Vector database successfully created and saved.")
|
| 40 |
-
except Exception as e:
|
| 41 |
-
logging.error("Error creating vector database:", exc_info=e)
|
| 42 |
|
| 43 |
-
|
|
|
|
| 44 |
try:
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 48 |
|
| 49 |
-
vectordb = FAISS.load_local(
|
| 50 |
-
config["vector_db_path"], embeddings_model, allow_dangerous_deserialization=True
|
| 51 |
-
)
|
| 52 |
retriever = vectordb.as_retriever(score_threshold=config["score_threshold"])
|
| 53 |
relevant_docs = retriever.get_relevant_documents(query)[:3]
|
| 54 |
|
| 55 |
if not relevant_docs:
|
| 56 |
-
return "No relevant information found."
|
| 57 |
|
| 58 |
summarized_context = " ".join(doc.page_content for doc in relevant_docs)
|
|
|
|
| 59 |
prompt_template = """
|
| 60 |
Given the following health-related context and a question, generate a structured answer:
|
| 61 |
|
|
@@ -77,55 +79,58 @@ def get_qa_chain(query):
|
|
| 77 |
)
|
| 78 |
|
| 79 |
response = llm(prompt)
|
| 80 |
-
return response.strip()
|
|
|
|
| 81 |
except Exception as e:
|
| 82 |
-
logging.error("Error
|
| 83 |
-
return "
|
| 84 |
|
| 85 |
-
def
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
st.markdown(
|
| 89 |
-
"""
|
| 90 |
-
<style>
|
| 91 |
-
.stApp {
|
| 92 |
-
background-color: #f0f2f6;
|
| 93 |
-
color: #333;
|
| 94 |
-
font-family: 'Arial', sans-serif;
|
| 95 |
-
}
|
| 96 |
-
.title {
|
| 97 |
-
color: #2E7D32;
|
| 98 |
-
text-align: center;
|
| 99 |
-
}
|
| 100 |
-
.query-input {
|
| 101 |
-
border-radius: 10px;
|
| 102 |
-
padding: 10px;
|
| 103 |
-
}
|
| 104 |
-
.response-box {
|
| 105 |
-
background-color: #ffffff;
|
| 106 |
-
padding: 15px;
|
| 107 |
-
border-radius: 8px;
|
| 108 |
-
box-shadow: 2px 2px 10px rgba(0,0,0,0.1);
|
| 109 |
-
}
|
| 110 |
-
</style>
|
| 111 |
-
""",
|
| 112 |
-
unsafe_allow_html=True
|
| 113 |
-
)
|
| 114 |
-
|
| 115 |
-
st.markdown("<h1 class='title'>🩺 Health Disease Chatbot</h1>", unsafe_allow_html=True)
|
| 116 |
-
st.write("Enter a question related to health conditions, symptoms, or treatments.")
|
| 117 |
-
|
| 118 |
-
query = st.text_input("Your health-related question:", key="query", help="Ask about diseases, symptoms, or treatments.")
|
| 119 |
-
|
| 120 |
-
if st.button("Get Information"):
|
| 121 |
-
if query:
|
| 122 |
-
response = get_qa_chain(query)
|
| 123 |
-
st.markdown(f"<div class='response-box'><b>Response:</b><br>{response}</div>", unsafe_allow_html=True)
|
| 124 |
-
else:
|
| 125 |
-
st.warning("Please enter a query to get a response.")
|
| 126 |
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
+
import logging
|
| 3 |
+
import threading
|
| 4 |
+
import streamlit as st
|
| 5 |
+
import requests
|
| 6 |
+
import yaml
|
| 7 |
+
import dotenv
|
| 8 |
+
from flask import Flask, request, jsonify
|
| 9 |
from langchain_community.vectorstores import FAISS
|
|
|
|
| 10 |
from langchain_community.embeddings import HuggingFaceEmbeddings
|
| 11 |
from langchain.prompts import PromptTemplate
|
| 12 |
from langchain.llms import HuggingFaceHub
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
|
| 14 |
+
# Load environment variables
dotenv.load_dotenv()

# Load config
def load_config():
    """Parse ``config.yaml`` from the working directory and return its contents."""
    with open("config.yaml", "r") as cfg_file:
        return yaml.safe_load(cfg_file)
|
|
|
| 21 |
|
|
|
|
| 22 |
config = load_config()

# Initialize Flask app (Backend)
app = Flask(__name__)
logging.basicConfig(level=logging.INFO)

# Hugging Face Token
# NOTE(review): env var key is literally "HUGGING" — confirm this is the
# intended name; the value is not used anywhere in the visible code.
hf_token = os.getenv("HUGGING")

# Load Embeddings Model
# Model name comes from config.yaml ("embedding_model" key); must run after
# load_config() above.
embeddings_model = HuggingFaceEmbeddings(model_name=config["embedding_model"])
|
| 33 |
|
| 34 |
+
# Load FAISS Vector Store.
# SECURITY: allow_dangerous_deserialization=True unpickles the index file —
# acceptable only because the index is produced by this project, never by
# untrusted users; do not point vector_db_path at user-supplied data.
if os.path.exists(config["vector_db_path"]):
    vectordb = FAISS.load_local(
        config["vector_db_path"], embeddings_model, allow_dangerous_deserialization=True
    )
else:
    # Lazy %-style args: message is only formatted if the record is emitted.
    logging.error("Vector database not found at %s", config["vector_db_path"])
    # Sentinel so request handlers can report the missing DB instead of crashing.
    vectordb = None
|
|
|
|
|
|
|
|
|
|
| 40 |
|
| 41 |
+
@app.route("/query", methods=["POST"])
|
| 42 |
+
def get_qa_chain():
|
| 43 |
try:
|
| 44 |
+
data = request.json
|
| 45 |
+
query = data.get("query")
|
| 46 |
+
|
| 47 |
+
if not query:
|
| 48 |
+
return jsonify({"error": "Query is required"}), 400
|
| 49 |
+
|
| 50 |
+
if not vectordb:
|
| 51 |
+
return jsonify({"error": "Vector database is missing"}), 500
|
| 52 |
|
|
|
|
|
|
|
|
|
|
| 53 |
retriever = vectordb.as_retriever(score_threshold=config["score_threshold"])
|
| 54 |
relevant_docs = retriever.get_relevant_documents(query)[:3]
|
| 55 |
|
| 56 |
if not relevant_docs:
|
| 57 |
+
return jsonify({"response": "No relevant information found."})
|
| 58 |
|
| 59 |
summarized_context = " ".join(doc.page_content for doc in relevant_docs)
|
| 60 |
+
|
| 61 |
prompt_template = """
|
| 62 |
Given the following health-related context and a question, generate a structured answer:
|
| 63 |
|
|
|
|
| 79 |
)
|
| 80 |
|
| 81 |
response = llm(prompt)
|
| 82 |
+
return jsonify({"response": response.strip()})
|
| 83 |
+
|
| 84 |
except Exception as e:
|
| 85 |
+
logging.error("Error processing query:", exc_info=e)
|
| 86 |
+
return jsonify({"error": "An error occurred processing your request"}), 500
|
| 87 |
|
| 88 |
+
def run_flask():
    """Serve the Flask backend; meant to be launched from a daemon thread."""
    # Reloader stays off — it only works when Flask runs in the main thread.
    app.run(host="0.0.0.0", port=7860, debug=False, use_reloader=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 91 |
|
| 92 |
+
# Start Flask server in a background daemon thread so Streamlit can keep the
# main thread.
threading.Thread(target=run_flask, daemon=True).start()

# --- STREAMLIT FRONTEND ---
st.set_page_config(page_title="🩺 Health Disease Chatbot", page_icon="🩺", layout="centered")

st.markdown(
    """
    <style>
    .stApp {
        background-color: #f0f2f6;
        color: #333;
        font-family: 'Arial', sans-serif;
    }
    .title {
        color: #2E7D32;
        text-align: center;
    }
    .response-box {
        background-color: #ffffff;
        padding: 15px;
        border-radius: 8px;
        box-shadow: 2px 2px 10px rgba(0,0,0,0.1);
    }
    </style>
    """,
    unsafe_allow_html=True
)

st.markdown("<h1 class='title'>🩺 Health Disease Chatbot</h1>", unsafe_allow_html=True)
st.write("Enter a question related to health conditions, symptoms, or treatments.")

query = st.text_input("Your health-related question:", key="query")

# Bug fix: 0.0.0.0 is a server bind-all address, not a valid client
# destination on every platform — talk to the local backend via loopback.
API_URL = "http://127.0.0.1:7860/query"

if st.button("Get Information"):
    if query:
        try:
            # timeout guards the UI against hanging forever if the backend
            # thread is slow, busy, or failed to start.
            response = requests.post(API_URL, json={"query": query}, timeout=60)
        except requests.RequestException:
            st.error("Error fetching response. Please try again.")
        else:
            if response.status_code == 200:
                # NOTE(review): model output is injected into raw HTML with
                # unsafe_allow_html=True — confirm upstream output is trusted
                # or escape it before rendering.
                st.markdown(
                    f"<div class='response-box'><b>Response:</b><br>{response.json().get('response')}</div>",
                    unsafe_allow_html=True,
                )
            else:
                st.error("Error fetching response. Please try again.")
    else:
        st.warning("Please enter a query.")
|