Spaces:
Runtime error
Runtime error
adarsh
committed on
Commit
·
5839dd5
1
Parent(s):
6eca0dd
updated
Browse files
- app.py +24 -9
- src/helper.py +2 -2
app.py
CHANGED
|
@@ -1,17 +1,21 @@
|
|
| 1 |
import streamlit as st
|
| 2 |
from langchain.prompts import PromptTemplate
|
| 3 |
-
from
|
| 4 |
from src.helper import download_hf_embeddings, text_split, download_hf_model
|
| 5 |
-
from
|
| 6 |
import os
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
from src.prompt import prompt_template
|
| 9 |
from langchain.chains import RetrievalQA
|
| 10 |
import time
|
|
|
|
|
|
|
| 11 |
|
| 12 |
# Load environment variables
|
|
|
|
| 13 |
|
| 14 |
PINECONE_API_KEY = os.environ.get('PINECONE_API_KEY')
|
|
|
|
| 15 |
|
| 16 |
# Set page configuration
|
| 17 |
st.set_page_config(page_title="Medical Chatbot", page_icon="🏥", layout="wide")
|
|
@@ -62,14 +66,25 @@ st.title("🏥 Medical Chatbot")
|
|
| 62 |
# Initialize the chatbot components
|
| 63 |
@st.cache_resource
|
| 64 |
def initialize_chatbot():
|
|
|
|
| 65 |
embeddings = download_hf_embeddings()
|
| 66 |
-
model_name_or_path = "TheBloke/Llama-2-7B-Chat-GGML"
|
| 67 |
-
model_basename = "llama-2-7b-chat.ggmlv3.q4_0.bin"
|
| 68 |
-
model_path = download_hf_model(model_name_or_path, model_basename)
|
|
|
|
| 69 |
llm = CTransformers(model=model_path,
|
| 70 |
model_type="llama",
|
| 71 |
config={'max_new_tokens': 512,
|
| 72 |
'temperature': 0.8})
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
|
| 74 |
chain_type_kwargs = {"prompt": PROMPT}
|
| 75 |
docsearch = LangchainPinecone(index, embeddings.embed_query, "text")
|
|
@@ -114,11 +129,11 @@ def load_animation():
|
|
| 114 |
st.markdown("""
|
| 115 |
<div class="footer">
|
| 116 |
<div class="social-icons">
|
| 117 |
-
<a href="https://github.com/
|
| 118 |
-
<a href="https://linkedin.com/in/
|
| 119 |
-
<a href="https://
|
| 120 |
</div>
|
| 121 |
-
<p
|
| 122 |
</div>
|
| 123 |
""", unsafe_allow_html=True)
|
| 124 |
|
|
|
|
| 1 |
import streamlit as st
|
| 2 |
from langchain.prompts import PromptTemplate
|
| 3 |
+
from langchain_community.llms import CTransformers
|
| 4 |
from src.helper import download_hf_embeddings, text_split, download_hf_model
|
| 5 |
+
from langchain_community.vectorstores import Pinecone as LangchainPinecone
|
| 6 |
import os
|
| 7 |
from dotenv import load_dotenv
|
| 8 |
from src.prompt import prompt_template
|
| 9 |
from langchain.chains import RetrievalQA
|
| 10 |
import time
|
| 11 |
+
from pinecone import Pinecone
|
| 12 |
+
|
| 13 |
|
| 14 |
# Load environment variables
|
| 15 |
+
load_dotenv()
|
| 16 |
|
| 17 |
PINECONE_API_KEY = os.environ.get('PINECONE_API_KEY')
|
| 18 |
+
index_name = "medicure-chatbot"
|
| 19 |
|
| 20 |
# Set page configuration
|
| 21 |
st.set_page_config(page_title="Medical Chatbot", page_icon="🏥", layout="wide")
|
|
|
|
| 66 |
# Initialize the chatbot components
|
| 67 |
@st.cache_resource
|
| 68 |
def initialize_chatbot():
|
| 69 |
+
|
| 70 |
embeddings = download_hf_embeddings()
|
| 71 |
+
# model_name_or_path = "TheBloke/Llama-2-7B-Chat-GGML"
|
| 72 |
+
# model_basename = "llama-2-7b-chat.ggmlv3.q4_0.bin"
|
| 73 |
+
# model_path = download_hf_model(model_name_or_path, model_basename)
|
| 74 |
+
model_path = "TheBloke/Llama-2-7B-Chat-GGML"
|
| 75 |
llm = CTransformers(model=model_path,
|
| 76 |
model_type="llama",
|
| 77 |
config={'max_new_tokens': 512,
|
| 78 |
'temperature': 0.8})
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
# initiaize pinecone
|
| 84 |
+
|
| 85 |
+
pc = Pinecone(api_key=PINECONE_API_KEY)
|
| 86 |
+
index = pc.Index(index_name)
|
| 87 |
+
|
| 88 |
PROMPT = PromptTemplate(template=prompt_template, input_variables=["context", "question"])
|
| 89 |
chain_type_kwargs = {"prompt": PROMPT}
|
| 90 |
docsearch = LangchainPinecone(index, embeddings.embed_query, "text")
|
|
|
|
| 129 |
st.markdown("""
|
| 130 |
<div class="footer">
|
| 131 |
<div class="social-icons">
|
| 132 |
+
<a href="https://github.com/4darsh-Dev" target="_blank"><i class="fab fa-github"></i></a>
|
| 133 |
+
<a href="https://linkedin.com/in/adarsh-maurya-dev" target="_blank"><i class="fab fa-linkedin"></i></a>
|
| 134 |
+
<a href="https://adarshmaurya.onionreads.com" target="_blank"><i class="fas fa-globe"></i></a>
|
| 135 |
</div>
|
| 136 |
+
<p>Adarsh Maurya© 2024 Medical Chatbot. All rights reserved.</p>
|
| 137 |
</div>
|
| 138 |
""", unsafe_allow_html=True)
|
| 139 |
|
src/helper.py
CHANGED
|
@@ -1,6 +1,6 @@
|
|
| 1 |
-
from
|
| 2 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
| 3 |
-
from
|
| 4 |
from huggingface_hub import hf_hub_download
|
| 5 |
|
| 6 |
|
|
|
|
| 1 |
+
from langchain_community.document_loaders import PyPDFDirectoryLoader
|
| 2 |
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
| 3 |
+
from langchain_community.embeddings import HuggingFaceEmbeddings
|
| 4 |
from huggingface_hub import hf_hub_download
|
| 5 |
|
| 6 |
|