Update app.py
app.py CHANGED
@@ -1,140 +1,140 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-from langchain.embeddings import HuggingFaceEmbeddings
-from langchain.vectorstores import FAISS
-from langchain.text_splitter import CharacterTextSplitter
-from langchain.document_loaders import PyPDFLoader
-import os
-
-# Load the model client
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-# Initialize vector store
-vector_store = None
-
-# Preload and process the PDF document
-#PDF_PATH = "
-
-#PDF_PATH = "
-PDF_PATH = "
-
-def preload_pdf():
-    global vector_store
-
-    # Load PDF and extract text
-    loader = PyPDFLoader(PDF_PATH)
-    documents = loader.load()
-
-    # Split the text into smaller chunks for retrieval
-    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
-    docs = text_splitter.split_documents(documents)
-
-    # Compute embeddings for the chunks
-    embeddings = HuggingFaceEmbeddings()
-    vector_store = FAISS.from_documents(docs, embeddings)
-
-    print(f"PDF '{PDF_PATH}' loaded and indexed successfully.")
-
-# Response generation
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    global vector_store
-
-    if vector_store is None:
-        return "The PDF document is not loaded. Please check the code setup."
-
-    # Retrieve relevant chunks from the PDF
-    relevant_docs = vector_store.similarity_search(message, k=3)
-    context = "\n".join([doc.page_content for doc in relevant_docs])
-
-    # Combine system message, context, and user message
-    full_system_message = (
-        f"{system_message}\n\nContext from the document:\n{context}\n\n"
-    )
-
-    messages = [{"role": "system", "content": full_system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-        response += token
-        yield response
-
-# Gradio interface
-#demo = gr.Blocks()
-
-demo = gr.Blocks(css="""
-
-.gr-chat-container {
-    display: flex;
-    background-color: skyblue;
-    justify-content: center;
-    align-items: center;
-    height: 80vh;
-    padding: 20px;
-}
-
-.gr-chat {
-    height: 90vh;
-    justify-content: center;
-    align-items: center;
-    border: 1px solid #ccc;
-    padding: 10px;
-    box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.1);
-}
-""")
-
-
-with demo:
-    with gr.Row(elem_classes=["gr-chat-container"]):
-    #with gr.Row():
-        with gr.Column(elem_classes=["gr-chat"]):
-        #with gr.Column():
-            chatbot = gr.ChatInterface(
-                respond,
-                additional_inputs=[
-                    gr.Textbox(
-                        value=(
-                            "You are going to act like a medical practitioner. Hear the symptoms, "
-                            "diagnose the disease, mention the disease in seperate line, suggest tips to overcome the issue and suggest some good habits "
-                            "to overcome the issue. Base your answers on the provided document. limit the response to 5 to 6 sentence point by point"
-                        ),visible=False,
-                        label="system_message",
-                    ),
-                    gr.Slider(minimum=1, maximum=2048, value=512, step=1,visible=False, label="Max new tokens"),
-                    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, visible=False,label="Temperature"),
-                    gr.Slider(minimum=0.1,maximum=1.0,value=0.95,step=0.05,visible=False,label="Top-p (nucleus sampling)", ),
-                ],
-                examples=[
-                    ["I am not well and feeling feverish, tired"],
-                    ["Can you guide me through quick health tips?"],
-                    ["How do I stop worrying about things I can't control?"],
-                ],
-                title="Diagnify 🕊️",
-            )
-
-
-if __name__ == "__main__":
-    preload_pdf()
-    demo.launch()
+import gradio as gr
+from huggingface_hub import InferenceClient
+from langchain.embeddings import HuggingFaceEmbeddings
+from langchain.vectorstores import FAISS
+from langchain.text_splitter import CharacterTextSplitter
+from langchain.document_loaders import PyPDFLoader
+import os
+
+# Load the model client
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+# Initialize vector store
+vector_store = None
+
+# Preload and process the PDF document
+#PDF_PATH = "general symptoms.pdf" # Path to the pre-defined PDF document
+
+#PDF_PATH = "general symptoms.pdf"
+PDF_PATH = "general symptoms.pdf"
+
+def preload_pdf():
+    global vector_store
+
+    # Load PDF and extract text
+    loader = PyPDFLoader(PDF_PATH)
+    documents = loader.load()
+
+    # Split the text into smaller chunks for retrieval
+    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
+    docs = text_splitter.split_documents(documents)
+
+    # Compute embeddings for the chunks
+    embeddings = HuggingFaceEmbeddings()
+    vector_store = FAISS.from_documents(docs, embeddings)
+
+    print(f"PDF '{PDF_PATH}' loaded and indexed successfully.")
+
+# Response generation
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    global vector_store
+
+    if vector_store is None:
+        return "The PDF document is not loaded. Please check the code setup."
+
+    # Retrieve relevant chunks from the PDF
+    relevant_docs = vector_store.similarity_search(message, k=3)
+    context = "\n".join([doc.page_content for doc in relevant_docs])
+
+    # Combine system message, context, and user message
+    full_system_message = (
+        f"{system_message}\n\nContext from the document:\n{context}\n\n"
+    )
+
+    messages = [{"role": "system", "content": full_system_message}]
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
+
+    response = ""
+
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
+        response += token
+        yield response
+
+# Gradio interface
+#demo = gr.Blocks()
+
+demo = gr.Blocks(css="""
+
+.gr-chat-container {
+    display: flex;
+    background-color: skyblue;
+    justify-content: center;
+    align-items: center;
+    height: 80vh;
+    padding: 20px;
+}
+
+.gr-chat {
+    height: 90vh;
+    justify-content: center;
+    align-items: center;
+    border: 1px solid #ccc;
+    padding: 10px;
+    box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.1);
+}
+""")
+
+
+with demo:
+    with gr.Row(elem_classes=["gr-chat-container"]):
+    #with gr.Row():
+        with gr.Column(elem_classes=["gr-chat"]):
+        #with gr.Column():
+            chatbot = gr.ChatInterface(
+                respond,
+                additional_inputs=[
+                    gr.Textbox(
+                        value=(
+                            "You are going to act like a medical practitioner. Hear the symptoms, "
+                            "diagnose the disease, mention the disease on a separate line, suggest tips to overcome the issue, and suggest some good habits "
+                            "to overcome the issue. Base your answers on the provided document. Limit the response to 5 to 6 sentences, point by point."
+                        ),visible=False,
+                        label="system_message",
+                    ),
+                    gr.Slider(minimum=1, maximum=2048, value=512, step=1,visible=False, label="Max new tokens"),
+                    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, visible=False,label="Temperature"),
+                    gr.Slider(minimum=0.1,maximum=1.0,value=0.95,step=0.05,visible=False,label="Top-p (nucleus sampling)", ),
+                ],
+                examples=[
+                    ["I am not well and feeling feverish, tired"],
+                    ["Can you guide me through quick health tips?"],
+                    ["How do I stop worrying about things I can't control?"],
+                ],
+                title="Diagnify 🕊️",
+            )
+
+
+if __name__ == "__main__":
+    preload_pdf()
+    demo.launch()
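
A quick way to sanity-check the updated file outside the Space is to import its functions and drain the streaming generator directly. This is only a minimal sketch, not part of the commit: it assumes the file is saved locally as app.py, that "general symptoms.pdf" sits next to it, that the environment can reach the Hugging Face Inference API, and that the langchain/FAISS/pypdf dependencies behind the imports are installed; the prompt and sampling values below are arbitrary examples.

# Minimal local smoke test (sketch); assumes app.py and "general symptoms.pdf"
# are in the current directory. Nothing here is part of the Space itself.
from app import preload_pdf, respond

preload_pdf()  # builds the FAISS index over the PDF chunks

# respond() streams: it yields the growing response string, so draining the
# generator leaves the final answer in `final`.
final = ""
for partial in respond(
    "I am not well and feeling feverish, tired",  # example prompt from the UI
    history=[],
    system_message="You are going to act like a medical practitioner.",
    max_tokens=256,
    temperature=0.7,
    top_p=0.95,
):
    final = partial
print(final)

Note that the langchain.embeddings, langchain.vectorstores, and langchain.document_loaders import paths used in app.py come from older langchain releases; on newer versions these classes live under langchain_community, so the sketch assumes a release where the original paths still resolve.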