import os

import gradio as gr
from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceEndpoint
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

# Hugging Face API token, read from the environment (e.g. a Space secret)
api_token = os.getenv("HF_TOKEN")
list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2"]
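# Display names with the org prefix stripped, e.g. "Meta-Llama-3-8B-Instruct"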
list_llm_simple = [os.path.basename(llm) for llm in list_llm]
# Custom prompt template
CUSTOM_PROMPT_TEMPLATE = """
**Response Instructions:**
- Write a detailed, coherent, and insightful article that fully addresses the query based on the provided context.
- Adhere to the following principles:
1. **Define the Core Subject**: Introduce and build the discussion logically around the main topic.
2. **Establish Connections**: Highlight relationships between ideas and concepts with reasoning and examples.
3. **Elaborate on Key Points**: Provide in-depth explanations and emphasize the significance of concepts.
4. **Maintain Objectivity**: Use only the context provided, avoiding speculation or external knowledge.
5. **Ensure Structure and Clarity**: Present information sequentially for a smooth narrative flow.
6. **Engage with Content**: Explore implicit meanings, resolve doubts, and address counterpoints logically.
7. **Provide Examples and Insights**: Use examples to clarify abstract ideas and offer actionable steps if applicable.
8. **Logical Depth**: Draw inferences, explain purposes, and refute opposing ideas when necessary.
Context: {context}
Question: {question}
Chat History: {chat_history}
Craft the response as a seamless, thorough, and authoritative explanation that naturally integrates all aspects of the query.
"""
# Load and split text documents
def load_doc(list_file_path):
    pages = []
    for file_path in list_file_path:
        if file_path.endswith('.txt'):
            loader = TextLoader(file_path)
            pages.extend(loader.load())
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1024,
        chunk_overlap=64
    )
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits
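# Usage sketch (hypothetical paths): load_doc(["notes.txt", "report.txt"]) returns
# Document chunks of up to 1024 characters, with a 64-character overlap between
# consecutive chunks to preserve context across boundaries.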
# Create vector database
def create_db(splits):
    embeddings = HuggingFaceEmbeddings()
    vectordb = FAISS.from_documents(splits, embeddings)
    return vectordb
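# With no model name given, HuggingFaceEmbeddings defaults to
# "sentence-transformers/all-mpnet-base-v2"; the FAISS index is held in memory
# only and is rebuilt on every upload (nothing is persisted to disk).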
# Initialize langchain LLM chain with custom prompt
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    llm = HuggingFaceEndpoint(
        repo_id=llm_model,
        huggingfacehub_api_token=api_token,
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
    )
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    # Create custom prompt
    custom_prompt = PromptTemplate(
        template=CUSTOM_PROMPT_TEMPLATE,
        input_variables=["context", "question", "chat_history"]
    )
    retriever = vector_db.as_retriever()
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
        combine_docs_chain_kwargs={"prompt": custom_prompt}
    )
    return qa_chain
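# as_retriever() with no arguments performs similarity search and returns the top 4
# chunks under LangChain's defaults; pass search_kwargs={"k": ...} to change that.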
# Initialize database
def initialize_database(list_file_obj, progress=gr.Progress()):
    list_file_path = [x.name for x in list_file_obj if x is not None]
    doc_splits = load_doc(list_file_path)
    vector_db = create_db(doc_splits)
    return vector_db, "Text database created!"
# Initialize LLM
def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    llm_name = list_llm[llm_option]
    qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
    return qa_chain, "QA chain initialized. Chatbot is ready!"
def format_chat_history(message, chat_history):
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history
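# Example: format_chat_history("next question", [("Hi", "Hello!")])
#     -> ["User: Hi", "Assistant: Hello!"]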
def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)
    response = qa_chain.invoke({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
    response_sources = response["source_documents"]
    # Get sources (with fallback for when there are fewer than 3 sources)
    sources_content = []
    sources_pages = []
    for i in range(3):
        if i < len(response_sources):
            sources_content.append(response_sources[i].page_content.strip())
            sources_pages.append(0)  # For text files, we don't have page numbers
        else:
            sources_content.append("")
            sources_pages.append(0)
    new_history = history + [(message, response_answer)]
    return (qa_chain, gr.update(value=""), new_history,
            sources_content[0], sources_pages[0],
            sources_content[1], sources_pages[1],
            sources_content[2], sources_pages[2])
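# The 9-tuple above must line up, in order, with the outputs list wired to
# msg.submit and submit_btn.click below: chain state, cleared textbox, chat
# history, then three interleaved source/page pairs.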
def demo():
    with gr.Blocks(theme=gr.themes.Default(primary_hue="red", secondary_hue="pink", neutral_hue="sky")) as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        gr.HTML("<center><h1>RAG Text Document Chatbot</h1></center>")
        gr.Markdown("""<b>Query your text documents!</b> This AI agent performs retrieval-augmented generation (RAG) on TXT documents.
        <b>Please do not upload confidential documents.</b>
        """)
        with gr.Row():
            with gr.Column(scale=86):
                gr.Markdown("<b>Step 1 - Upload Text Files and Initialize RAG pipeline</b>")
                with gr.Row():
                    document = gr.Files(height=300, file_count="multiple",
                                        file_types=["txt"], interactive=True,
                                        label="Upload TXT documents")
                with gr.Row():
                    db_btn = gr.Button("Create text database")
                with gr.Row():
                    db_progress = gr.Textbox(value="Not initialized", show_label=False)
                gr.Markdown("<b>Select Large Language Model (LLM) and input parameters</b>")
                with gr.Row():
                    llm_btn = gr.Radio(list_llm_simple, label="Available LLMs",
                                       value=list_llm_simple[0], type="index")
                with gr.Row():
                    with gr.Accordion("LLM input parameters", open=False):
                        with gr.Row():
                            slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.5,
                                                           step=0.1, label="Temperature")
                        with gr.Row():
                            slider_maxtokens = gr.Slider(minimum=128, maximum=8192, value=4096,
                                                         step=128, label="Max New Tokens")
                        with gr.Row():
                            slider_topk = gr.Slider(minimum=1, maximum=10, value=3,
                                                    step=1, label="top-k")
                with gr.Row():
                    qachain_btn = gr.Button("Initialize Question Answering Chatbot")
                with gr.Row():
                    llm_progress = gr.Textbox(value="Not initialized", show_label=False)
            with gr.Column(scale=200):
                gr.Markdown("<b>Step 2 - Chat with your Document</b>")
                chatbot = gr.Chatbot(height=505)
                # Collect the source widgets in lists so the event handlers below
                # can reference them directly (rather than via fragile name lookups)
                doc_sources = []
                source_pages = []
                with gr.Accordion("Relevant context from the source document", open=False):
                    for i in range(1, 4):
                        with gr.Row():
                            doc_source = gr.Textbox(label=f"Reference {i}", lines=2,
                                                    container=True, scale=20)
                            source_page = gr.Number(label="Line Range", scale=1, visible=False)
                            doc_sources.append(doc_source)
                            source_pages.append(source_page)
                # Interleave source/page components to match conversation()'s return order
                source_outputs = [comp for pair in zip(doc_sources, source_pages) for comp in pair]
                with gr.Row():
                    msg = gr.Textbox(placeholder="Ask a question", container=True)
                with gr.Row():
                    submit_btn = gr.Button("Submit")
                    clear_btn = gr.ClearButton([msg, chatbot], value="Clear")
        # Event handlers
        db_btn.click(initialize_database, inputs=[document], outputs=[vector_db, db_progress])
        qachain_btn.click(initialize_LLM,
                          inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
                          outputs=[qa_chain, llm_progress]).then(
            lambda: [None, "", 0, "", 0, "", 0],
            outputs=[chatbot] + source_outputs,
            queue=False)
        msg.submit(conversation, inputs=[qa_chain, msg, chatbot],
                   outputs=[qa_chain, msg, chatbot] + source_outputs,
                   queue=False)
        submit_btn.click(conversation, inputs=[qa_chain, msg, chatbot],
                         outputs=[qa_chain, msg, chatbot] + source_outputs,
                         queue=False)
        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
                        outputs=[chatbot] + source_outputs,
                        queue=False)
    demo.queue().launch(debug=True)
if __name__ == "__main__":
demo() |