import gradio as gr
import os

from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceEndpoint
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate

api_token = os.getenv("HF_TOKEN")

list_llm = ["meta-llama/Meta-Llama-3-8B-Instruct", "mistralai/Mistral-7B-Instruct-v0.2"]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]
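# os.path.basename strips the org prefix for display,
# e.g. "meta-llama/Meta-Llama-3-8B-Instruct" -> "Meta-Llama-3-8B-Instruct".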

# Custom prompt template
CUSTOM_PROMPT_TEMPLATE = """
**Response Instructions:**
- Write a detailed, coherent, and insightful article that fully addresses the query based on the provided context.
- Adhere to the following principles:
1. **Define the Core Subject**: Introduce and build the discussion logically around the main topic.
2. **Establish Connections**: Highlight relationships between ideas and concepts with reasoning and examples.
3. **Elaborate on Key Points**: Provide in-depth explanations and emphasize the significance of concepts.
4. **Maintain Objectivity**: Use only the context provided, avoiding speculation or external knowledge.
5. **Ensure Structure and Clarity**: Present information sequentially for a smooth narrative flow.
6. **Engage with Content**: Explore implicit meanings, resolve doubts, and address counterpoints logically.
7. **Provide Examples and Insights**: Use examples to clarify abstract ideas and offer actionable steps if applicable.
8. **Logical Depth**: Draw inferences, explain purposes, and refute opposing ideas when necessary.
Context: {context}
Question: {question}
Chat History: {chat_history}
Craft the response as a seamless, thorough, and authoritative explanation that naturally integrates all aspects of the query.
"""

# Load and split text documents
def load_doc(list_file_path):
    pages = []
    for file_path in list_file_path:
        if file_path.endswith('.txt'):
            loader = TextLoader(file_path)
            pages.extend(loader.load())
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1024,
        chunk_overlap=64
    )
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits
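
# Usage sketch (illustrative only; "notes.txt" is a hypothetical local file):
#   splits = load_doc(["notes.txt"])
#   print(len(splits), splits[0].page_content[:80])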

# Create vector database
def create_db(splits):
    embeddings = HuggingFaceEmbeddings()
    vectordb = FAISS.from_documents(splits, embeddings)
    return vectordb
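
# With no model_name argument, HuggingFaceEmbeddings falls back to its default
# sentence-transformers model. Quick sanity check (illustrative only):
#   vectordb = create_db(load_doc(["notes.txt"]))
#   hits = vectordb.similarity_search("main topic", k=3)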

# Initialize langchain LLM chain with custom prompt
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    llm = HuggingFaceEndpoint(
        repo_id=llm_model,
        huggingfacehub_api_token=api_token,
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
    )
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    # Create custom prompt
    custom_prompt = PromptTemplate(
        template=CUSTOM_PROMPT_TEMPLATE,
        input_variables=["context", "question", "chat_history"]
    )
    retriever = vector_db.as_retriever()
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
        combine_docs_chain_kwargs={"prompt": custom_prompt}
    )
    return qa_chain
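
# The chain can be exercised outside the UI, mirroring how conversation()
# invokes it below (illustrative only; assumes vector_db was built first):
#   chain = initialize_llmchain(list_llm[0], 0.5, 4096, 3, vector_db)
#   out = chain.invoke({"question": "What is this document about?", "chat_history": []})
#   print(out["answer"])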

# Initialize database
def initialize_database(list_file_obj, progress=gr.Progress()):
    list_file_path = [x.name for x in list_file_obj if x is not None]
    doc_splits = load_doc(list_file_path)
    vector_db = create_db(doc_splits)
    return vector_db, "Text database created!"

# Initialize LLM
def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    llm_name = list_llm[llm_option]
    qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
    return qa_chain, "QA chain initialized. Chatbot is ready!"

def format_chat_history(message, chat_history):
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history
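
# Example: format_chat_history("next question", [("Hi", "Hello!")])
# returns ["User: Hi", "Assistant: Hello!"]; the current message is not included.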

def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)
    response = qa_chain.invoke({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
    response_sources = response["source_documents"]
    # Get sources (with fallback for when there are fewer than 3 sources)
    sources_content = []
    sources_pages = []
    for i in range(3):
        if i < len(response_sources):
            sources_content.append(response_sources[i].page_content.strip())
            sources_pages.append(0)  # For text files, we don't have page numbers
        else:
            sources_content.append("")
            sources_pages.append(0)
    new_history = history + [(message, response_answer)]
    return (qa_chain, gr.update(value=""), new_history,
            sources_content[0], sources_pages[0],
            sources_content[1], sources_pages[1],
            sources_content[2], sources_pages[2])
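
# Note: conversation() returns a 9-tuple ordered as (chain state, cleared input
# box, updated history, then content/page for each of the three reference
# slots); the event handlers below must list their outputs in the same order.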

def demo():
    with gr.Blocks(theme=gr.themes.Default(primary_hue="red", secondary_hue="pink", neutral_hue="sky")) as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        gr.HTML("<center><h1>RAG Text Document Chatbot</h1></center>")
        gr.Markdown("""<b>Query your text documents!</b> This AI agent performs retrieval augmented generation (RAG) on TXT documents.
        <b>Please do not upload confidential documents.</b>
        """)
        with gr.Row():
            with gr.Column(scale=86):
                gr.Markdown("<b>Step 1 - Upload Text Files and Initialize RAG pipeline</b>")
                with gr.Row():
                    document = gr.Files(height=300, file_count="multiple",
                                        file_types=["txt"], interactive=True,
                                        label="Upload TXT documents")
                with gr.Row():
                    db_btn = gr.Button("Create text database")
                with gr.Row():
                    db_progress = gr.Textbox(value="Not initialized", show_label=False)
                gr.Markdown("<b>Select Large Language Model (LLM) and input parameters</b>")
                with gr.Row():
                    llm_btn = gr.Radio(list_llm_simple, label="Available LLMs",
                                       value=list_llm_simple[0], type="index")
                with gr.Row():
                    with gr.Accordion("LLM input parameters", open=False):
                        with gr.Row():
                            slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.5,
                                                           step=0.1, label="Temperature")
                        with gr.Row():
                            slider_maxtokens = gr.Slider(minimum=128, maximum=8192, value=4096,
                                                         step=128, label="Max New Tokens")
                        with gr.Row():
                            slider_topk = gr.Slider(minimum=1, maximum=10, value=3,
                                                    step=1, label="top-k")
                with gr.Row():
                    qachain_btn = gr.Button("Initialize Question Answering Chatbot")
                with gr.Row():
                    llm_progress = gr.Textbox(value="Not initialized", show_label=False)
            with gr.Column(scale=200):
                gr.Markdown("<b>Step 2 - Chat with your Document</b>")
                chatbot = gr.Chatbot(height=505)
                # Collect the three reference components in lists so the event
                # handlers can address each row individually.
                doc_sources = []
                source_pages = []
                with gr.Accordion("Relevant context from the source document", open=False):
                    for i in range(1, 4):
                        with gr.Row():
                            doc_source = gr.Textbox(label=f"Reference {i}", lines=2,
                                                    container=True, scale=20)
                            source_page = gr.Number(label="Line Range", scale=1, visible=False)
                            doc_sources.append(doc_source)
                            source_pages.append(source_page)
                with gr.Row():
                    msg = gr.Textbox(placeholder="Ask a question", container=True)
                with gr.Row():
                    submit_btn = gr.Button("Submit")
                    clear_btn = gr.ClearButton([msg, chatbot], value="Clear")
        # Event handlers
        # Interleave the reference components (content, page, content, page, ...)
        # to match the order of values returned by conversation().
        source_outputs = [comp for pair in zip(doc_sources, source_pages) for comp in pair]
        db_btn.click(initialize_database, inputs=[document], outputs=[vector_db, db_progress])
        qachain_btn.click(initialize_LLM,
                          inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
                          outputs=[qa_chain, llm_progress]).then(
            lambda: [None, "", 0, "", 0, "", 0],
            outputs=[chatbot] + source_outputs,
            queue=False)
        msg.submit(conversation, inputs=[qa_chain, msg, chatbot],
                   outputs=[qa_chain, msg, chatbot] + source_outputs,
                   queue=False)
        submit_btn.click(conversation, inputs=[qa_chain, msg, chatbot],
                         outputs=[qa_chain, msg, chatbot] + source_outputs,
                         queue=False)
        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
                        outputs=[chatbot] + source_outputs,
                        queue=False)
    demo.queue().launch(debug=True)

if __name__ == "__main__":
    demo()