import os
import gradio as gr

from langchain_community.vectorstores import FAISS
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceEndpoint


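# Instruction-tuned models selectable in the UI (Hugging Face Hub repo IDs);
# list_llm_simple keeps just the short model names for the radio buttons.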
list_llm = [
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "mistralai/Mistral-7B-Instruct-v0.2",
]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]


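# Read the Hugging Face API token from the Space secret `hf_token`
# rather than hard-coding it in the source.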
api_token = os.getenv("hf_token")


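# Load PDFs and split the pages into overlapping chunks sized for retrieval.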
def load_doc(list_file_path):
    pages = []
    for file_path in list_file_path:
        try:
            loader = PyPDFLoader(file_path)
            pages.extend(loader.load())
        except Exception as e:
            print(f"Error loading {file_path}: {e}")
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=1024,
        chunk_overlap=32,
    )
    return text_splitter.split_documents(pages)


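# Embed the chunks and index them in an in-memory FAISS vector store.
# Note: HuggingFaceEmbeddings() with no arguments falls back to the library's
# default sentence-transformers model, downloaded on first run.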
def create_db(doc_splits):
    embeddings = HuggingFaceEmbeddings()
    vectordb = FAISS.from_documents(doc_splits, embeddings)
    return vectordb


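# Build the conversational RAG chain: a hosted LLM endpoint, a buffer memory
# that accumulates the dialogue, and a retriever over the FAISS index.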
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db):
    llm = HuggingFaceEndpoint(
        repo_id=llm_model,
        huggingfacehub_api_token=api_token,
        temperature=temperature,
        max_new_tokens=max_tokens,
        top_k=top_k,
    )

    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key="answer",
        return_messages=True,
    )

    retriever = vector_db.as_retriever()
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    return qa_chain


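# Gradio callback: turn the uploaded file objects into paths, chunk the PDFs,
# and build the vector store held in gr.State.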
def initialize_database(list_file_obj):
    # Guard against the button being clicked before any file is uploaded.
    if not list_file_obj:
        return None, "Please upload at least one PDF first."
    list_file_path = [x.name for x in list_file_obj if x is not None]
    doc_splits = load_doc(list_file_path)
    vector_db = create_db(doc_splits)
    return vector_db, "Database created!"


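# Gradio callback: map the selected radio index back to a repo ID and build
# the QA chain with the chosen sampling parameters.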
def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db):
    llm_name = list_llm[llm_option]
    qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db)
    return qa_chain, "QA chain initialized. Chatbot is ready!"


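# Format the last few (user, bot) turns as plain strings. Note that the
# chain's ConversationBufferMemory supplies `chat_history` internally, so the
# value passed explicitly in conversation() is likely overridden by the
# memory; it is kept for compatibility with chains configured without memory.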
def format_chat_history(chat_history, max_messages=5):
    formatted = []
    for user_msg, bot_msg in chat_history[-max_messages:]:
        formatted.append(f"User: {user_msg}")
        formatted.append(f"Assistant: {bot_msg}")
    return formatted


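# Main chat callback: query the chain, strip any "Helpful Answer:" prefix the
# model may emit, and surface the top three source chunks with 1-based page
# numbers (padded with empty entries so the output count stays fixed).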
def conversation(qa_chain, message, history):
    formatted_history = format_chat_history(history)
    try:
        response = qa_chain.invoke({"question": message, "chat_history": formatted_history})
        answer = response["answer"]
        if "Helpful Answer:" in answer:
            answer = answer.split("Helpful Answer:")[-1]

        sources = response["source_documents"]
        top_sources = [(s.page_content.strip(), s.metadata.get("page", 0) + 1) for s in sources[:3]]
        while len(top_sources) < 3:
            top_sources.append(("", 0))

        new_history = history + [(message, answer)]
        return qa_chain, gr.update(value=""), new_history, *sum(top_sources, ())
    except Exception as e:
        print(f"Conversation error: {e}")
        return qa_chain, gr.update(value=""), history, "", 0, "", 0, "", 0


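# Assemble the Gradio UI: gr.State holds the vector store and QA chain across
# callbacks; the left column configures the pipeline, the right column chats.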
def demo():
    with gr.Blocks(theme=gr.themes.Default(primary_hue="red", secondary_hue="pink")) as demo:
        vector_db = gr.State()
        qa_chain = gr.State()

        gr.HTML("<center><h1>AERO RAG (CPU-only, Safe Secret)</h1></center>")
        gr.Markdown("<b>Query your PDF documents!</b> CPU-only mode. The token must be stored in the Hugging Face Space secret `hf_token`.")

        with gr.Row():
            with gr.Column(scale=1):
                document = gr.Files(file_count="multiple", file_types=[".pdf"], label="Upload PDFs")
                db_btn = gr.Button("Create vector DB")
                db_progress = gr.Textbox(value="Not initialized", show_label=False)

                # Pass slider parameters by keyword: `step` is keyword-only in
                # recent Gradio releases, so positional args would fail here.
                llm_btn = gr.Radio(list_llm_simple, label="Available LLMs", value=list_llm_simple[0], type="index")
                slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.5, step=0.1, label="Temperature")
                slider_maxtokens = gr.Slider(minimum=128, maximum=4096, value=1024, step=128, label="Max New Tokens")
                slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Top-K Tokens")
                qachain_btn = gr.Button("Initialize QA Chatbot")
                llm_progress = gr.Textbox(value="Not initialized", show_label=False)

            with gr.Column(scale=8):
                chatbot = gr.Chatbot(height=480)
                doc_source1 = gr.Textbox(label="Reference 1", lines=2)
                source1_page = gr.Number(label="Page")
                doc_source2 = gr.Textbox(label="Reference 2", lines=2)
                source2_page = gr.Number(label="Page")
                doc_source3 = gr.Textbox(label="Reference 3", lines=2)
                source3_page = gr.Number(label="Page")
                msg = gr.Textbox(placeholder="Ask a question")
                submit_btn = gr.Button("Submit")
                clear_btn = gr.ClearButton([msg, chatbot], value="Clear")

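        # Wire UI events to the callbacks above; the gr.State outputs keep the
        # chain and vector store alive between interactions.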
        db_btn.click(initialize_database, inputs=[document], outputs=[vector_db, db_progress])
        qachain_btn.click(initialize_LLM,
                          inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
                          outputs=[qa_chain, llm_progress])
        msg.submit(conversation,
                   inputs=[qa_chain, msg, chatbot],
                   outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page])
        submit_btn.click(conversation,
                         inputs=[qa_chain, msg, chatbot],
                         outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page])
        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
                        inputs=None,
                        outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page])

    demo.queue().launch(debug=True)


if __name__ == "__main__":
    demo()