import gradio as gr
import os
|
|
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.llms import HuggingFaceHub
|
|
from pathlib import Path
import chromadb
|
|
|
|
# mBART-50 many-to-many model used for translating between English and Indian languages
from transformers import MBartForConditionalGeneration, MBart50TokenizerFast
translation_model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
translation_tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
|
|
# (display name, mBART-50 language code) pairs offered in the UI
languages_list = [("Gujarati", "gu_IN"), ("Hindi", "hi_IN"), ("Bengali", "bn_IN"), ("Malayalam", "ml_IN"),
                  ("Marathi", "mr_IN"), ("Tamil", "ta_IN"), ("Telugu", "te_IN")]
|
|
|
|
|
|
def english_to_indian(sentence):
    # Translate English text into the currently selected Indian language.
    # mBART-50 language codes are case-sensitive: English is "en_XX".
    translation_tokenizer.src_lang = "en_XX"
    encoded = translation_tokenizer(sentence, return_tensors="pt")
    generated_tokens = translation_model.generate(
        **encoded, forced_bos_token_id=translation_tokenizer.lang_code_to_id[selected_lang])
    return translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
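# Example: with selected_lang == "hi_IN", english_to_indian("How are you?")
# returns the Hindi translation as a plain string.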
|
|
|
|
def indian_to_english(sentence):
    # Translate text in the currently selected Indian language back into English.
    translation_tokenizer.src_lang = selected_lang
    encoded = translation_tokenizer(sentence, return_tensors="pt")
    generated_tokens = translation_model.generate(
        **encoded, forced_bos_token_id=translation_tokenizer.lang_code_to_id["en_XX"])
    return translation_tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
|
|
def initialize_lang(language):
    # Dropdown callback: remember the selected target-language code (e.g. "hi_IN")
    # so the translation helpers use it for subsequent turns.
    global selected_lang
    selected_lang = language
    print(language)
|
|
|
|
llm_model = "mistralai/Mistral-7B-Instruct-v0.2"
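# Served via the HF Hub Inference API; LangChain's HuggingFaceHub reads the
# HUGGINGFACEHUB_API_TOKEN environment variable for authentication.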
|
|
|
|
list_llm = ["mistralai/Mistral-7B-Instruct-v0.2", "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "mistralai/Mistral-7B-Instruct-v0.1", "google/gemma-7b-it", "google/gemma-2b-it",
            "HuggingFaceH4/zephyr-7b-beta", "meta-llama/Llama-2-7b-chat-hf", "microsoft/phi-2",
            "TinyLlama/TinyLlama-1.1B-Chat-v1.0", "mosaicml/mpt-7b-instruct",
            "tiiuae/falcon-7b-instruct", "google/flan-t5-xxl"]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]
|
|
def load_doc(list_file_path, chunk_size, chunk_overlap):
    # Load each PDF and split its pages into overlapping chunks for retrieval
    loaders = [PyPDFLoader(x) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap)
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits
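# Example (hypothetical file name): load_doc(["manual.pdf"], 2048, 256)
# yields a list of LangChain Document chunks ready for embedding.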
|
|
|
|
def create_db(splits, collection_name):
    # Embed the chunks and index them in a Chroma vector store
    embedding = HuggingFaceEmbeddings()
    new_client = chromadb.EphemeralClient()
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
        client=new_client,
        collection_name=collection_name,
    )
    return vectordb
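# chromadb.EphemeralClient keeps the collection in memory only;
# nothing is persisted to disk between runs.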
|
|
|
|
def load_db():
    # Reconnect to an existing Chroma store using the same embedding function
    embedding = HuggingFaceEmbeddings()
    vectordb = Chroma(
        embedding_function=embedding)
    return vectordb
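# With no client or collection name given, Chroma falls back to its defaults.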
|
|
|
|
def initialize_llmchain(temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    # Build the conversational retrieval chain around a Hub-hosted LLM
    progress(0.5, desc="Initializing HF Hub...")
    # Quantization flags such as load_in_8bit apply to local model loading,
    # not to the hosted Inference API, so only generation kwargs are passed here
    llm = HuggingFaceHub(repo_id=llm_model, model_kwargs={"temperature": temperature,
                                                          "max_new_tokens": max_tokens,
                                                          "top_k": top_k})
    progress(0.75, desc="Defining buffer memory...")
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
    retriever = vector_db.as_retriever()
    progress(0.8, desc="Defining retrieval chain...")
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
        chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
    )
    progress(0.9, desc="Done!")
    return qa_chain
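# chain_type="stuff" simply concatenates all retrieved chunks into a single prompt.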
|
|
|
|
def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
    # Collect the paths of the uploaded (non-empty) files
    list_file_path = [x.name for x in list_file_obj if x is not None]
    progress(0.1, desc="Creating collection name...")
    # Derive a Chroma-compatible collection name from the first file name
    collection_name = Path(list_file_path[0]).stem
    collection_name = collection_name.replace(" ", "-")
    collection_name = collection_name[:50]
    # The name must start and end with an alphanumeric character; strings are
    # immutable, so rebuild the string rather than assigning to an index
    if not collection_name[0].isalnum():
        collection_name = 'A' + collection_name[1:]
    if not collection_name[-1].isalnum():
        collection_name = collection_name[:-1] + 'Z'
    print('Collection name: ', collection_name)
    progress(0.25, desc="Loading document...")
    doc_splits = load_doc(list_file_path, chunk_size, chunk_overlap)
    progress(0.5, desc="Generating vector database...")
    vector_db = create_db(doc_splits, collection_name)
    progress(0.9, desc="Done!")
    return vector_db, collection_name, "Complete!"
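# vector_db and collection_name are returned into gr.State holders defined in demo().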
|
|
|
|
def initialize_LLM(llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    # Initialize the QA chain with the configured model
    print("llm_name: ", llm_model)
    qa_chain = initialize_llmchain(llm_temperature, max_tokens, top_k, vector_db, progress)
    return qa_chain, "Complete!"
|
|
|
|
def format_chat_history(message, chat_history):
    # Flatten the (user, bot) history pairs into plain strings for the chain
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
        formatted_chat_history.append(f"User: {user_message}")
        formatted_chat_history.append(f"Assistant: {bot_message}")
    return formatted_chat_history
|
|
def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)
    # Generate a response using the QA chain
    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
    response_answer = response["answer"]
    # Log a translated preview of the answer in the selected language
    print(english_to_indian(response["answer"][:100]))
    if response_answer.find("Helpful Answer:") != -1:
        response_answer = response_answer.split("Helpful Answer:")[-1]
    response_sources = response["source_documents"]
    response_source1 = response_sources[0].page_content.strip()
    response_source2 = response_sources[1].page_content.strip()
    response_source3 = response_sources[2].page_content.strip()
    # Source metadata is zero-based; display 1-based page numbers
    response_source1_page = response_sources[0].metadata["page"] + 1
    response_source2_page = response_sources[1].metadata["page"] + 1
    response_source3_page = response_sources[2].metadata["page"] + 1
    # Append the new turn and clear the input box
    new_history = history + [(message, response_answer)]
    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page
|
|
def upload_file(file_obj):
    # Collect the temp-file paths of all uploaded files
    list_file_path = []
    for file in file_obj:
        list_file_path.append(file.name)
    return list_file_path
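# Gradio file objects expose the uploaded temp-file path via their .name attribute.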
|
|
|
|
def demo():
    with gr.Blocks(theme="base") as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()
        # Folder with the PDFs bundled into the Space image
        pdf_directory = '/home/user/app/pdfs'
|
|
        def process_pdfs():
            # Collect the bundled PDFs that pre-populate the file component
            pdf_files = [os.path.join(pdf_directory, file) for file in os.listdir(pdf_directory) if file.endswith(".pdf")]
            return pdf_files

        # gr.Files accepts a callable as `value`; it is invoked when the app loads
        pdf_dict = {"value": process_pdfs, "height": 100, "file_count": "multiple",
                    "visible": False, "file_types": ["pdf"], "interactive": True,
                    "label": "Uploaded PDF documents"}

        with gr.Row():
            document = gr.Files(**pdf_dict)
        with gr.Row():
            db_choice = gr.Radio(["ChromaDB"], label="Vector database type", value="ChromaDB", type="index", info="Choose your vector database", visible=False)
        with gr.Accordion("Advanced options - Document text splitter", open=False, visible=False):
            with gr.Row():
                slider_chunk_size = gr.Slider(value=2048, label="Chunk size", info="Chunk size", interactive=False, visible=False)
            with gr.Row():
                slider_chunk_overlap = gr.Slider(value=256, label="Chunk overlap", info="Chunk overlap", interactive=False, visible=False)
|
|
        with gr.Accordion("Advanced options - LLM model", open=False, visible=False):
            with gr.Row():
                slider_temperature = gr.Slider(value=0.1, visible=False)
            with gr.Row():
                slider_maxtokens = gr.Slider(value=1000, visible=False)
            with gr.Row():
                slider_topk = gr.Slider(value=3, visible=False)
|
|
        with gr.Row():
            db_progress = gr.Textbox(label="Vector database initialization", value="None", visible=True)
        with gr.Row():
            db_btn = gr.Button("Generate vector database...")
        with gr.Row():
            llm_progress = gr.Textbox(value="None", label="QA chain initialization", visible=True)
        with gr.Row():
            qachain_btn = gr.Button("Initialize question-answering chain...")
|
|
        with gr.Row():
            lang_btn = gr.Dropdown(languages_list, label="Languages", value="hi_IN",
                                   type="value", info="Choose your language", interactive=True)
            # Record the chosen language code for the translation helpers
            lang_btn.select(initialize_lang, inputs=lang_btn, outputs=None)

        chatbot = gr.Chatbot(height=300)
|
|
        with gr.Row():
            msg = gr.Textbox(placeholder="Type message", container=True)
        with gr.Accordion("References", open=False):
            with gr.Row():
                doc_source1 = gr.Textbox(label="Reference 1", lines=2, container=True, scale=20)
                source1_page = gr.Number(label="Page", scale=1)
            with gr.Row():
                doc_source2 = gr.Textbox(label="Reference 2", lines=2, container=True, scale=20)
                source2_page = gr.Number(label="Page", scale=1)
            with gr.Row():
                doc_source3 = gr.Textbox(label="Reference 3", lines=2, container=True, scale=20)
                source3_page = gr.Number(label="Page", scale=1)
|
|
        with gr.Row():
            submit_btn = gr.Button("Submit")
            clear_btn = gr.ClearButton([msg, chatbot])
        # Preprocessing events
        db_btn.click(initialize_database,
                     inputs=[document, slider_chunk_size, slider_chunk_overlap],
                     outputs=[vector_db, collection_name, db_progress])
        qachain_btn.click(initialize_LLM,
                          inputs=[slider_temperature, slider_maxtokens, slider_topk, vector_db],
                          outputs=[qa_chain, llm_progress]).then(lambda: [None, "", 0, "", 0, "", 0],
                                                                 inputs=None,
                                                                 outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
                                                                 queue=False)

        # Chatbot events
        msg.submit(conversation,
                   inputs=[qa_chain, msg, chatbot],
                   outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
                   queue=False)
        submit_btn.click(conversation,
                         inputs=[qa_chain, msg, chatbot],
                         outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
                         queue=False)
        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0],
                        inputs=None,
                        outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
                        queue=False)
    demo.queue().launch(debug=True)
|
|
|
|
if __name__ == "__main__":
    demo()