import gradio as gr
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.vectorstores import DocArrayInMemorySearch
from langchain.chains import RetrievalQA, ConversationalRetrievalChain
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain import HuggingFaceHub
from langchain.llms import LlamaCpp
from huggingface_hub import hf_hub_download
from langchain.document_loaders import (
    EverNoteLoader,
    TextLoader,
    UnstructuredEPubLoader,
    UnstructuredHTMLLoader,
    UnstructuredMarkdownLoader,
    UnstructuredODTLoader,
    UnstructuredPowerPointLoader,
    UnstructuredWordDocumentLoader,
    PyPDFLoader,
)
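# One loader per supported format (txt, pdf, doc/docx, enex, epub, html, md, odt, ppt/pptx).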
import param
import os
import torch
from conversadocs.bones import DocChat

dc = DocChat()
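# DocChat (conversadocs/bones.py) presumably wraps document loading, the vector
# store, and LLM inference; the Gradio UI below only wires widgets to its methods.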
##### GRADIO CONFIG ####

# Install llama-cpp-python at startup, built with cuBLAS GPU acceleration when CUDA is available.
if torch.cuda.is_available():
    print("CUDA is available on this system.")
    os.system('CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python --force-reinstall --upgrade --no-cache-dir --verbose')
else:
    print("CUDA is not available on this system.")
    os.system('pip install llama-cpp-python')
| css=""" | |
| #col-container {max-width: 700px; margin-left: auto; margin-right: auto;} | |
| """ | |
| title = """ | |
| <div style="text-align: center;max-width: 700px;"> | |
| <h1>Chat with Documents π - Falcon, Llama-2</h1> | |
| <p style="text-align: center;">Upload txt, pdf, doc, docx, enex, epub, html, md, odt, ptt, pttx; click the "Click to Upload Files" button, <br /> | |
| Wait for the Status to show Loaded documents, start typing your questions. <br /> | |
| The app is set to store chat-history</p> | |
| </div> | |
| """ | |
theme = 'aliabid94/new-theme'
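
# Interim status flashed in an output widget while a slower callback runs (see the .then() chains below).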
def flag():
    return "PROCESSING..."
def upload_file(files, max_docs):
    file_paths = [file.name for file in files]
    return dc.call_load_db(file_paths, max_docs)
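
# Returns "" to clear the question textbox, plus the full history to refresh the chatbot widget.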
def predict(message, chat_history, max_k):
    print(message)
    bot_message = dc.convchain(message, max_k)
    print(bot_message)
    return "", dc.get_chats()
def convert():
    docs = dc.get_sources()
    data_docs = ""
    for i in range(0, len(docs), 2):
        txt = docs[i][1].replace("\n", "<br>")
        sc = "Archive: " + docs[i + 1][1]["source"]
        try:
            pg = "Page: " + str(docs[i + 1][1]["page"])
        except KeyError:  # loaders other than PyPDFLoader set no page metadata
            pg = "Document Data"
        data_docs += f"<hr><h3 style='color:red;'>{pg}</h3><p>{txt}</p><p>{sc}</p>"
    return data_docs
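
##### UI LAYOUT: Chat / Chat Options / Change model ####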
with gr.Blocks(theme=theme, css=css) as demo:
    with gr.Tab("Chat"):
        with gr.Column(elem_id="col-container"):
            gr.HTML(title)
            # Accept the formats advertised in the title, not only pdf.
            upload_button = gr.UploadButton(
                "Click to Upload Files",
                file_types=["pdf", "txt", "doc", "docx", "enex", "epub", "html", "md", "odt", "ppt", "pptx"],
                file_count="multiple",
            )
            file_output = gr.HTML()
            chatbot = gr.Chatbot([], elem_id="chatbot").style(height=300)
            msg = gr.Textbox(label="Question", placeholder="Type your question and hit Enter ")
        with gr.Column():
            sou = gr.HTML("")
| with gr.Tab("Chat Options"): | |
| max_docs = gr.inputs.Slider(1, 10, default=3, label="Maximum querys to the DB.", step=1) | |
| row_table = gr.HTML("<hr><h4> </h2>") | |
| clear_button = gr.Button("CLEAR CHAT HISTORY", ) | |
| link_output = gr.HTML("") | |
| clear_button.click(flag,[],[link_output]).then(dc.clr_history,[], [link_output]).then(lambda: None, None, chatbot, queue=False) | |
| upload_button.upload(flag,[],[file_output]).then(upload_file, [upload_button, max_docs], file_output) | |
| with gr.Tab("Change model"): | |
| gr.HTML("<h3>Only models from the GGML library are accepted.</h3>") | |
| repo_ = gr.Textbox(label="Repository" ,value="TheBloke/Llama-2-7B-Chat-GGML") | |
| file_ = gr.Textbox(label="File name" ,value="llama-2-7b-chat.ggmlv3.q2_K.bin") | |
| max_tokens = gr.inputs.Slider(1, 2048, default=16, label="Max new tokens", step=1) | |
| temperature = gr.inputs.Slider(0.1, 1., default=0.2, label="Temperature", step=0.1) | |
| top_k = gr.inputs.Slider(0.01, 1., default=0.95, label="Top K", step=0.01) | |
| top_p = gr.inputs.Slider(0, 100, default=50, label="Top P", step=1) | |
| repeat_penalty = gr.inputs.Slider(0.1, 100., default=1.2, label="Repeat penalty", step=0.1) | |
| change_model_button = gr.Button("Load Model") | |
| model_verify = gr.HTML("Loaded model Falcon 7B-instruct") | |
| default_model = gr.HTML("<hr><h4>Default Model</h2>") | |
| falcon_button = gr.Button("FALCON 7B-Instruct") | |
    msg.submit(predict, [msg, chatbot, max_docs], [msg, chatbot]).then(convert, [], [sou])
    change_model_button.click(dc.change_llm, [repo_, file_, max_tokens, temperature, top_p, top_k, repeat_penalty, max_docs], [model_verify])
    falcon_button.click(dc.default_falcon_model, [], [model_verify])

demo.launch(enable_queue=True)