File size: 2,829 Bytes
94232b5
 
 
 
 
 
 
 
 
 
 
 
 
479cb43
94232b5
 
 
 
 
7dbfbec
94232b5
 
 
 
 
 
 
 
 
 
c353b99
94232b5
 
 
 
 
133e845
94232b5
 
 
 
 
 
 
 
 
 
 
 
 
7dbfbec
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
94232b5
 
 
 
 
 
7dbfbec
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
import gradio as gr
import os
import tempfile
from langchain.document_loaders import UnstructuredPDFLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.schema import AIMessage, HumanMessage
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain import HuggingFaceHub

# HuggingFace Hub API token, read from the environment at import time.
# Raises KeyError immediately if API_KEY is not set — fail fast on misconfiguration.
API_KEY = os.environ["API_KEY"] 

# NOTE(review): stale comment — no temporary upload directory is actually created here.

# Global handle to the vector-store index built from the uploaded PDF.
# Set by load_file() inside the Blocks UI; read by chat(). None until a PDF is processed.
index = None


def chat(message, history):
    """Answer *message* with a RetrievalQA chain over the uploaded PDF.

    Parameters
    ----------
    message : str
        The user's current question.
    history : list[tuple[str, str]]
        Prior (human, ai) turns as supplied by ``gr.ChatInterface``.

    Returns
    -------
    str
        The model's answer, or a prompt to upload a PDF first.
    """
    global index
    # Guard: the global index is None until load_file() has processed a PDF;
    # without this, index.vectorstore below raises AttributeError.
    if index is None:
        return "Please upload and process a PDF first."
    # Convert Gradio's (human, ai) tuples into LangChain message objects.
    # NOTE(review): the chain below never consumes this history; kept to
    # preserve the original structure for a future conversational chain.
    history_langchain_format = []
    for human, ai in history:
        history_langchain_format.append(HumanMessage(content=human))
        history_langchain_format.append(AIMessage(content=ai))
    # Fixed: the original appended the current message twice.
    history_langchain_format.append(HumanMessage(content=message))
    # Build the LLM and a fresh RetrievalQA "stuff" chain over the current index.
    llm2 = HuggingFaceHub(
        repo_id="declare-lab/flan-alpaca-large",
        model_kwargs={"temperature": 0, "max_length": 512},
        huggingfacehub_api_token=API_KEY,
    )
    chain = RetrievalQA.from_chain_type(
        llm=llm2,
        chain_type="stuff",
        retriever=index.vectorstore.as_retriever(),
        input_key="question",
    )
    # Perform question-answering on the uploaded PDF with the user's question.
    gpt_response = chain.run("Based on the file you have processed, provide a related answer to this question: "+ message)
    return gpt_response


# NOTE(review): dead code — this ChatInterface is never launched and the name
# is rebound by the identical gr.ChatInterface created inside the Blocks layout
# below. Kept only to avoid changing module-level side effects; candidate for removal.
chat_interface = gr.ChatInterface(
    chat,
    theme=gr.themes.Soft()
)



# Two-column layout: PDF upload + status on the left, chat on the right.
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    with gr.Row():
        with gr.Column(scale=1):
            with gr.Row():
                upload_file = gr.File(label="Upload a PDF",file_types=["pdf"])
            with gr.Row():
                # NOTE(review): gr.Button conventionally takes its caption as
                # `value`, not `label` — confirm against the installed gradio version.
                upload_button = gr.Button(label="Upload a PDF")
            with gr.Row():
                text = gr.Textbox(label="Status")
            def load_file(pdf_file):
                """Index the uploaded PDF into the global vector store.

                Parameters:
                    pdf_file: gradio File payload; .name is the temp-file path.

                Returns:
                    str: status message shown in the Status textbox.
                """
                global index
                # Parse the PDF from the temp path Gradio wrote it to.
                pdf_loader = UnstructuredPDFLoader(pdf_file.name)
                # Build a FAISS-backed index: split into 1000-char chunks
                # (no overlap) and embed with HuggingFace sentence embeddings.
                index = VectorstoreIndexCreator(
                embedding=HuggingFaceEmbeddings(),
                text_splitter=CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
                ).from_loaders([pdf_loader])
                return "DONE ✅"
            # Wire the button: input is the File component, output the status box.
            upload_button.click(load_file, [upload_file], text)
        with gr.Column(scale=2):
            # Chat panel; chat() reads the global index populated by load_file().
            chat_interface = gr.ChatInterface(
                            chat,
                            theme=gr.themes.Soft()
                        )

# Enable request queuing (needed for long-running LLM calls) and launch.
demo.queue().launch(inline=False)