File size: 964 Bytes
f7dd205
 
 
 
 
 
 
 
65a23ce
f7dd205
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from data import prepare_data

# Filesystem path to the local LLM consumed by prepare_data().
path = './llm'
# On-disk directory where the Chroma vector store persists its index.
persist_directory = "vector_db"

# Build/load the data and get back the LLM instance.
# NOTE(review): presumably prepare_data() ingests the corpus into
# `persist_directory` and loads the model from `llm_path` — confirm in data.py.
llm = prepare_data(db_path = persist_directory, llm_path = path)

# Embedding function used to embed queries; must match the one the
# persisted Chroma index was built with.
embeddings = HuggingFaceEmbeddings()
# Re-open the persisted Chroma store and expose it as a retriever.
vectordb = Chroma(persist_directory = persist_directory, embedding_function = embeddings)
doc_retriever = vectordb.as_retriever()
# "stuff" chain type: all retrieved documents are stuffed into a single prompt.
shakespeare_qa = RetrievalQA.from_chain_type(llm = llm, chain_type = "stuff", retriever = doc_retriever)

if __name__ == "__main__":
    # make a gradio interface
    import gradio as gr

    def make_inference(query):
        shakespeare_qa.run(query)


    demo = gr.Interface(fn = make_inference, inputs = "text", outputs = "text",
                        title = "Answer to the question about Shakespeare",
                        description = "This is a demo of the LangChain library.", )

    demo.launch()