# # we'll need a few dependencies before we can do this
# #!pip install chromadb -q
# from langchain.vectorstores import Chroma
# persist_directory = "vector_db"
# vectordb = Chroma.from_documents(documents=documents, embedding=embeddings, persist_directory=persist_directory)
# """Now we can persist our Chroma vector store - and then show an example of how you would load that persisted vector store."""
# vectordb.persist()
# vectordb = None
| # """As you can see when you run the following cell - loaded the persisted vectore store is *much* quicker than reinstantiating it - and that is the benefit of `persist_directory`!""" | |
# vectordb = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
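# """As a quick sanity check - a hedged sketch, where the query string is purely illustrative - you can run a similarity search against the reloaded store:"""
# # `similarity_search` embeds the query and returns the k closest chunks
# results = vectordb.similarity_search("Who was Hamlet's Mother?", k=2)
# print(results[0].page_content[:200])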
| # """Now that we have our docs set-up - we're ready to progress to the next part of the LangChain applciation puzzle! | |
| # ### Tool Chain | |
| # Now we can leverage our `oc_retriever` as a tool in our LangChain application! | |
| # We'll be utilizing the BLOOMZ-1b7 model as our LLM today - so we can expect that our results will be less effective than if we used OpenAI's gpt-3.5-turbo, but the advantage is that no information will escape outside of our Colab environment. | |
| # First up, let's load our model! | |
| # """ | |
# from langchain.llms import HuggingFacePipeline
# llm = HuggingFacePipeline.from_model_id(
#     model_id="bigscience/bloomz-1b7",
#     task="text-generation",
#     model_kwargs={"temperature": 0, "max_length": 500})
| # """Now let's set up our document vector store as a Retriever tool so we can leverage it in our chain!""" | |
| # doc_retriever = vectordb.as_retriever() ### YOUR CODE HERE | |
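# """To see exactly what the retriever will hand to the chain - a small sketch with an illustrative query - you can pull documents from it directly:"""
# # returns the documents whose embeddings are closest to the query's
# docs = doc_retriever.get_relevant_documents("Who was Hamlet's Mother?")
# print(f"Retrieved {len(docs)} chunks")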
| # """### Final Chain | |
| # With that set-up, we're good to set-up our final RetrievalQA chain and leverage all the documents we have in our Vector DB! | |
| # """ | |
| # from langchain.chains import RetrievalQA | |
| # shakespeare_qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=doc_retriever) ### YOUR CODE HERE | |
| # """Let's test it out by itself!""" | |
| # #shakespeare_qa.run("Who was Hamlet's Mother?") | |
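# """If you also want to inspect which chunks grounded an answer, `RetrievalQA.from_chain_type` accepts `return_source_documents=True` - a sketch of the same chain with sources enabled (the variable name is illustrative, and a chain with multiple outputs must be called with a dict rather than `.run`):"""
# shakespeare_qa_sources = RetrievalQA.from_chain_type(
#     llm=llm, chain_type="stuff", retriever=doc_retriever,
#     return_source_documents=True)
# # result = shakespeare_qa_sources({"query": "Who was Hamlet's Mother?"})
# # print(result["result"], len(result["source_documents"]))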
| # """### Conclusion | |
| # Here we have it! | |
| # A system capable of querying over multiple documents - all without every needing to hit an external API! | |
| # """ | |
# def make_inference(query):
#     # run the RetrievalQA chain built above - it retrieves the relevant
#     # chunks and generates an answer in a single call
#     return shakespeare_qa.run(query)
# if __name__ == "__main__":
#     # make a gradio interface
#     import gradio as gr
#     gr.Interface(
#         make_inference,
#         [
#             gr.Textbox(lines=2, label="Query"),
#         ],
#         gr.Textbox(label="Response"),
#         title="🗣️TalkToMyDoc📄",
#         description="🗣️TalkToMyDoc📄 is a tool that allows you to ask questions about a document. In this case - The Hitchhiker's Guide to the Galaxy.",
#     ).launch()
# Minimal Gradio "hello world" - the active entry point of this file.
import gradio as gr

def greet(name):
    return "Hello " + name + "!!"

iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()