import os

import gradio as gr
import pinecone
from langchain.llms import Replicate
from langchain.vectorstores import Pinecone
from langchain.text_splitter import CharacterTextSplitter
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import ConversationalRetrievalChain

# API credentials are read from environment variables (e.g. Space secrets).
replicate_token = os.environ.get('API')
pinecone_key = os.environ.get('pineapi')
os.environ["REPLICATE_API_TOKEN"] = replicate_token
pinecone.init(api_key=pinecone_key, environment='gcp-starter')

# Set once a PDF has been indexed by pdf_changes().
qa = None


def loading_pdf():
    return "Loading..."


def pdf_changes(pdf_doc):
    """Index the uploaded PDF and build the conversational retrieval chain."""
    global qa

    # Load the PDF and split it into ~1000-character chunks for embedding.
    loader = PyPDFLoader(pdf_doc.name)
    documents = loader.load()
    text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
    texts = text_splitter.split_documents(documents)

    # Embed the chunks (HuggingFaceEmbeddings uses a sentence-transformers
    # model by default) and upsert them into the existing Pinecone index.
    embeddings = HuggingFaceEmbeddings()
    index_name = "chatbot"
    vectordb = Pinecone.from_documents(texts, embeddings, index_name=index_name)

    llm = Replicate(
        model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
        input={"temperature": 0.2, "max_length": 3000, "length_penalty": 1.5, "num_beams": 3},
    )
    qa = ConversationalRetrievalChain.from_llm(
        llm,
        vectordb.as_retriever(search_kwargs={'k': 2}),
        return_source_documents=True,
    )
    return "Ready"


def add_text(history, text):
    history = history + [(text, None)]
    return history, ""


def bot(history):
    # Earlier turns serve as the chat history; the last entry holds the
    # pending question whose answer slot is still None.
    chat_history = history[:-1]
    response = infer(history[-1][0], chat_history)
    history[-1][1] = response['answer']
    return history


def infer(question, chat_history):
    # ConversationalRetrievalChain expects "question" and "chat_history"
    # inputs and returns its reply under the "answer" key (the
    # "query"/"result" keys belong to RetrievalQA, not this chain).
    result = qa({"question": question, "chat_history": chat_history})
    return result


css = """
#col-container {max-width: 700px; margin-left: auto; margin-right: auto;}
"""

title = """