# NOTE(review): the lines below were copied from a Hugging Face Spaces page that was
# showing "Build error"; the page chrome and markdown-table pipes have been stripped
# so the file is runnable Python again.
"""Streamlit RAG demo: answer questions about a local PDF using Gemini + Chroma."""
import os
import time
import traceback

import pdfplumber
import streamlit as st
from dotenv import load_dotenv
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
from langchain_community.document_loaders import PDFPlumberLoader, PyPDFLoader
from langchain_core.prompts import ChatPromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings

# Background reading:
# https://mrmaheshrajput.medium.com/how-to-productionize-large-language-models-llms-060a4cb1a169

# Load environment variables first so the key below can come from a local .env file.
load_dotenv()

# SECURITY: never hard-code API keys in source control. The key previously committed
# here must be revoked/rotated. The langchain-google-genai clients read
# GOOGLE_API_KEY from the environment automatically.
api_key = os.getenv("GOOGLE_API_KEY")
st.title("RAG and Gemini Model")

# Path to the document to index; hard-coded for this demo.
pdf_path = "100340.pdf"


@st.cache_resource
def _build_pipeline(path: str):
    """Parse the PDF, split it, embed it into Chroma, and build the retriever + LLM.

    Cached with ``st.cache_resource`` because Streamlit reruns the whole script on
    every user interaction — without caching, every chat message would re-parse the
    PDF and re-embed every chunk (a paid embedding-API call each time).

    Returns:
        (retriever, llm): a similarity retriever over the indexed chunks and the
        Gemini chat model used to answer questions.
    """
    loader = PDFPlumberLoader(path)
    data = loader.load()
    # chunk_overlap is left at the splitter's default; only chunk_size is pinned.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000)
    docs = text_splitter.split_documents(data)
    vectorstore = Chroma.from_documents(
        documents=docs,
        embedding=GoogleGenerativeAIEmbeddings(model="models/embedding-001"),
    )
    # k=30 retrieved chunks per query — generous; tune down if answers get noisy.
    retriever = vectorstore.as_retriever(
        search_type="similarity", search_kwargs={"k": 30}
    )
    llm = ChatGoogleGenerativeAI(
        model="gemini-1.5-pro", temperature=0, max_tokens=None, timeout=None
    )
    return retriever, llm


retriever, llm = _build_pipeline(pdf_path)
# Chat input box; returns None until the user actually submits a message.
query = st.chat_input("Say something: ")

# System instructions for the QA chain. The {context} placeholder is filled by
# create_stuff_documents_chain with the retrieved document chunks.
system_prompt = (
    "You are an assistant for question-answering tasks. "
    "Use the following pieces of retrieved context to answer "
    "the question. If you don't know the answer, say that you "
    "don't know. Use three sentences maximum and keep the "
    "answer concise."
    "\n\n"
    "{context}"
)

# Prompt template pairing the system instructions with the user's question.
# (The original code first assigned `prompt = query` and then immediately
# overwrote it here — that dead assignment has been removed.)
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        ("human", "{input}"),
    ]
)
# Run the RAG pipeline only once the user has submitted a question.
if query:
    # Chain that stuffs the retrieved chunks into the prompt's {context} slot.
    combine_docs_chain = create_stuff_documents_chain(llm, prompt)
    # Full retrieval chain: fetch relevant chunks, then generate the answer.
    qa_chain = create_retrieval_chain(retriever, combine_docs_chain)
    result = qa_chain.invoke({"input": query})
    st.write(result["answer"])