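"""Streamlit app for querying the contents of an uploaded PDF.

The PDF text is indexed with LlamaIndex using a Hugging Face embedding
model, and questions are answered by Zephyr-7B through the Hugging Face
Inference API. (Docstring added for context; it describes only what the
code below does.)
"""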
import streamlit as st
from PyPDF2 import PdfReader
from llama_index import ServiceContext, VectorStoreIndex
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index.llms import HuggingFaceInferenceAPI
from llama_index.schema import Document
def read_pdf(uploaded_file):
    """Extract the text of every page of an uploaded PDF."""
    pdf_reader = PdfReader(uploaded_file)
    text = ""
    for page in pdf_reader.pages:
        # extract_text() can return None for image-only pages
        text += page.extract_text() or ""
    return text
def querying(query_engine):
    """Ask the user for a query and render the query engine's response."""
    progress_container = st.empty()
    query = st.text_input("Enter your query for the PDF:")
    submit = st.button("Generate the response for the query")
    if submit and query:
        progress_container.text("Fetching the response...")
        response = query_engine.query(query)
        progress_container.empty()
        st.write(f"**Response:** {response}")
def main():
    st.title("PdfQuerier using LLAMA by Rahul Bhoyar")
    hf_token = st.text_input("Enter your Hugging Face token:", type="password")
    if not hf_token:
        st.info("A Hugging Face token is required to use the Inference API.")
        return
    llm = HuggingFaceInferenceAPI(model_name="HuggingFaceH4/zephyr-7b-alpha", token=hf_token)

    uploaded_file = st.file_uploader("Choose a PDF file", type=["pdf"])
    if uploaded_file is not None:
        file_contents = read_pdf(uploaded_file)
        documents = [Document(text=file_contents)]
        st.success("Documents loaded successfully!")

        # Embeddings come from the WhereIsAI/UAE-Large-V1 model on Hugging Face
        embed_model_uae = HuggingFaceEmbedding(model_name="WhereIsAI/UAE-Large-V1")
        service_context = ServiceContext.from_defaults(
            llm=llm, chunk_size=800, chunk_overlap=20, embed_model=embed_model_uae
        )

        # Index the documents
        progress_container = st.empty()
        progress_container.text("Creating VectorStoreIndex...")
        index = VectorStoreIndex.from_documents(
            documents, service_context=service_context, show_progress=True
        )
        index.storage_context.persist()
        query_engine = index.as_query_engine()
        progress_container.empty()
        st.success("VectorStoreIndex created successfully!")

        querying(query_engine)
if __name__ == "__main__":
    main()
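# Usage (assumes this file is saved as app.py, the usual entry point for a
# Streamlit-based Hugging Face Space):
#   streamlit run app.py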