ChinarQ-AI committed on
Commit
7a70ad3
·
verified ·
1 Parent(s): 167d6d1

Delete src/app.py

Browse files
Files changed (1) hide show
  1. src/app.py +0 -132
src/app.py DELETED
@@ -1,132 +0,0 @@
1
- import streamlit as st
2
- import os
3
- from langchain_groq import ChatGroq
4
- from langchain.text_splitter import RecursiveCharacterTextSplitter
5
- from langchain.chains.combine_documents import create_stuff_documents_chain
6
- from langchain_core.prompts import ChatPromptTemplate
7
- from langchain.chains import create_retrieval_chain
8
- from langchain_community.vectorstores import FAISS
9
- from langchain_community.document_loaders import PyPDFDirectoryLoader
10
- from langchain_google_genai import GoogleGenerativeAIEmbeddings
11
- from dotenv import load_dotenv
12
- from PDFprocess_sample import process_pdf
13
-
14
# Load the GROQ and Google API keys from the .env file into the process.
load_dotenv()

GROQ_API_KEY = os.getenv('GROQ_API_KEY')

# os.environ rejects None values with a cryptic TypeError, so fail with a
# clear message when the Google key is missing instead of crashing later.
_google_api_key = os.getenv('GOOGLE_API_KEY')
if _google_api_key is None:
    raise RuntimeError("GOOGLE_API_KEY is not set; add it to your .env file.")
os.environ["GOOGLE_API_KEY"] = _google_api_key
19
-
20
- #Loading CSS files
21
-
22
def load_css(file_name):
    """Read the stylesheet at *file_name* and inject it into the Streamlit page.

    Uses ``unsafe_allow_html=True`` because the CSS must be emitted inside a
    raw ``<style>`` tag.
    """
    # Explicit encoding so the stylesheet decodes the same way on every OS.
    with open(file_name, encoding="utf-8") as f:
        css = f.read()
    st.markdown(f"<style>{css}</style>", unsafe_allow_html=True)


load_css('CSS/style.css')
28
-
29
# Chat LLM served by Groq; the key was read from the environment above.
llm = ChatGroq(
    api_key=GROQ_API_KEY,
    model_name="Llama3-8b-8192",
)
34
-
35
-
36
# Prompt template for context-grounded answering: the retrieval chain fills
# {context} with the relevant document chunks and {input} with the question.
# BUG FIX: the closing tag was previously a second opening "<context>" tag,
# leaving the context block unterminated in the prompt sent to the model.
prompt = ChatPromptTemplate.from_template(
    """
    Answer the questions based on the provided context only.
    Please provide the most accurate response based on the question. Try to answer in detail in 1500 words
    <context>
    {context}
    </context>
    Questions: {input}
    """
)
46
-
47
# Sidebar: mode selector plus the PDF upload / submit widgets.
input_method = st.sidebar.selectbox(
    "Choose a method",
    ["Choose input method...", "Interact with Doc", "Get Ques from Doc"],
)

st.sidebar.title("Upload your pdf")

main_placeholder = st.empty()

# PDF upload widget (several files may be uploaded at once).
uploaded_file = st.sidebar.file_uploader(
    "_____________________________________",
    type="pdf",
    accept_multiple_files=True,
)
st.sidebar.write("Press Submit to process:")
process = st.sidebar.button("Submit")

# On Submit, convert the uploaded document(s) into vectors.
if process:
    if not uploaded_file:
        st.warning("Please upload a PDF file.")
    else:
        # Process the uploaded PDF file(s).
        process_pdf(uploaded_file)
66
-
67
# Landing view shown until the user picks a mode in the sidebar.
if input_method == "Choose input method...":
    st.title("Welcome You all!")
    st.title("Choose an option in the sidebar")
    st.title("Now, let's get started!")
71
-
72
-
73
- #If User wants to interact with the document
74
- elif input_method == "Interact with Doc":
75
- st.title(f"let's Interact with pdf's")
76
-
77
- prompt1 = st.text_input("______", placeholder="Enter your Question")
78
-
79
-
80
- # Generate response if question is entered
81
- if prompt1 and "vectors" in st.session_state:
82
- document_chain = create_stuff_documents_chain(llm, prompt)
83
- retriever = st.session_state.vectors.as_retriever()
84
- retrieval_chain = create_retrieval_chain(retriever, document_chain)
85
-
86
-
87
- response = retrieval_chain.invoke({'input': prompt1})
88
-
89
- # st.write(response['answer'])
90
-
91
- #Get the respose in the card
92
- st.markdown(
93
- f"""
94
- <div class="card">
95
- <div class="response">{response['answer']}</div>
96
- </div>
97
- """,
98
- unsafe_allow_html=True,
99
- )
100
-
101
-
102
-
103
- #When User wants to get questions from the doc based on certain topic
104
- elif input_method == "Get Ques from Doc":
105
- st.title(f"Let's Get Ques from Document")
106
-
107
- prompt2 = """Based on the topic of {topic},
108
- kindly provide a comprehensive list of all possible questions that could arise.
109
- For each question, provide detailed and explanatory answers in atleast 1000 words detail based on the context,
110
- ensuring that the responses are as informative as possible.
111
- make sure you strictly follow the {topic}"""
112
- topic = st.text_input("Enter a topic", placeholder="What is your topic")
113
-
114
- # Generate response if question is entered
115
- if topic and "vectors" in st.session_state:
116
- document_chain = create_stuff_documents_chain(llm, prompt)
117
- retriever = st.session_state.vectors.as_retriever()
118
- retrieval_chain = create_retrieval_chain(retriever, document_chain)
119
-
120
-
121
- response = retrieval_chain.invoke({'input': prompt2})
122
-
123
- #Get the respose in the card
124
- st.markdown(
125
- f"""
126
- <div class="card">
127
- <div class="response">{response['answer']}</div>
128
- </div>
129
- """,
130
- unsafe_allow_html=True,
131
- )
132
-