HugMeBytes committed on
Commit
6e5d3d5
·
verified ·
1 Parent(s): 4ca9725

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +166 -166
app.py CHANGED
@@ -1,166 +1,166 @@
1
- import gradio as gr
2
- import fitz # PyMuPDF
3
- import os
4
- import tempfile
5
- import requests
6
- from sklearn.feature_extraction.text import TfidfVectorizer
7
- from sklearn.metrics.pairwise import cosine_similarity
8
- from datetime import datetime
9
-
10
- # === CONFIG CHECK ===
11
- if not os.getenv("GROQ_API_KEY"):
12
- print("WARNING: GROQ_API_KEY environment variable not set. API calls will fail.")
13
-
14
- # === Globals ===
15
- vectorizer = TfidfVectorizer(stop_words='english')
16
-
17
- # === UTILITY FUNCTIONS ===
18
- def call_groq_api(prompt):
19
- api_key = os.getenv("GROQ_API_KEY")
20
- if not api_key:
21
- return "Error: GROQ_API_KEY environment variable not set."
22
-
23
- headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
24
- data = {"model": "llama3-8b-8192", "messages": [{"role": "user", "content": prompt}]}
25
-
26
- try:
27
- response = requests.post("https://api.groq.com/openai/v1/chat/completions", json=data, headers=headers)
28
- response.raise_for_status()
29
- return response.json()["choices"][0]["message"]["content"]
30
- except requests.exceptions.RequestException as e:
31
- return f"API Error: {str(e)}"
32
- except (KeyError, IndexError) as e:
33
- return f"Error parsing API response: {str(e)}"
34
-
35
- def extract_text_from_pdfs(pdf_files):
36
- chunks, pages, file_names = [], [], []
37
- for file in pdf_files:
38
- try:
39
- doc = fitz.open(file.name)
40
- for page_num, page in enumerate(doc, start=1):
41
- text = page.get_text().strip()
42
- if text:
43
- chunks.append(text)
44
- pages.append(page_num)
45
- file_names.append(os.path.basename(file.name))
46
- except Exception as e:
47
- print(f"Error processing {file.name}: {e}")
48
- return chunks, pages, file_names
49
-
50
- def retrieve_context(query, chunks, pages, file_names, top_k=3):
51
- all_texts = chunks + [query]
52
- tfidf_matrix = vectorizer.fit_transform(all_texts)
53
- query_vec = tfidf_matrix[-1]
54
- similarities = cosine_similarity(query_vec, tfidf_matrix[:-1]).flatten()
55
-
56
- if max(similarities) < 0.2:
57
- return "Ask a relevant question.", [], []
58
-
59
- top_indices = similarities.argsort()[-top_k:][::-1]
60
- selected_chunks = [chunks[i] for i in top_indices]
61
- references = [f"{file_names[i]} (p.{pages[i]})" for i in top_indices]
62
- return "\n".join(selected_chunks), selected_chunks, references
63
-
64
-
65
- def download_chat(chat_history):
66
- if not chat_history:
67
- return None
68
- timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
69
- filename = f"chat_{timestamp}.txt"
70
- path = os.path.join(tempfile.gettempdir(), filename)
71
- with open(path, "w", encoding="utf-8") as f:
72
- for q, a in chat_history:
73
- f.write(f"Q: {q}\nA: {a}\n\n")
74
- return path
75
-
76
-
77
- # === Main Q&A Logic ===
78
- def answer_question(text_input, pdf_files, chat_history):
79
- if chat_history is None:
80
- chat_history = []
81
-
82
- if not text_input:
83
- return "❗ Please type a question.", chat_history, chat_history
84
- if not pdf_files:
85
- return "❗ Please upload PDF files first.", chat_history, chat_history
86
-
87
- chunks, pages, file_names = extract_text_from_pdfs(pdf_files)
88
- if not chunks:
89
- return "❗ Could not extract text from PDFs.", chat_history, chat_history
90
-
91
- context, matched_chunks, references = retrieve_context(text_input, chunks, pages, file_names)
92
-
93
- if context == "Ask a relevant question.":
94
- response = "⚠️ Ask a relevant question based on the PDFs."
95
- chat_history.append([text_input, response])
96
- return response, chat_history, chat_history
97
-
98
- prompt = f"Answer the question using this context:\n\n{context}\n\nQuestion: {text_input}\n\nAnswer:"
99
- answer = call_groq_api(prompt)
100
- full_answer = f"{answer}\n\nπŸ“Œ Sources: {', '.join(references)}"
101
- chat_history.append([text_input, full_answer])
102
- return full_answer, chat_history, chat_history
103
-
104
-
105
- # === Custom CSS ===
106
- custom_css = """
107
- .gradio-container {
108
- max-width: 900px !important;
109
- margin: auto;
110
- font-family: 'Segoe UI', sans-serif;
111
- }
112
-
113
- body {
114
- background-color: var(--background-primary);
115
- color: var(--body-text-color);
116
- }
117
-
118
- textarea, input, button {
119
- font-family: 'Segoe UI', sans-serif !important;
120
- }
121
- """
122
-
123
- # === Launch UI ===
124
- with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
125
- gr.Markdown("""
126
- # 🧠 **SmartPDF Q&A Bot**
127
- _Ask questions from your PDFs. Get answers with page references. Download chat history._
128
- """, elem_id="title")
129
-
130
- chat_state = gr.State([])
131
-
132
- with gr.Tabs():
133
- with gr.Tab("πŸ“‚ Upload PDFs"):
134
- gr.Markdown("### Step 1: Upload one or more PDF documents.")
135
- pdf_input = gr.File(label="πŸ“ Upload PDF Files", file_types=[".pdf"], file_count="multiple")
136
-
137
- with gr.Tab("πŸ’¬ Ask Questions"):
138
- gr.Markdown("### Step 2: Ask a question about the uploaded documents.")
139
- with gr.Row():
140
- text_input = gr.Textbox(label="❓ Type your question here", placeholder="e.g. What is the main idea of the first document?", lines=2)
141
- ask_btn = gr.Button("πŸ” Ask")
142
-
143
- answer_output = gr.Textbox(label="🧠 Answer", lines=6)
144
- chatbox = gr.Dataframe(headers=["User", "Bot"], label="πŸ’¬ Chat History", interactive=False)
145
-
146
- with gr.Tab("πŸ“₯ Export Chat History"):
147
- gr.Markdown("### Step 3: Download your chat session.")
148
- download_btn = gr.Button("⬇️ Download Chat History")
149
- download_file = gr.File(label="πŸ“„ Your Chat File", visible=False)
150
-
151
- # === Button Event Binding ===
152
- ask_btn.click(
153
- answer_question,
154
- inputs=[text_input, pdf_input, chat_state],
155
- outputs=[answer_output, chatbox, chat_state]
156
- )
157
-
158
- download_btn.click(
159
- download_chat,
160
- inputs=[chat_state],
161
- outputs=download_file
162
- ).then(lambda: gr.update(visible=True), None, [download_file])
163
-
164
-
165
- if __name__ == "__main__":
166
- demo.launch()
 
1
+ import gradio as gr
2
+ import fitz # PyMuPDF
3
+ import os
4
+ import tempfile
5
+ import requests
6
+ from sklearn.feature_extraction.text import TfidfVectorizer
7
+ from sklearn.metrics.pairwise import cosine_similarity
8
+ from datetime import datetime
9
+
10
+ # === CONFIG CHECK ===
11
+ if not os.getenv("GROQ_API_KEY"):
12
+ print("WARNING: GROQ_API_KEY environment variable not set. API calls will fail.")
13
+
14
+ # === Globals ===
15
+ vectorizer = TfidfVectorizer(stop_words='english')
16
+
17
+ # === UTILITY FUNCTIONS ===
18
+ def call_groq_api(prompt):
19
+ api_key = os.getenv("GROQ_API_KEY")
20
+ if not api_key:
21
+ return "Error: GROQ_API_KEY environment variable not set."
22
+
23
+ headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
24
+ data = {"model": "llama3-8b-8192", "messages": [{"role": "user", "content": prompt}]}
25
+
26
+ try:
27
+ response = requests.post("https://api.groq.com/openai/v1/chat/completions", json=data, headers=headers)
28
+ response.raise_for_status()
29
+ return response.json()["choices"][0]["message"]["content"]
30
+ except requests.exceptions.RequestException as e:
31
+ return f"API Error: {str(e)}"
32
+ except (KeyError, IndexError) as e:
33
+ return f"Error parsing API response: {str(e)}"
34
+
35
+ def extract_text_from_pdfs(pdf_files):
36
+ chunks, pages, file_names = [], [], []
37
+ for file in pdf_files:
38
+ try:
39
+ doc = fitz.open(file.name)
40
+ for page_num, page in enumerate(doc, start=1):
41
+ text = page.get_text().strip()
42
+ if text:
43
+ chunks.append(text)
44
+ pages.append(page_num)
45
+ file_names.append(os.path.basename(file.name))
46
+ except Exception as e:
47
+ print(f"Error processing {file.name}: {e}")
48
+ return chunks, pages, file_names
49
+
50
+ def retrieve_context(query, chunks, pages, file_names, top_k=3):
51
+ all_texts = chunks + [query]
52
+ tfidf_matrix = vectorizer.fit_transform(all_texts)
53
+ query_vec = tfidf_matrix[-1]
54
+ similarities = cosine_similarity(query_vec, tfidf_matrix[:-1]).flatten()
55
+
56
+ if max(similarities) < 0.2:
57
+ return "Ask a relevant question.", [], []
58
+
59
+ top_indices = similarities.argsort()[-top_k:][::-1]
60
+ selected_chunks = [chunks[i] for i in top_indices]
61
+ references = [f"{file_names[i]} (p.{pages[i]})" for i in top_indices]
62
+ return "\n".join(selected_chunks), selected_chunks, references
63
+
64
+
65
+ def download_chat(chat_history):
66
+ if not chat_history:
67
+ return None
68
+ timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
69
+ filename = f"chat_{timestamp}.txt"
70
+ path = os.path.join(tempfile.gettempdir(), filename)
71
+ with open(path, "w", encoding="utf-8") as f:
72
+ for q, a in chat_history:
73
+ f.write(f"Q: {q}\nA: {a}\n\n")
74
+ return path
75
+
76
+
77
+ # === Main Q&A Logic ===
78
+ def answer_question(text_input, pdf_files, chat_history):
79
+ if chat_history is None:
80
+ chat_history = []
81
+
82
+ if not text_input:
83
+ return "❗ Please type a question.", chat_history, chat_history
84
+ if not pdf_files:
85
+ return "❗ Please upload PDF files first.", chat_history, chat_history
86
+
87
+ chunks, pages, file_names = extract_text_from_pdfs(pdf_files)
88
+ if not chunks:
89
+ return "❗ Could not extract text from PDFs.", chat_history, chat_history
90
+
91
+ context, matched_chunks, references = retrieve_context(text_input, chunks, pages, file_names)
92
+
93
+ if context == "Ask a relevant question.":
94
+ response = "⚠️ Ask a relevant question based on the PDFs."
95
+ chat_history.append([text_input, response])
96
+ return response, chat_history, chat_history
97
+
98
+ prompt = f"Answer the question using this context:\n\n{context}\n\nQuestion: {text_input}\n\nAnswer:"
99
+ answer = call_groq_api(prompt)
100
+ full_answer = f"{answer}\n\nπŸ“Œ Sources: {', '.join(references)}"
101
+ chat_history.append([text_input, full_answer])
102
+ return full_answer, chat_history, chat_history
103
+
104
+
105
+ # === Custom CSS ===
106
+ custom_css = """
107
+ .gradio-container {
108
+ max-width: 900px !important;
109
+ margin: auto;
110
+ font-family: 'Segoe UI', sans-serif;
111
+ }
112
+
113
+ body {
114
+ background-color: var(--background-primary);
115
+ color: var(--body-text-color);
116
+ }
117
+
118
+ textarea, input, button {
119
+ font-family: 'Segoe UI', sans-serif !important;
120
+ }
121
+ """
122
+
123
+ # === Launch UI ===
124
+ with gr.Blocks(css=custom_css, theme=gr.themes.Base()) as demo:
125
+ gr.Markdown("""
126
+ # 🧠 **SmartPDF Q&A Bot**
127
+ _Ask questions from your PDFs. Get answers with page references. Download chat history._
128
+ """, elem_id="title")
129
+
130
+ chat_state = gr.State([])
131
+
132
+ with gr.Tabs():
133
+ with gr.Tab("πŸ“‚ Upload PDFs"):
134
+ gr.Markdown("### Step 1: Upload one or more PDF documents.")
135
+ pdf_input = gr.File(label="πŸ“ Upload PDF Files", file_types=[".pdf"], file_count="multiple")
136
+
137
+ with gr.Tab("πŸ’¬ Ask Questions"):
138
+ gr.Markdown("### Step 2: Ask a question about the uploaded documents.")
139
+ with gr.Row():
140
+ text_input = gr.Textbox(label="❓ Type your question here", placeholder="e.g. What is the main idea of the first document?", lines=2)
141
+ ask_btn = gr.Button("πŸ” Ask")
142
+
143
+ answer_output = gr.Textbox(label="🧠 Answer", lines=6)
144
+ chatbox = gr.Dataframe(headers=["User", "Bot"], label="πŸ’¬ Chat History", interactive=False)
145
+
146
+ with gr.Tab("πŸ“₯ Export Chat History"):
147
+ gr.Markdown("### Step 3: Download your chat session.")
148
+ download_btn = gr.Button("⬇️ Download Chat History")
149
+ download_file = gr.File(label="πŸ“„ Your Chat File", visible=False)
150
+
151
+ # === Button Event Binding ===
152
+ ask_btn.click(
153
+ answer_question,
154
+ inputs=[text_input, pdf_input, chat_state],
155
+ outputs=[answer_output, chatbox, chat_state]
156
+ )
157
+
158
+ download_btn.click(
159
+ download_chat,
160
+ inputs=[chat_state],
161
+ outputs=download_file
162
+ ).then(lambda: gr.update(visible=True), None, [download_file])
163
+
164
+
165
+ if __name__ == "__main__":
166
+ demo.launch(share=True)