pavanammm committed on
Commit
fed4ee7
·
verified ·
1 Parent(s): 787d82d

Upload 3 files

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -0
  2. Document2.pdf +3 -0
  3. app.py +253 -0
  4. requirements.txt +3 -0
.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  my_info.pdf filter=lfs diff=lfs merge=lfs -text
 
 
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
  my_info.pdf filter=lfs diff=lfs merge=lfs -text
37
+ Document2.pdf filter=lfs diff=lfs merge=lfs -text
Document2.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98d708c94e4103d6f5f924f4c1177388eea43b6890968f65a2b0f51ac0e3da35
3
+ size 163499
app.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import os
3
+ import json
4
+ from pypdf import PdfReader
5
+ from openai import OpenAI
6
+ import gradio as gr
7
+
8
+ # --- 1. PDF Data Processing Functions ---
9
+
10
def extract_text_from_pdf(pdf_path):
    """
    Extract the text content of every page of a PDF file.

    Args:
        pdf_path (str): Path to the PDF document.

    Returns:
        str: Text of all pages joined with newlines. Returns an empty
        string if the file does not exist or cannot be parsed.
    """
    if not os.path.exists(pdf_path):
        print(f"Error: PDF file not found at '{pdf_path}'")
        return ""

    try:
        reader = PdfReader(pdf_path)
        # extract_text() may yield ""/None for pages with no extractable
        # text (e.g. scanned images); `or ""` keeps join() from receiving
        # a non-string.
        text_content = [page.extract_text() or "" for page in reader.pages]
        return "\n".join(text_content)
    except Exception as e:
        # Best-effort: report the problem and degrade to empty content
        # rather than crash app startup.
        print(f"An error occurred while reading the PDF: {e}")
        return ""
34
+
35
def chunk_text(text, chunk_size=1000, chunk_overlap=200):
    """
    Split text into overlapping fixed-size chunks.

    Args:
        text (str): The input text to be chunked.
        chunk_size (int): The desired size of each chunk.
        chunk_overlap (int): Characters shared between consecutive chunks.
            Must be smaller than chunk_size.

    Returns:
        list: A list of text chunks (empty list for empty input).

    Raises:
        ValueError: If chunk_overlap >= chunk_size — the scan step would be
            zero or negative and the original loop would never terminate.
    """
    if not text:
        return []
    if chunk_overlap >= chunk_size:
        raise ValueError("chunk_overlap must be smaller than chunk_size")

    chunks = []
    start_index = 0
    step = chunk_size - chunk_overlap  # positive, guaranteed by the check above
    while start_index < len(text):
        end_index = min(start_index + chunk_size, len(text))
        chunks.append(text[start_index:end_index])
        if end_index == len(text):
            break
        start_index += step
    return chunks
59
+
60
# --- 2. OpenRouter API Client Setup ---

# SECURITY: the API key must come from the environment only. The previous
# version hardcoded a live key as a fallback — a key committed to a public
# repo is leaked and must be treated as compromised (revoke and rotate it).
# For Hugging Face deployment, set it as a Space Secret.
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")

# Fail fast at startup with a clear message instead of a confusing auth
# error on the first request.
if not OPENROUTER_API_KEY:
    raise ValueError(
        "OPENROUTER_API_KEY not found. Please set it as an environment "
        "variable (or a Hugging Face Space Secret)."
    )

client = OpenAI(
    base_url="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY,
)

OPENROUTER_MODEL_NAME = "google/gemma-2-9b-it"
76
+
77
+ # --- 3. Response Generation Agent ---
78
+
79
def generate_response(user_query, context_chunks):
    """
    Ask the LLM to answer a question grounded only in the supplied context.

    Args:
        user_query (str): The user's question.
        context_chunks (list): Text chunks from the PDF used as grounding.

    Returns:
        str: The model's reply, or an error message if the API call fails.
    """
    joined_context = "\n---\n".join(context_chunks)

    # The system prompt pins the model to the PDF context and forbids
    # fabrication.
    system_prompt = (
        "You are a personal avatar chatbot. Your task is to answer the user's questions "
        "based *only* on the provided context. If the answer cannot be found in the context, "
        "state that you don't have enough information to answer. Do not make up information."
        f"\n\nContext:\n{joined_context}"
    )

    try:
        completion = client.chat.completions.create(
            model=OPENROUTER_MODEL_NAME,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_query},
            ],
            temperature=0.5,
            max_tokens=500,
        )
        return completion.choices[0].message.content
    except Exception as e:
        return f"An error occurred while generating response: {e}"
114
+
115
+ # --- 4. Response Evaluation Agent ---
116
+
117
def evaluate_response(user_query, generated_response, context_chunks):
    """
    Score a generated answer against the query and context with a second
    LLM call acting as a judge.

    Args:
        user_query (str): The original user's question.
        generated_response (str): The answer produced by the generator agent.
        context_chunks (list): The PDF text chunks used as context.

    Returns:
        tuple: (passed (bool), reasoning (str)). On any error the result is
        (False, error_message).
    """
    joined_context = "\n---\n".join(context_chunks)

    # Judge instructions: accuracy, relevance, and strict context adherence,
    # with a JSON verdict we can parse programmatically.
    judge_system_prompt = (
        "You are an evaluation agent. Your task is to assess a 'generated_response' "
        "based on a 'user_query' and provided 'context'.\n\n"
        "Your assessment should focus on the following criteria:\n"
        "1. **Accuracy**: Is the 'generated_response' factually correct according to the 'context'?\n"
        "2. **Relevance**: Does the 'generated_response' directly address the 'user_query'?\n"
        "3. **Context Adherence**: Does the 'generated_response' *only* use information present "
        "in the 'context'? If it brings in outside information or makes up facts, it fails this criterion.\n\n"
        "Based on these criteria, determine if the 'generated_response' is acceptable. "
        "If the response explicitly states it cannot answer based on the context, and it's true, consider it acceptable."
        "Return your evaluation as a JSON object with two keys: 'pass' (boolean: true if acceptable, false otherwise) "
        "and 'reasoning' (string: a brief explanation for your decision).\n\n"
        f"Context:\n{joined_context}"
    )

    judge_user_prompt = (
        f"User Query: {user_query}\n\n"
        f"Generated Response: {generated_response}\n\n"
        "Please evaluate this generated response according to the instructions."
    )

    try:
        completion = client.chat.completions.create(
            model=OPENROUTER_MODEL_NAME,
            messages=[
                {"role": "system", "content": judge_system_prompt},
                {"role": "user", "content": judge_user_prompt},
            ],
            temperature=0.1,
            max_tokens=300,
            response_format={ "type": "json_object" },
        )
    except Exception as e:
        return False, f"An error occurred while evaluating response: {e}"

    raw_verdict = completion.choices[0].message.content

    # The model is asked for JSON, but guard against malformed output anyway.
    try:
        verdict = json.loads(raw_verdict)
    except json.JSONDecodeError:
        print(f"Warning: Could not decode JSON from evaluator: {raw_verdict}")
        return False, f"Evaluator returned malformed JSON: {raw_verdict}"

    return verdict.get('pass', False), verdict.get('reasoning', 'No reasoning provided.')
179
+
180
+ # --- 5. Chatbot Orchestration Logic ---
181
+
182
def chat_with_avatar(user_query, context_chunks, max_retries=3):
    """
    Run the generate/evaluate loop until an answer passes or retries run out.

    Args:
        user_query (str): The user's question.
        context_chunks (list): Text chunks from the PDF used as context.
        max_retries (int): Maximum generate/evaluate rounds to attempt.

    Returns:
        str: The first response approved by the evaluator, or an apology
        message if none passed within the retry budget.
    """
    attempts_left = max_retries
    while attempts_left > 0:
        candidate = generate_response(user_query, context_chunks)
        approved, _reasoning = evaluate_response(user_query, candidate, context_chunks)
        if approved:
            return candidate
        # NOTE: the evaluator's reasoning is not fed back; the same prompt
        # is simply retried.
        attempts_left -= 1

    return "I'm sorry, I couldn't generate an acceptable response after several attempts. Please try rephrasing your question."
211
+
212
# --- Initial Setup (Load PDF and Chunk) ---

# Path to the personal PDF document. For Hugging Face Spaces, upload the PDF
# to the Space repository alongside app.py.
PDF_PATH = "Document2.pdf"  # Make sure this matches the filename you upload

# Load the document if present; otherwise fall back to empty content so the
# app still starts (the chatbot will simply have no knowledge base).
if os.path.exists(PDF_PATH):
    print(f"Extracting text from {PDF_PATH}...")
    PDF_CONTENT = extract_text_from_pdf(PDF_PATH)
    print("PDF content extracted.")
else:
    print(f"Warning: PDF file not found at '{PDF_PATH}'. Using empty content.")
    PDF_CONTENT = ""

TEXT_CHUNKS = chunk_text(PDF_CONTENT)

if not TEXT_CHUNKS:
    print("Warning: No text chunks were created from the PDF. The chatbot will not have context.")
234
+
235
+ # --- Gradio Interface ---
236
+
237
def respond(message, history):
    """
    Gradio chat callback: answer `message` using the globally loaded chunks.

    Args:
        message (str): The user's latest message.
        history: Prior conversation turns supplied by Gradio (unused here).

    Returns:
        str: The chatbot's reply.
    """
    global TEXT_CHUNKS  # Use the globally loaded text chunks
    if TEXT_CHUNKS:
        return chat_with_avatar(message, TEXT_CHUNKS)
    return "I am unable to answer questions as my knowledge base (PDF) could not be loaded or processed. Please check the PDF file."
242
+
243
# Build the chat UI and start the app. debug=True surfaces tracebacks in the
# console, which helps when reading Hugging Face Space logs.
demo = gr.ChatInterface(
    respond,
    title="Personal Avatar Chatbot",
    description="Ask me anything about Pavan Thakkallapalli! (Information based on provided PDF)",
    examples=[
        "What is Pavan Thakkallapalli's primary role and education?",
        "Tell me about Pavan's experience with MLOps and Machine Learning.",
        "What is Pavan's favorite movie?"
    ],
    theme="soft",
)
demo.launch(debug=True)
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ pypdf==6.6.2
2
+ openai==2.15.0
3
+ gradio==4.20.0