Krishna Kumar S committed
Commit 049b08c · 1 Parent(s): 407e916
Files changed (4)
  1. agents.py +69 -0
  2. app.py +30 -45
  3. functions +343 -0
  4. pip_install.txt +1 -1
agents.py ADDED
@@ -0,0 +1,69 @@
+ from crewai.tools import BaseTool
+ from pydantic import BaseModel, Field, PrivateAttr
+ from typing import Type, Any
+ from functions import create_query_engine  # helper defined in the functions module added in this commit
+
+ class PolicyQueryToolInput(BaseModel):
+     """
+     Schema for input to the PolicyQueryTool.
+
+     Attributes:
+         UIN (str): The UIN (Unique Identification Number) of the policy.
+         question (str): The question to ask about the policy.
+     """
+     UIN: str = Field(..., description="UIN number of the policy.")
+     question: str = Field(..., description="Question about the policy.")
+
+ class PolicyQueryTool(BaseTool):
+     """
+     A custom CrewAI tool to query insurance policy documents by UIN using a vector store.
+
+     Attributes:
+         name (str): Name of the tool.
+         description (str): Description of the tool’s functionality.
+         args_schema (Type[BaseModel]): The schema defining expected arguments.
+         _vector_store (Any): The vector store used for querying policy documents.
+     """
+     name: str = "Policy Query Tool"
+     description: str = "Answers questions about a specific insurance policy using its UIN number."
+     args_schema: Type[BaseModel] = PolicyQueryToolInput
+
+     _vector_store: Any = PrivateAttr()  # Holds the internal vector store object, excluded from Pydantic validation
+
+     def __init__(self, vector_store):
+         """
+         Initializes the PolicyQueryTool with the provided vector store.
+
+         Args:
+             vector_store (Any): A Chroma-based vector store used to perform retrieval.
+         """
+         super().__init__()
+         self._vector_store = vector_store  # Store vector DB client internally (not exposed via schema)
+
+     def _run(self, **kwargs) -> str:
+         """
+         Executes the tool with the provided UIN and question.
+
+         Args:
+             kwargs: Should include 'UIN' (policy identifier) and 'question' (user query).
+
+         Returns:
+             str: The answer to the user's question as generated by the LLM.
+         """
+         UIN = kwargs.get("UIN")
+         question = kwargs.get("question")
+
+         # Debug print to verify tool execution
+         # print("PolicyQueryTool======> Running with UIN:", UIN, "and question:", question)
+
+         # Create a query engine specific to the UIN using vector similarity and metadata filters
+         query_engine = create_query_engine(
+             UIN=UIN,
+             embedding_model="BAAI/bge-small-en-v1.5",
+             vector_store=self._vector_store,  # use the internally stored vector store, not an undefined global
+             similarity_top_k=10,
+             llm_model="deepseek/deepseek-chat-v3-0324:free",
+             api_key="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",
+         )
+
+         # Run the query on the engine and return the response
+         return query_engine.query(question)
+
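A minimal sketch of how this tool could be wired into a CrewAI crew (illustrative only, not part of this commit). The vector store is assumed to come from the helpers in the functions file, the paths, UIN, and question are placeholders, and the agent's own LLM configuration is omitted:

from crewai import Agent, Task, Crew
from agents import PolicyQueryTool
from functions import load_vdb_from_archive

# Restore a previously built Chroma vector store (hypothetical paths).
vector_store = load_vdb_from_archive("./chroma_db.zip", "./chroma_db", "IRDAI")

policy_agent = Agent(
    role="Insurance Policy Analyst",
    goal="Answer questions about a policy identified by its UIN",
    backstory="You answer strictly from the retrieved policy document.",
    tools=[PolicyQueryTool(vector_store=vector_store)],
)

qa_task = Task(
    description="For policy UIN {UIN}, answer the question: {question}",
    expected_output="A concise answer grounded in the policy document.",
    agent=policy_agent,
)

crew = Crew(agents=[policy_agent], tasks=[qa_task])
result = crew.kickoff(inputs={"UIN": "SOME-UIN", "question": "What does this policy cover?"})
print(result)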
app.py CHANGED
@@ -1,45 +1,30 @@
- # Importing libraries for web scraping
- import requests # For making HTTP requests
- from bs4 import BeautifulSoup # For parsing HTML content
-
- # Importing library for data handling
- import pandas as pd
-
- # OS and file handling libraries
- import os
- import shutil
-
- # LlamaIndex imports for document indexing and retrieval
- from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
- from llama_index.core.node_parser import HierarchicalNodeParser
-
- # Importing ChromaDB for persistent vector storage
- import chromadb
-
- # LlamaIndex wrapper for using Chroma as a vector store
- from llama_index.vector_stores.chroma import ChromaVectorStore
-
- # HuggingFace embedding model for generating vector representations
- from llama_index.embeddings.huggingface import HuggingFaceEmbedding
-
- # Ingestion pipeline to preprocess and ingest documents into a vector store
- from llama_index.core.ingestion import IngestionPipeline
-
- # Tools for creating complex metadata-based filters for search and retrieval
- from llama_index.core.vector_stores import MetadataFilters, ExactMatchFilter, MetadataFilter, FilterOperator, FilterCondition
-
- # For retrieving relevant documents using a vector index
- from llama_index.core.retrievers import VectorIndexRetriever
-
- # OpenRouter LLM wrapper to use models via OpenRouter platform
- from llama_index.llms.openrouter import OpenRouter
-
- # Synthesizer to generate responses from retrieved documents
- from llama_index.core.response_synthesizers import get_response_synthesizer
-
- # Query engine that combines retriever and synthesizer for answering queries
- from llama_index.core.query_engine import RetrieverQueryEngine
-
- # Import core classes from CrewAI
- from crewai import Crew, Agent, Task
-
+ # This is a Gradio app that includes a chat interface and a button to run a function.
+ import gradio as gr
+ import random
+
+ # Define a function that returns a random response for the chat interface.
+ def random_response(message, history):
+     return random.choice(["Yes", "No"])
+
+ # Define a function that returns a message when the button is clicked.
+ def run_button_clicked(input_text):
+     insurer_name = input_text
+     #df = fetch_and_download_policy_documents(insurer=insurer_name, UIN='', results=999, save_path='./policy_docs')
+     #return f"Fetched {len(df)} policies for insurer '{insurer_name}'."
+     return insurer_name
+
+ # Create a Gradio Blocks app.
+ with gr.Blocks() as demo:
+     with gr.Tab("Chat"):
+         # Create a chat interface that uses the random_response function.
+         chat_interface = gr.ChatInterface(random_response, type="messages", autofocus=False)
+
+     with gr.Tab("Run"):
+         input_text = gr.Textbox(label="Insurer Name...")
+         run_button = gr.Button("Get Policies from IRDAI")
+         text_output = gr.Textbox(label="Result", interactive=False)
+         run_button.click(fn=run_button_clicked, inputs=[input_text], outputs=[text_output])
+
+ # Launch the Gradio app.
+ if __name__ == "__main__":
+     demo.launch(show_error=True)
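For reference, a sketch (not part of this commit) of what the Run button handler would look like with the commented-out IRDAI fetch enabled, assuming the scraper is importable from the functions module:

from functions import fetch_and_download_policy_documents

def run_button_clicked(input_text):
    insurer_name = input_text
    # Fetch policy metadata and download the PDFs, as in the commented-out lines above.
    df = fetch_and_download_policy_documents(insurer=insurer_name, UIN='', results=999, save_path='./policy_docs')
    return f"Fetched {len(df)} policies for insurer '{insurer_name}'."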
functions ADDED
@@ -0,0 +1,343 @@
+ # Importing libraries for web scraping
+ import requests # For making HTTP requests
+ from bs4 import BeautifulSoup # For parsing HTML content
+
+ # Importing library for data handling
+ import pandas as pd
+
+ # OS and file handling libraries
+ import os
+ import shutil
+
+ # LlamaIndex imports for document indexing and retrieval
+ from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
+ from llama_index.core.node_parser import HierarchicalNodeParser
+
+ # Importing ChromaDB for persistent vector storage
+ import chromadb
+
+ # LlamaIndex wrapper for using Chroma as a vector store
+ from llama_index.vector_stores.chroma import ChromaVectorStore
+
+ # HuggingFace embedding model for generating vector representations
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+
+ # Ingestion pipeline to preprocess and ingest documents into a vector store
+ from llama_index.core.ingestion import IngestionPipeline
+
+ # Tools for creating complex metadata-based filters for search and retrieval
+ from llama_index.core.vector_stores import MetadataFilters, ExactMatchFilter, MetadataFilter, FilterOperator, FilterCondition
+
+ # For retrieving relevant documents using a vector index
+ from llama_index.core.retrievers import VectorIndexRetriever
+
+ # OpenRouter LLM wrapper to use models via OpenRouter platform
+ from llama_index.llms.openrouter import OpenRouter
+
+ # Synthesizer to generate responses from retrieved documents
+ from llama_index.core.response_synthesizers import get_response_synthesizer
+
+ # Query engine that combines retriever and synthesizer for answering queries
+ from llama_index.core.query_engine import RetrieverQueryEngine
+
+ # Import core classes from CrewAI
+ from crewai import Crew, Agent, Task
+
+
+ def fetch_and_download_policy_documents(insurer, UIN, results, save_path):
+     """
+     Fetches health insurance policy documents from the IRDAI website using the insurer name and UIN.
+     Downloads the associated PDF files and saves metadata as a CSV.
+
+     Args:
+         insurer (str): Name of the insurance provider.
+         UIN (str): Unique Identification Number for the insurance product.
+         results (int): Number of search results to fetch.
+         save_path (str): Local directory path where documents will be downloaded.
+
+     Returns:
+         pd.DataFrame: DataFrame containing metadata of the downloaded documents.
+     """
+
+     # Construct the URL for IRDAI document search with filters applied
+     url = (
+         f'https://irdai.gov.in/health-insurance-products'
+         f'?p_p_id=com_irdai_document_media_IRDAIDocumentMediaPortlet'
+         f'&p_p_lifecycle=0&p_p_state=normal&p_p_mode=view'
+         f'&_com_irdai_document_media_IRDAIDocumentMediaPortlet_filterInsurer={insurer}'
+         f'&_com_irdai_document_media_IRDAIDocumentMediaPortlet_filterUIN={UIN}'
+         f'&_com_irdai_document_media_IRDAIDocumentMediaPortlet_filterApprovalDateFrom=01%2F01%2F2020'
+         f'&_com_irdai_document_media_IRDAIDocumentMediaPortlet_resetCur=false'
+         f'&_com_irdai_document_media_IRDAIDocumentMediaPortlet_delta={results}'
+     )
+
+     # Set headers to mimic a browser request
+     headers = {
+         "User-Agent": "Mozilla/5.0"
+     }
+
+     # Make a GET request and parse the HTML content
+     response = requests.get(url, headers=headers)
+     soup = BeautifulSoup(response.content, "html.parser")
+
+     # Find the table containing policy data
+     table = soup.find("table")
+     if not table:
+         raise ValueError("No table found – the content structure may have changed.")
+
+     # Extract all rows in the table
+     rows = table.find_all("tr")
+     data = []
+
+     # Extract column headers and append additional metadata columns
+     header_row = rows[0]
+     header_cols = [th.text.strip() for th in header_row.find_all("th")]
+     header_cols.append("Document URL")
+     header_cols.append("Document Name")
+
+     # Parse each row to extract text data and document link info
+     for row in rows[1:-1]:
+         cols = row.find_all("td")
+         text_data = [ele.text.strip() for ele in cols]
+
+         # Extract the document link and name from the relevant column
+         doc_col = cols[7]
+         link_tag = doc_col.find("a")
+         href = link_tag['href'] if link_tag and 'href' in link_tag.attrs else None
+         doc_name = link_tag.text.strip() if link_tag else None
+
+         text_data.append(href)
+         text_data.append(doc_name)
+         data.append(text_data)
+
+     # Create a DataFrame from the extracted data
+     df = pd.DataFrame(data, columns=header_cols)
+
+     # Remove the directory if it already exists to avoid old file conflicts
+     try:
+         shutil.rmtree(save_path)
+     except FileNotFoundError:
+         pass  # Ignore if directory does not exist
+
+     # Create directory for saving documents
+     os.makedirs(save_path, exist_ok=True)
+
+     # Download each document using the extracted URLs
+     for index, row in df.iterrows():
+         document_url = row['Document URL']
+         if document_url:
+             try:
+                 # Stream download to avoid loading entire file in memory
+                 response = requests.get(document_url, stream=True, headers=headers)
+                 response.raise_for_status()
+
+                 # Construct filename using UIN and save to file
+                 filename = row['UIN'] + '.pdf'
+                 filepath = os.path.join(save_path, filename)
+
+                 with open(filepath, 'wb') as file:
+                     for chunk in response.iter_content(chunk_size=8192):
+                         file.write(chunk)
+
+                 # Uncomment to log downloaded files
+                 # print(f"Downloaded: {filename}")
+             except requests.exceptions.RequestException as e:
+                 print(f"Error downloading {document_url}: {e}")
+         else:
+             # Skip rows without a valid document link
+             print(f"Skipping row {index}: No document URL found.")
+
+     # Save the DataFrame with document metadata to a CSV file
+     csv_file_path = './policy_documents_metadata.csv'
+     df.to_csv(csv_file_path, index=False)
+     print('./policy_documents_metadata.csv has been saved')
+
+     return df
+
+
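A minimal usage sketch for this scraper (illustrative, not part of the commit); the insurer value is a placeholder, and save_path and the empty UIN mirror the commented-out call in app.py:

df = fetch_and_download_policy_documents(
    insurer="INSURER NAME",   # placeholder: an insurer name as listed on the IRDAI portal
    UIN='',                   # empty UIN, as in the commented-out app.py call
    results=999,
    save_path='./policy_docs',
)
print(f"Downloaded {len(df)} policy documents")
print(df[['UIN', 'Document Name']].head())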
+ async def create_vDB(doc_path, vDB_path, vDB_colection, embedding_model):
+     """
+     Asynchronously creates a vector database (vDB) using ChromaDB and stores embedded document data.
+
+     Args:
+         doc_path (str): Path to the folder containing input documents.
+         vDB_path (str): Directory path for storing the persistent ChromaDB vector database.
+         vDB_colection (str): Name of the vector collection inside ChromaDB.
+         embedding_model (str): Name of the HuggingFace model used for embedding text.
+
+     Returns:
+         ChromaVectorStore: An instance of the vector store containing embedded document nodes.
+     """
+
+     # Load all documents from the specified directory
+     documents = SimpleDirectoryReader(doc_path).load_data()
+
+     # Add 'UIN' metadata to each document using the filename (excluding extension)
+     for doc in documents:
+         doc.metadata['UIN'] = doc.metadata['file_name'][:-4]
+
+     # Parse documents into hierarchical nodes for structured semantic representation
+     node_parser = HierarchicalNodeParser.from_defaults()
+     nodes = node_parser.get_nodes_from_documents(documents)
+
+     # Create a persistent Chroma client using the specified vector DB path
+     db = chromadb.PersistentClient(path=vDB_path)
+
+     # Remove the existing collection if it exists (for a fresh start)
+     try:
+         db.delete_collection(name=vDB_colection)
+     except Exception:
+         pass  # Ignore errors if the collection does not exist
+
+     # Create or retrieve a vector collection in ChromaDB
+     chroma_collection = db.get_or_create_collection(name=vDB_colection)
+
+     # Initialize the Chroma-based vector store
+     vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
+
+     # Set up an ingestion pipeline that includes HuggingFace embedding transformation
+     pipeline = IngestionPipeline(
+         transformations=[
+             HuggingFaceEmbedding(model_name=embedding_model),
+         ],
+         vector_store=vector_store,
+     )
+
+     # Set batch size to control memory usage during ingestion
+     BATCH_SIZE = 1000
+
+     # Asynchronously ingest nodes into the vector store in batches
+     async def ingest_in_batches(nodes):
+         for i in range(0, len(nodes), BATCH_SIZE):
+             batch = nodes[i:i + BATCH_SIZE]
+             print(f"Ingesting batch {i // BATCH_SIZE + 1} ({len(batch)} nodes)...")
+             await pipeline.arun(nodes=batch)
+
+     # Run the batch ingestion process
+     await ingest_in_batches(nodes)
+
+     # Return the vector store instance for further querying or retrieval
+     return vector_store
+
+
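A usage sketch (not part of the commit) showing how the downloaded PDFs could be ingested; the DB path and collection name are assumptions (the collection name matches the 'IRDAI' collection referenced in load_vdb_from_archive below), and the embedding model mirrors the one used in agents.py:

import asyncio

vector_store = asyncio.run(
    create_vDB(
        doc_path='./policy_docs',      # folder populated by fetch_and_download_policy_documents
        vDB_path='./chroma_db',        # assumed location for the persistent ChromaDB store
        vDB_colection='IRDAI',         # assumed collection name
        embedding_model='BAAI/bge-small-en-v1.5',
    )
)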
+ def create_query_engine(UIN, embedding_model, vector_store, similarity_top_k, llm_model, api_key):
+     """
+     Creates a RetrieverQueryEngine that performs filtered semantic search and generates responses using an LLM.
+
+     Args:
+         UIN (str): Unique Identification Number used to filter relevant documents.
+         embedding_model (str): Name of the HuggingFace model used for embedding text.
+         vector_store (ChromaVectorStore): Pre-built vector store containing embedded documents.
+         similarity_top_k (int): Number of most semantically similar nodes to retrieve.
+         llm_model (str): Name of the language model served via OpenRouter for generating responses.
+         api_key (str): API key for accessing the OpenRouter platform.
+
+     Returns:
+         RetrieverQueryEngine: A query engine capable of semantic search and LLM-powered response generation.
+     """
+
+     # Build a vector index from the existing vector store using the specified embedding model
+     index = VectorStoreIndex.from_vector_store(
+         vector_store=vector_store,
+         embed_model=HuggingFaceEmbedding(model_name=embedding_model)
+     )
+
+     # Define metadata filters to limit search results to documents matching the specified UIN
+     filters = MetadataFilters(
+         filters=[
+             ExactMatchFilter(key="UIN", value=UIN)
+         ]
+     )
+
+     # Create a retriever that uses both vector similarity and metadata filters
+     retriever = VectorIndexRetriever(
+         index=index,
+         filters=filters,
+         similarity_top_k=similarity_top_k  # Retrieve the top-k most semantically similar nodes
+     )
+
+     # Initialize the LLM from OpenRouter using the specified model name
+     llm = OpenRouter(
+         api_key=api_key,
+         model=llm_model,
+     )
+
+     # Create a response synthesizer that leverages the LLM to answer user queries
+     response_synthesizer = get_response_synthesizer(llm=llm)
+
+     # Set up the complete query engine by combining retriever and response synthesizer
+     query_engine = RetrieverQueryEngine(
+         retriever=retriever,
+         response_synthesizer=response_synthesizer
+     )
+
+     return query_engine
+
+
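For reference, this mirrors how PolicyQueryTool._run in agents.py invokes the function, shown as a standalone sketch with placeholder UIN, question, and API key:

query_engine = create_query_engine(
    UIN="SOME-UIN",                                   # placeholder policy UIN
    embedding_model="BAAI/bge-small-en-v1.5",
    vector_store=vector_store,                        # e.g. the store returned by create_vDB above
    similarity_top_k=10,
    llm_model="deepseek/deepseek-chat-v3-0324:free",
    api_key="YOUR_OPENROUTER_API_KEY",                # placeholder OpenRouter key
)
print(query_engine.query("What is covered under this policy?"))  # placeholder question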
+ def archive_vdb(vdb_path, archive_path):
+     """
+     Archives the vDB (vector database) directory into a ZIP file.
+
+     Args:
+         vdb_path (str): Path to the directory containing the vector database to archive.
+         archive_path (str): Full path (including .zip extension) where the archive will be saved.
+
+     Returns:
+         None
+     """
+     try:
+         # Create a ZIP archive of the vDB directory
+         # shutil.make_archive requires the archive path without the extension
+         shutil.make_archive(archive_path[:-4], 'zip', vdb_path)  # Strip the trailing ".zip" before archiving
+         print(f"vDB successfully archived to {archive_path}")
+     except FileNotFoundError:
+         # Handle case where vDB path does not exist
+         print(f"Error: vDB directory not found at {vdb_path}")
+     except Exception as e:
+         # Catch-all for any unexpected errors during archiving
+         print(f"An error occurred during archiving: {e}")
+
+
+ def load_vdb_from_archive(archive_path, vdb_path, collection):
+     """
+     Extracts and loads a Chroma-based vector database (vDB) from a ZIP archive.
+
+     Args:
+         archive_path (str): Full path to the ZIP archive containing the vDB.
+         vdb_path (str): Destination directory where the archive contents will be extracted.
+         collection (str): Name of the Chroma collection within the vDB.
+
+     Returns:
+         ChromaVectorStore or None: A vector store object ready for use, or None if loading fails.
+     """
+     try:
+         # Extract the archive to the specified vdb_path directory
+         shutil.unpack_archive(archive_path, vdb_path)
+         print(f"vDB archive extracted to {vdb_path}")
+
+         # Initialize a persistent ChromaDB client from the extracted directory
+         db = chromadb.PersistentClient(path=vdb_path)
+
+         # Retrieve or create the named collection from the ChromaDB
+         chroma_collection = db.get_or_create_collection(name=collection)
+
+         # Wrap the Chroma collection in a ChromaVectorStore object for use with LlamaIndex
+         vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
+
+         print("ChromaDB loaded successfully from archive.")
+         return vector_store
+
+     except FileNotFoundError:
+         # Handle case where the ZIP archive does not exist
+         print(f"Error: vDB archive not found at {archive_path}")
+         return None
+     except Exception as e:
+         # Catch-all for any unexpected errors during extraction or loading
+         print(f"An error occurred during loading: {e}")
+         return None
+
+
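A round-trip sketch (not part of the commit) for persisting and restoring the vector DB; the paths are assumptions and the collection name matches the 'IRDAI' collection used above:

# Archive the Chroma directory into a ZIP file, then restore it later (e.g. on app startup).
archive_vdb(vdb_path='./chroma_db', archive_path='./chroma_db.zip')

vector_store = load_vdb_from_archive(
    archive_path='./chroma_db.zip',
    vdb_path='./chroma_db_restored',   # hypothetical extraction directory
    collection='IRDAI',
)
if vector_store is not None:
    print("Vector store ready for create_query_engine / PolicyQueryTool")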
pip_install.txt CHANGED
@@ -1 +1 @@
- pip install bs4 llama-index llama-index-llms-openrouter chromadb llama-index-vector-stores-chroma llama-index-embeddings-huggingface langtrace-python-sdk crewai crewai-tools -q -U
+ pip install bs4 llama-index llama-index-llms-openrouter chromadb llama-index-vector-stores-chroma llama-index-embeddings-huggingface langtrace-python-sdk crewai crewai-tools