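"""RAG chatbot: scrapes web articles, chunks and embeds them into a vector store,
and answers questions with a Groq-hosted LLM using the retrieved context."""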
import os
import time
from typing import Any, Dict

from dotenv import load_dotenv
from groq import Groq

from vector_store import VectorStore
from web_scraper import WebScraper, TextChunker

load_dotenv()
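# Example .env this module reads (a sketch; the GROQ_API_KEY value is a placeholder,
# the optional keys fall back to the defaults shown):
#
#   GROQ_API_KEY=your-groq-api-key
#   MAX_CHUNK_SIZE=500
#   TOP_K_RESULTS=5
#   EMBEDDING_MODEL=all-MiniLM-L6-v2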

class RAGChatbot:
    def __init__(self):
        """Initialize RAG Chatbot with all components"""
        print("πŸ€– Initializing RAG Chatbot...")
        
        # Initialize Groq client
        self.groq_client = Groq(api_key=os.getenv("GROQ_API_KEY"))
        
        # Initialize components
        self.vector_store = VectorStore()
        self.web_scraper = WebScraper(delay=1.0)
        self.text_chunker = TextChunker(
            chunk_size=int(os.getenv("MAX_CHUNK_SIZE", 500)),
            overlap=50
        )
        
        # Configuration
        self.model_name = "llama3-8b-8192"
        self.top_k = int(os.getenv("TOP_K_RESULTS", 5))
        self.max_tokens = 1000
        
        print("βœ… RAG Chatbot initialized successfully!")
    
    def ingest_url(self, url: str) -> Dict[str, Any]:
        """
        Ingest content from a URL into the knowledge base
        Args:
            url: URL to scrape and ingest
        Returns:
            Dictionary with ingestion results
        """
        try:
            print(f"πŸ“₯ Ingesting content from: {url}")
            
            # Scrape the article
            article_data = self.web_scraper.scrape_article(url)
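            # scrape_article() is assumed to return a dict with at least
            # 'url', 'title', 'content', and 'word_count': the keys read below.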
            
            if not article_data['content']:
                return {
                    'success': False,
                    'message': f"Could not extract content from {url}",
                    'chunks_added': 0
                }
            
            # Create chunks
            chunks = self.text_chunker.chunk_text(
                article_data['content'],
                metadata={
                    'url': article_data['url'],
                    'title': article_data['title']
                }
            )
            
            if not chunks:
                return {
                    'success': False,
                    'message': "No valid chunks created from content",
                    'chunks_added': 0
                }
            
            # Add to vector store
            success = self.vector_store.add_documents(chunks)
            
            if success:
                return {
                    'success': True,
                    'message': f"Successfully ingested '{article_data['title']}'",
                    'chunks_added': len(chunks),
                    'title': article_data['title'],
                    'word_count': article_data['word_count']
                }
            else:
                return {
                    'success': False,
                    'message': "Failed to add chunks to vector store",
                    'chunks_added': 0
                }
                
        except Exception as e:
            return {
                'success': False,
                'message': f"Error ingesting {url}: {str(e)}",
                'chunks_added': 0
            }
    
    def chat(self, message: str, include_sources: bool = True) -> Dict[str, Any]:
        """
        Chat with the RAG system
        Args:
            message: User's question/message
            include_sources: Whether to include source information
        Returns:
            Dictionary with the response text, sources, timing info, and context_used count
        """
        try:
            print(f"πŸ’¬ Processing query: {message[:50]}...")
            
            # Step 1: Retrieve relevant context
            start_time = time.time()
            relevant_docs = self.vector_store.search_similar(message, top_k=self.top_k)
            retrieval_time = time.time() - start_time
            
            if not relevant_docs:
                return {
                    'response': "I don't have enough information to answer your question. Please add some relevant content to my knowledge base first.",
                    'sources': [],
                    'retrieval_time': round(retrieval_time, 3),
                    'generation_time': 0,
                    'total_time': round(retrieval_time, 3),
                    'context_used': 0
                }
            
            # Step 2: Create context from retrieved documents
            context_parts = []
            sources = []
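            # Build the prompt context from substantive chunks only; each kept chunk
            # also contributes a source entry (title, url, score, snippet) for citation.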
            
            for doc in relevant_docs:
                clean_text = doc['text'].replace("\n", " ").strip()
                # πŸ” Filter: skip too short chunks (less than 50 words)
                if len(clean_text.split()) < 50:
                    continue
                context_parts.append(clean_text)
                sources.append({
                    'title': doc['title'],
                    'url': doc['url'],
                    'similarity_score': doc['score'],
                    'snippet': doc['text'][:200] + "..." if len(doc['text']) > 200 else doc['text']
                })
            
            context = "\n\n".join(context_parts)
            
            # βœ… Fallback: if no meaningful context remains after filtering
            if not context.strip():
                return {
                    'response': "I couldn't find any good content to answer your question. Try ingesting a more informative page.",
                    'sources': [],
                    'retrieval_time': round(retrieval_time, 3),
                    'generation_time': 0,
                    'total_time': round(retrieval_time, 3),
                    'context_used': 0
                }

            # Step 3: Generate response using Groq
            generation_start = time.time()
            response = self._generate_response(message, context)
            generation_time = time.time() - generation_start
            
            total_time = time.time() - start_time
            
            return {
                'response': response,
                'sources': sources if include_sources else [],
                'retrieval_time': round(retrieval_time, 3),
                'generation_time': round(generation_time, 3),
                'total_time': round(total_time, 3),
                'context_used': len(context_parts)
            }
            
        except Exception as e:
            return {
                'response': f"Sorry, I encountered an error: {str(e)}",
                'sources': [],
                'retrieval_time': 0,
                'generation_time': 0,
                'total_time': 0,
                'context_used': 0,
                'error': str(e)
            }
    
    def _generate_response(self, query: str, context: str) -> str:
        """
        Generate response using Groq API
        Args:
            query: User's question
            context: Retrieved context
        Returns:
            Generated response
        """
        system_prompt = """You are a helpful AI assistant.

You must answer user questions based strictly on the provided context. Do not use outside knowledge, do not make up facts, and do not guess.

If the context does not contain enough information, say clearly: "I don’t have enough information in the context to answer that."

When you do answer:
- Be accurate, concise, and truthful
- Use facts and phrases from the context only
- If asked for a source, refer to the matching context
- Keep your tone friendly and professional
"""

        
        user_prompt = f"""Context:
{context}

Question: {query}

Please provide a detailed answer based on the context above. If the context doesn't contain sufficient information to answer the question, please say so clearly."""

        try:
            completion = self.groq_client.chat.completions.create(
                model=self.model_name,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                max_tokens=self.max_tokens,
                temperature=0.3,  # Lower temperature for more focused responses
                top_p=0.9
            )
            
            return completion.choices[0].message.content.strip()
            
        except Exception as e:
            return f"Error generating response: {str(e)}"
    
    def get_knowledge_base_stats(self) -> Dict[str, Any]:
        """Get statistics about the knowledge base"""
        try:
            stats = self.vector_store.get_index_stats()
            return {
                'total_documents': stats.get('total_vectors', 0),
                'index_dimension': stats.get('dimension', 0),
                'index_fullness': stats.get('index_fullness', 0),
                'model_used': self.model_name,
                'embedding_model': os.getenv("EMBEDDING_MODEL", "all-MiniLM-L6-v2")
            }
        except Exception as e:
            return {'error': str(e)}
    
    def clear_knowledge_base(self) -> bool:
        """Clear all documents from knowledge base"""
        try:
            return self.vector_store.delete_all()
        except Exception as e:
            print(f"Error clearing knowledge base: {str(e)}")
            return False

# Test the chatbot
if __name__ == "__main__":
    # Initialize chatbot
    chatbot = RAGChatbot()
    
    # Test ingestion (replace with your URL)
    test_url = "https://medium.com/@aminajavaid30/building-a-rag-system-the-data-ingestion-pipeline-d04235fd17ea"
    
    print("Testing content ingestion...")
    ingestion_result = chatbot.ingest_url(test_url)
    print(f"Ingestion result: {ingestion_result}")
    
    if ingestion_result['success']:
        print("\nTesting chat functionality...")
        
        # Test questions
        test_questions = [
            "What is RAG?",
            "How does the data ingestion pipeline work?",
            "What are the main components of a RAG system?"
        ]
        
        for question in test_questions:
            print(f"\n❓ Question: {question}")
            response = chatbot.chat(question)
            print(f"πŸ€– Answer: {response['response']}")
            print(f"⏱️ Time: {response['total_time']}s (Retrieval: {response['retrieval_time']}s, Generation: {response['generation_time']}s)")
            print(f"πŸ“š Sources used: {response['context_used']}")
    
    # Show knowledge base stats
    stats = chatbot.get_knowledge_base_stats()
    print(f"\nπŸ“Š Knowledge Base Stats: {stats}")