# app.py - Main application file for Hugging Face Space
import gradio as gr
import os
from typing import List, Tuple
import numpy as np
from sentence_transformers import SentenceTransformer
import faiss
from langchain.text_splitter import RecursiveCharacterTextSplitter
import PyPDF2
import docx
from openai import OpenAI

class RAGChatbot:
    def __init__(self):
        """Initialize the RAG chatbot with embedding model and vector store."""
        # Initialize embedding model
        print("Loading embedding model...")
        self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
        
        # Initialize vector store (FAISS) with a flat L2 index
        self.dimension = self.embedding_model.get_sentence_embedding_dimension()  # 384 for all-MiniLM-L6-v2
        self.index = faiss.IndexFlatL2(self.dimension)
        
        # Store for document chunks
        self.documents = []
        
        # Text splitter for chunking documents
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=500,
            chunk_overlap=50,
            length_function=len,
            separators=["\n\n", "\n", " ", ""]
        )
        
        # Get OpenAI API key from Hugging Face Secrets
        self.api_key = os.getenv("OPENAI_API_KEY")
        
    def read_pdf(self, file_path: str) -> str:
        """Extract text from PDF file."""
        text = ""
        try:
            with open(file_path, 'rb') as file:
                pdf_reader = PyPDF2.PdfReader(file)
                for page in pdf_reader.pages:
                    text += page.extract_text() or ""
        except Exception as e:
            print(f"Error reading PDF: {e}")
        return text
    
    def read_docx(self, file_path: str) -> str:
        """Extract text from DOCX file."""
        text = ""
        try:
            doc = docx.Document(file_path)
            for paragraph in doc.paragraphs:
                text += paragraph.text + "\n"
        except Exception as e:
            print(f"Error reading DOCX: {e}")
        return text
    
    def read_txt(self, file_path: str) -> str:
        """Read text from TXT file."""
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                return file.read()
        except Exception as e:
            print(f"Error reading TXT: {e}")
            return ""
    
    def process_documents(self, files) -> str:
        """Process uploaded documents and add to vector store."""
        if not files:
            return "No files uploaded."
        
        all_text = ""
        processed_files = 0
        
        for file in files:
            try:
                # Get file extension
                file_path = file.name
                
                # Read file based on extension
                if file_path.endswith('.pdf'):
                    text = self.read_pdf(file_path)
                elif file_path.endswith('.docx'):
                    text = self.read_docx(file_path)
                elif file_path.endswith('.txt'):
                    text = self.read_txt(file_path)
                else:
                    continue
                    
                all_text += text + "\n"
                processed_files += 1
            except Exception as e:
                print(f"Error processing file {file.name}: {e}")
                continue
        
        if not all_text.strip():
            return "No text content found in the uploaded documents."
        
        # Split text into chunks
        chunks = self.text_splitter.split_text(all_text)
        
        if not chunks:
            return "No text chunks created from documents."
        
        # Create embeddings for chunks
        embeddings = self.embedding_model.encode(chunks)
        
        # Add all chunk embeddings to the FAISS index and keep the chunk texts aligned
        self.index.add(np.asarray(embeddings, dtype="float32"))
        self.documents.extend(chunks)
        
        return f"βœ… Successfully processed {len(chunks)} text chunks from {processed_files} documents."
    
    def retrieve_relevant_chunks(self, query: str, k: int = 3) -> List[str]:
        """Retrieve k most relevant chunks for the query."""
        if len(self.documents) == 0:
            return []
        
        # Create embedding for query
        query_embedding = self.embedding_model.encode([query])
        
        # Search in FAISS index
        distances, indices = self.index.search(query_embedding, min(k, len(self.documents)))
        
        # Get relevant documents
        relevant_chunks = [self.documents[idx] for idx in indices[0] if idx < len(self.documents)]
        
        return relevant_chunks
    
    def generate_response(self, query: str, context: List[str]) -> str:
        """Generate response using OpenAI API with retrieved context."""
        if not self.api_key:
            return "⚠️ OpenAI API key not configured. Please add OPENAI_API_KEY to the Space secrets."
        
        if not context:
            return "No relevant documents found. Please upload documents first."
        
        # Prepare context string
        context_str = "\n\n".join(context[:3])  # Limit context to avoid token limits
        
        # Create prompt
        prompt = f"""You are a helpful assistant. Use the following context to answer the question. 
        If you cannot answer the question based on the context, say so.
        
        Context:
        {context_str}
        
        Question: {query}
        
        Answer:"""
        
        try:
            # Call the OpenAI chat completions API (openai >= 1.0 client)
            client = OpenAI(api_key=self.api_key)
            
            response = client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant that answers questions based on provided context."},
                    {"role": "user", "content": prompt}
                ],
                max_tokens=500,
                temperature=0.7
            )
            
            return response.choices[0].message.content
        except Exception as e:
            return f"Error generating response: {str(e)}"
    
    def chat(self, message: str, history: List[Tuple[str, str]]) -> Tuple[str, List[Tuple[str, str]]]:
        """Main chat function that combines retrieval and generation."""
        if not message.strip():
            return "", history
        
        # Retrieve relevant chunks
        relevant_chunks = self.retrieve_relevant_chunks(message)
        
        # Generate response
        response = self.generate_response(message, relevant_chunks)
        
        # Update history
        history.append((message, response))
        
        return "", history

# Initialize the chatbot
print("Initializing RAG Chatbot...")
chatbot = RAGChatbot()

# Create Gradio interface
with gr.Blocks(title="RAG Chatbot", theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # 🤖 RAG Chatbot with Gradio
        
        Upload your documents and start asking questions! The chatbot will retrieve relevant information from your documents to answer your queries.
        
        **Supported formats:** PDF, DOCX, TXT | **Powered by:** Sentence-BERT + FAISS + OpenAI
        """
    )
    
    with gr.Tab("πŸ“„ Upload Documents"):
        file_upload = gr.File(
            label="Upload Documents",
            file_count="multiple",
            file_types=[".pdf", ".docx", ".txt"]
        )
        upload_button = gr.Button("Process Documents", variant="primary")
        upload_status = gr.Textbox(label="Status", interactive=False)
        
        upload_button.click(
            fn=chatbot.process_documents,
            inputs=[file_upload],
            outputs=[upload_status]
        )
    
    with gr.Tab("πŸ’¬ Chat"):
        chatbot_interface = gr.Chatbot(
            label="Chat History",
            height=400,
            bubble_full_width=False
        )
        
        with gr.Row():
            msg = gr.Textbox(
                label="Your Question",
                placeholder="Ask a question about your documents...",
                lines=1,
                scale=4
            )
            submit_btn = gr.Button("Send", variant="primary", scale=1)
        
        clear = gr.Button("πŸ—‘οΈ Clear Chat")
        
        # Handle message submission
        msg.submit(
            fn=chatbot.chat,
            inputs=[msg, chatbot_interface],
            outputs=[msg, chatbot_interface]
        )
        
        submit_btn.click(
            fn=chatbot.chat,
            inputs=[msg, chatbot_interface],
            outputs=[msg, chatbot_interface]
        )
        
        # Clear chat history
        clear.click(
            lambda: (None, []),
            outputs=[msg, chatbot_interface]
        )
    
    with gr.Tab("βš™οΈ Settings"):
        gr.Markdown(
            """
            ### Configuration
            
            | Component | Details |
            |-----------|---------|
            | **Embedding Model** | all-MiniLM-L6-v2 |
            | **Vector Store** | FAISS |
            | **LLM** | OpenAI GPT-3.5-turbo |
            | **Chunk Size** | 500 characters |
            | **Chunk Overlap** | 50 characters |
            | **Retrieved Chunks** | 3 |
            
            ### About
            This chatbot uses retrieval-augmented generation (RAG) to answer questions grounded in the documents you upload.
            """
        )

# Launch the app
if __name__ == "__main__":
    demo.launch()

# -----------------------------------
# requirements.txt - Dependencies file
"""
gradio==4.19.2
sentence-transformers==2.3.1
faiss-cpu==1.7.4
langchain==0.1.6
openai==1.12.0
PyPDF2==3.0.1
python-docx==1.1.0
numpy==1.24.3
"""

# -----------------------------------
# README.md - Documentation for your Space
"""
---
title: RAG Chatbot
emoji: 🤖
colorFrom: blue
colorTo: green
sdk: gradio
sdk_version: 4.19.2
app_file: app.py
pinned: false
license: mit
---

# RAG Chatbot

A Retrieval-Augmented Generation chatbot built with Gradio, FAISS, and OpenAI.

## Features
- Upload PDF, DOCX, and TXT documents
- Semantic search using Sentence-BERT embeddings
- Context-aware responses using OpenAI GPT-3.5
- Interactive chat interface

## Setup
Add your OpenAI API key to the Space secrets:
1. Go to Settings → Variables and secrets
2. Add a new secret named `OPENAI_API_KEY`
3. Paste your OpenAI API key
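
The app reads this secret at startup via `os.getenv` (see `app.py`), so no code changes are needed once it is set:

```python
import os

# Returns None if the secret has not been configured for the Space
openai_api_key = os.getenv("OPENAI_API_KEY")
```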

## Usage
1. Upload your documents in the Upload Documents tab
2. Wait for processing confirmation
3. Go to the Chat tab and start asking questions!
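
## Running locally

To try the app outside of Spaces, install the pinned dependencies from `requirements.txt`, set the `OPENAI_API_KEY` environment variable, and run `python app.py`; Gradio prints a local URL once the server starts.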

Check out the [GitHub repository](https://github.com/yourusername/rag-chatbot) for more details.
"""