import threading
from datetime import datetime

import gradio as gr
import torch
import wikipedia
import PyPDF2
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer
model_name = "Qwen/Qwen3-0.6B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
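# Note: from_pretrained loads the model in eval mode and full precision by
# default; on a GPU, passing torch_dtype="auto" would likely cut memory use,
# but that is an optional tweak rather than something this script requires.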

# System prompt for safe responses
SYSTEM_PROMPT = """You are a helpful, harmless, and honest AI assistant.
- Provide accurate and factual information
- Be respectful and avoid harmful, unethical, or offensive content
- Admit when you don't know something
- Stay on topic and provide clear, concise answers
"""

# Global variables for RAG
rag_content = ""
rag_filename = ""

# Function to generate a response from the model; max_length caps *new* tokens
def generate_response(prompt, max_length=512):
    full_prompt = SYSTEM_PROMPT + "\n\n" + prompt
    inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=2048).to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=max_length,  # max_length would count prompt tokens too and can abort generation on long prompts
        num_return_sequences=1,
        temperature=0.7,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id
    )
    # Decode only the newly generated tokens; string-replacing the prompt out
    # of the full decode is unreliable once special tokens are stripped.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

# Function to generate search queries
def generate_search_queries(user_query, num_queries=5):
    prompt = f"""Generate {num_queries} different search queries to find comprehensive information about: "{user_query}"

The queries should cover different aspects and perspectives. List only the queries, one per line, without numbering.

Queries:"""

    response = generate_response(prompt, max_length=256)

    # Parse the generated queries
    queries = [q.strip() for q in response.split('\n') if q.strip() and len(q.strip()) > 5]

    # If the model didn't generate enough queries, pad with simple variations
    if len(queries) < num_queries:
        fallbacks = [
            user_query,
            f"{user_query} latest news",
            f"{user_query} {datetime.now().year}",
            f"recent {user_query}",
            f"{user_query} updates",
        ]
        for fb in fallbacks:
            if fb not in queries:
                queries.append(fb)

    return queries[:num_queries]

# Enhanced Wikipedia search with multiple queries
def enhanced_wiki_search(user_query, queries=None):
    search_results = []

    # Generate multiple search queries unless the caller already supplies them
    if queries is None:
        queries = generate_search_queries(user_query, num_queries=5)

    print(f"🔍 Generated search queries: {queries}")

    for query in queries:
        try:
            # Try to get Wikipedia summary
            summary = wikipedia.summary(query, sentences=3, auto_suggest=True)
            search_results.append({
                'query': query,
                'source': 'Wikipedia',
                'content': summary
            })
        except wikipedia.exceptions.DisambiguationError as e:
            # If disambiguation, try first option
            try:
                summary = wikipedia.summary(e.options[0], sentences=3)
                search_results.append({
                    'query': query,
                    'source': 'Wikipedia',
                    'content': summary
                })
            except Exception:
                pass  # disambiguation fallback failed; skip this query
        except wikipedia.exceptions.PageError:
            # Try searching for the query
            try:
                search_list = wikipedia.search(query, results=3)
                if search_list:
                    summary = wikipedia.summary(search_list[0], sentences=3)
                    search_results.append({
                        'query': query,
                        'source': 'Wikipedia',
                        'content': summary
                    })
            except Exception:
                pass  # search fallback failed; skip this query
        except Exception as e:
            print(f"Error with query '{query}': {str(e)}")
            continue

    return search_results

# Function to aggregate and understand search results
def aggregate_search_results(search_results, user_query):
    if not search_results:
        return "No search results found. Please try a different query."

    # Combine all search results
    combined_info = "\n\n".join([
        f"Source: {result['source']}\nQuery: {result['query']}\nInformation: {result['content']}"
        for result in search_results
    ])
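    # Note: generate_response truncates its input to 2048 tokens, so with many
    # long results the tail of combined_info may be silently dropped.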

    # Generate comprehensive response
    prompt = f"""Based on the following search results, provide a comprehensive and well-structured answer to the user's question: "{user_query}"

Search Results:
{combined_info}

Instructions:
- Synthesize information from all sources
- Provide accurate and up-to-date information
- If sources conflict, point out the discrepancy
- Structure your response clearly
- Include relevant details and context

Comprehensive Answer:"""

    response = generate_response(prompt, max_length=1024)
    return response

# Function to extract text from PDF or TXT
def extract_text(file):
    global rag_filename
    try:
        if file.name.endswith(".pdf"):
            rag_filename = file.name
            pdf_reader = PyPDF2.PdfReader(file.name)
            text = ""
            for page in pdf_reader.pages:
                page_text = page.extract_text()
                if page_text:
                    text += page_text + "\n"
            return text if text else "Could not extract text from PDF."
        elif file.name.endswith(".txt"):
            rag_filename = file.name
            with open(file.name, 'r', encoding='utf-8') as f:
                return f.read()
        else:
            return "Unsupported file type. Please upload PDF or TXT files."
    except Exception as e:
        return f"Error reading file: {str(e)}"

# Main chat function with history
def chat(message, history, mode, file=None):
    global rag_content, rag_filename

    if not message.strip():
        # chat() is a generator (it yields below), so a bare "return value"
        # would never reach the caller; yield first, then stop.
        yield history, ""
        return

    # Handle file upload for RAG
    if file:
        extracted = extract_text(file)
        if extracted.startswith("Error") or extracted.startswith("Unsupported") or extracted.startswith("Could not"):
            history.append((message, f"❌ {extracted}"))
            yield history, ""
            return
        rag_content = extracted
        history.append((None, f"✓ File uploaded: {rag_filename} ({len(rag_content)} characters)"))
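        # A (None, text) pair renders as a bot-only message in the tuple-format Chatbot.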

    # Generate response based on mode
    if mode == "Web search":
        # Show searching indicator
        history.append((message, "🔍 Searching and analyzing information..."))
        yield history, ""

        # Generate the search queries once and reuse them for the search itself
        search_queries = generate_search_queries(message, num_queries=5)

        # Perform searches with the queries generated above (avoids a second,
        # redundant round of query generation inside enhanced_wiki_search)
        search_results = enhanced_wiki_search(message, queries=search_queries)

        # Aggregate and generate response
        if search_results:
            response = "📊 *Search Queries Generated:*\n"
            response += "\n".join([f"- {q}" for q in search_queries])
            response += f"\n\n✅ *Found {len(search_results)} relevant sources*\n\n"

            # Generate comprehensive answer
            final_answer = aggregate_search_results(search_results, message)
            response += "📝 *Comprehensive Answer:*\n" + final_answer
        else:
            response = "❌ Could not find relevant information. Please try rephrasing your query."

        # Update the last message with final response
        history[-1] = (message, response)

    elif mode == "Think":
        think_prompt = f"Think step by step about the following question: {message}\n\nProvide your reasoning process:"
        thoughts = generate_response(think_prompt, max_length=512)
        final_prompt = f"Based on this reasoning:\n{thoughts}\n\nNow provide a final answer to: {message}"
        final_response = generate_response(final_prompt, max_length=512)
        response = f"🤔 *Thinking Process:\n{thoughts}\n\n💡 **Final Answer:*\n{final_response}"
        history.append((message, response))

    elif mode == "No think":
        prompt = f"Answer the following question directly and concisely:\n{message}\n\nAnswer:"
        response = generate_response(prompt, max_length=512)
        history.append((message, response))

    elif mode == "RAG":
        if not rag_content:
            history.append((message, "⚠ Please upload a PDF or TXT file first for RAG mode."))
            yield history, ""
            return
        chunk_size = 1500
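        # Naive RAG: only the first chunk_size characters of the document reach
        # the model; there is no chunking or embedding-based retrieval here.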
        prompt = f"Document content:\n{rag_content[:chunk_size]}\n\nUser question: {message}\n\nAnswer based strictly on the document content above:"
        response = generate_response(prompt, max_length=768)
        history.append((message, response))
    else:
        response = "Invalid mode selected."
        history.append((message, response))

    yield history, ""

# Function for parallel chat
def parallel_chat(q1, q2, q3, q4, mode, file=None):
    global rag_content

    # Handle file upload for RAG
    if file and mode == "RAG":
        extracted = extract_text(file)
        if not (extracted.startswith("Error") or extracted.startswith("Unsupported") or extracted.startswith("Could not")):
            rag_content = extracted

    responses = [None, None, None, None]
    questions = [q1, q2, q3, q4]
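    # Note: all four threads share one model instance; Python-side work is
    # largely serialized by the GIL, so the parallelism mainly overlaps network
    # I/O (e.g. the Wikipedia calls) rather than the generation itself.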

    def process(i):
        if questions[i] and questions[i].strip():
            temp_history = []
            result = None  # guard against the generator yielding nothing
            # Drain the chat generator; the last yielded history holds the answer
            for result, _ in chat(questions[i], temp_history, mode):
                pass
            if result:
                responses[i] = result[-1][1]

    threads = []
    for i in range(4):
        if questions[i] and questions[i].strip():
            t = threading.Thread(target=process, args=(i,))
            t.start()
            threads.append(t)

    for t in threads:
        t.join()

    return (responses[0] or "No question provided",
            responses[1] or "No question provided",
            responses[2] or "No question provided",
            responses[3] or "No question provided")

# Custom CSS for better UI
custom_css = """
#chatbot {
    height: 600px;
    overflow-y: auto;
}
.message {
    padding: 10px;
    margin: 5px;
    border-radius: 8px;
}
"""

# Gradio interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 AI Chatbot with Advanced Web Search")
    gr.Markdown("Choose your preferred mode and start chatting! Web search now generates multiple queries for comprehensive results.")

    with gr.Tab("💬 Chat"):
        with gr.Row():
            mode = gr.Dropdown(
                choices=["No think", "Think", "Web search", "RAG"],
                label="Select Mode",
                value="No think",
                info="Web search uses 5 different queries for comprehensive results"
            )
            file = gr.File(
                label="Upload File (PDF/TXT)",
                file_types=[".pdf", ".txt"],
                type="filepath"
            )

        chatbot = gr.Chatbot(
            label="Conversation",
            elem_id="chatbot",
            height=500,
            show_label=True,
            bubble_full_width=False
        )

        with gr.Row():
            input_text = gr.Textbox(
                label="Your Message",
                placeholder="Type your message here...",
                lines=2,
                scale=4
            )
            send_btn = gr.Button("Send 📤", scale=1, variant="primary")

        clear_btn = gr.Button("Clear History 🗑")

        # Chat functionality
        send_btn.click(
            chat,
            inputs=[input_text, chatbot, mode, file],
            outputs=[chatbot, input_text]
        )
        input_text.submit(
            chat,
            inputs=[input_text, chatbot, mode, file],
            outputs=[chatbot, input_text]
        )
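        # Note: the file input is passed on every send, so an attached file is
        # re-extracted with each message for as long as it stays selected.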
        clear_btn.click(lambda: [], None, chatbot)

    with gr.Tab("⚡ Parallel Chat"):
        gr.Markdown("### Ask up to 4 questions simultaneously!")
        mode_parallel = gr.Dropdown(
            choices=["No think", "Think", "Web search", "RAG"],
            label="Select Mode",
            value="No think"
        )
        file_parallel = gr.File(
            label="Upload File for RAG (PDF/TXT)",
            file_types=[".pdf", ".txt"],
            type="filepath"
        )

        with gr.Row():
            with gr.Column():
                q1 = gr.Textbox(label="Question 1", lines=2)
                q2 = gr.Textbox(label="Question 2", lines=2)
            with gr.Column():
                q3 = gr.Textbox(label="Question 3", lines=2)
                q4 = gr.Textbox(label="Question 4", lines=2)

        btn_parallel = gr.Button("Submit All Questions 🚀", variant="primary")

        with gr.Row():
            with gr.Column():
                r1 = gr.Textbox(label="Response 1", lines=8, max_lines=20)
                r2 = gr.Textbox(label="Response 2", lines=8, max_lines=20)
            with gr.Column():
                r3 = gr.Textbox(label="Response 3", lines=8, max_lines=20)
                r4 = gr.Textbox(label="Response 4", lines=8, max_lines=20)

        btn_parallel.click(
            parallel_chat,
            inputs=[q1, q2, q3, q4, mode_parallel, file_parallel],
            outputs=[r1, r2, r3, r4]
        )

    with gr.Tab("ℹ About"):
        gr.Markdown("""
        ## Features:
        - **No think**: Direct, concise answers
        - **Think**: Step-by-step reasoning process
        - **Web search**: 🔥 **NEW!** Generates 5 different search queries and aggregates results for comprehensive answers
        - **RAG**: Answer questions based on uploaded documents

        ## Enhanced Web Search:
        - Automatically generates 5 diverse search queries
        - Searches multiple sources simultaneously
        - Aggregates and synthesizes information
        - Provides comprehensive, up-to-date answers

        ## Tips:
        - Upload PDF or TXT files for RAG mode
        - Use parallel chat for comparing different questions
        - Clear history to start fresh conversations
        - Web search is best for current events and factual queries

        ## Safety:
        This chatbot includes a system prompt for safe, helpful, and honest responses.
        """)

demo.launch(share=True)