Create app.py
app.py
ADDED
# Install required libraries (the `!pip` notebook syntax is not valid in app.py;
# on Hugging Face Spaces, list these in requirements.txt instead):
#   pip install gradio transformers torch wikipedia pypdf2 requests beautifulsoup4

import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
import wikipedia
import PyPDF2
import threading
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import json

# Load the model and tokenizer
model_name = "Qwen/Qwen2.5-1.5B"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

# System prompt for safe responses
SYSTEM_PROMPT = """You are a helpful, harmless, and honest AI assistant.
- Provide accurate and factual information
- Be respectful and avoid harmful, unethical, or offensive content
- Admit when you don't know something
- Stay on topic and provide clear, concise answers
"""

# Global variables for RAG
rag_content = ""
rag_filename = ""

# Function to generate a response from the model
def generate_response(prompt, max_new_tokens=512):
    full_prompt = SYSTEM_PROMPT + "\n\n" + prompt
    inputs = tokenizer(full_prompt, return_tensors="pt", truncation=True, max_length=2048).to(device)
    outputs = model.generate(
        **inputs,
        # Cap the number of *new* tokens rather than the total length, so long
        # prompts (e.g. aggregated search results) still get a full answer
        max_new_tokens=max_new_tokens,
        num_return_sequences=1,
        temperature=0.7,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id
    )
    # Decode only the newly generated tokens, skipping the echoed prompt
    response = tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
    return response.strip()
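# Quick smoke test (hypothetical usage, not wired into the app):
#   print(generate_response("What is the capital of France?", max_new_tokens=64))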

# Function to generate search queries
def generate_search_queries(user_query, num_queries=5):
    prompt = f"""Generate {num_queries} different search queries to find comprehensive information about: "{user_query}"

The queries should cover different aspects and perspectives. List only the queries, one per line, without numbering.

Queries:"""

    response = generate_response(prompt, max_new_tokens=256)

    # Parse the generated queries
    queries = [q.strip() for q in response.split('\n') if q.strip() and len(q.strip()) > 5]

    # If the model didn't generate enough queries, add simple variations
    if len(queries) < num_queries:
        queries.append(user_query)
        queries.append(f"{user_query} latest news")
        queries.append(f"{user_query} {datetime.now().year}")
        queries.append(f"recent {user_query}")
        queries.append(f"{user_query} updates")

    return queries[:num_queries]

# Enhanced Wikipedia search with multiple queries
def enhanced_wiki_search(user_query, queries=None):
    search_results = []

    # Generate multiple search queries, unless the caller already supplies them
    if queries is None:
        queries = generate_search_queries(user_query, num_queries=5)

    print(f"🔍 Generated search queries: {queries}")

    for query in queries:
        try:
            # Try to get a Wikipedia summary directly
            summary = wikipedia.summary(query, sentences=3, auto_suggest=True)
            search_results.append({
                'query': query,
                'source': 'Wikipedia',
                'content': summary
            })
        except wikipedia.exceptions.DisambiguationError as e:
            # On a disambiguation page, fall back to the first option
            try:
                summary = wikipedia.summary(e.options[0], sentences=3)
                search_results.append({
                    'query': query,
                    'source': 'Wikipedia',
                    'content': summary
                })
            except Exception:
                pass
        except wikipedia.exceptions.PageError:
            # No exact page; fall back to a keyword search
            try:
                search_list = wikipedia.search(query, results=3)
                if search_list:
                    summary = wikipedia.summary(search_list[0], sentences=3)
                    search_results.append({
                        'query': query,
                        'source': 'Wikipedia',
                        'content': summary
                    })
            except Exception:
                pass
        except Exception as e:
            print(f"Error with query '{query}': {str(e)}")
            continue

    return search_results
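# For illustration: enhanced_wiki_search("large language models") would return a
# list of {'query': ..., 'source': 'Wikipedia', 'content': ...} dicts, one per
# query that found a page.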

# Function to aggregate and synthesize search results
def aggregate_search_results(search_results, user_query):
    if not search_results:
        return "No search results found. Please try a different query."

    # Combine all search results
    combined_info = "\n\n".join([
        f"Source: {result['source']}\nQuery: {result['query']}\nInformation: {result['content']}"
        for result in search_results
    ])

    # Generate a comprehensive response
    prompt = f"""Based on the following search results, provide a comprehensive and well-structured answer to the user's question: "{user_query}"

Search Results:
{combined_info}

Instructions:
- Synthesize information from all sources
- Provide accurate and up-to-date information
- If sources conflict, mention it
- Structure your response clearly
- Include relevant details and context

Comprehensive Answer:"""

    response = generate_response(prompt, max_new_tokens=1024)
    return response

# Function to extract text from a PDF or TXT file
# (gr.File is configured with type="filepath", so `file_path` is a plain string)
def extract_text(file_path):
    global rag_filename
    try:
        if file_path.endswith(".pdf"):
            rag_filename = file_path
            pdf_reader = PyPDF2.PdfReader(file_path)
            text = ""
            for page in pdf_reader.pages:
                page_text = page.extract_text()
                if page_text:
                    text += page_text + "\n"
            return text if text else "Could not extract text from PDF."
        elif file_path.endswith(".txt"):
            rag_filename = file_path
            with open(file_path, 'r', encoding='utf-8') as f:
                return f.read()
        else:
            return "Unsupported file type. Please upload PDF or TXT files."
    except Exception as e:
        return f"Error reading file: {str(e)}"

# Main chat function with history (a generator, so intermediate status
# updates can be streamed to the UI; note that a bare `return value` inside
# a generator would be silently dropped, hence the yield-then-return pattern)
def chat(message, history, mode, file=None):
    global rag_content, rag_filename

    if not message.strip():
        yield history, ""
        return

    # Handle file upload for RAG
    if file:
        extracted = extract_text(file)
        if extracted.startswith(("Error", "Unsupported", "Could not")):
            history.append((message, f"❌ {extracted}"))
            yield history, ""
            return
        rag_content = extracted
        history.append((None, f"✓ File uploaded: {rag_filename} ({len(rag_content)} characters)"))

    # Generate a response based on the selected mode
    if mode == "Web search":
        # Show a searching indicator while results are gathered
        history.append((message, "🔍 Searching and analyzing information..."))
        yield history, ""

        # Generate multiple search queries
        search_queries = generate_search_queries(message, num_queries=5)

        # Perform the searches, reusing the queries generated above
        # instead of regenerating them inside enhanced_wiki_search
        search_results = enhanced_wiki_search(message, queries=search_queries)

        # Aggregate the results and generate the final response
        if search_results:
            response = "📊 **Search Queries Generated:**\n"
            response += "\n".join([f"- {q}" for q in search_queries])
            response += f"\n\n✅ **Found {len(search_results)} relevant sources**\n\n"

            # Generate a comprehensive answer
            final_answer = aggregate_search_results(search_results, message)
            response += "📝 **Comprehensive Answer:**\n" + final_answer
        else:
            response = "❌ Could not find relevant information. Please try rephrasing your query."

        # Replace the searching indicator with the final response
        history[-1] = (message, response)

    elif mode == "Think":
        think_prompt = f"Think step by step about the following question: {message}\n\nProvide your reasoning process:"
        thoughts = generate_response(think_prompt, max_new_tokens=512)
        final_prompt = f"Based on this reasoning:\n{thoughts}\n\nNow provide a final answer to: {message}"
        final_response = generate_response(final_prompt, max_new_tokens=512)
        response = f"🤔 **Thinking Process:**\n{thoughts}\n\n💡 **Final Answer:**\n{final_response}"
        history.append((message, response))

    elif mode == "No think":
        prompt = f"Answer the following question directly and concisely:\n{message}\n\nAnswer:"
        response = generate_response(prompt, max_new_tokens=512)
        history.append((message, response))

    elif mode == "RAG":
        if not rag_content:
            history.append((message, "⚠ Please upload a PDF or TXT file first for RAG mode."))
            yield history, ""
            return
        chunk_size = 1500
        prompt = f"Document content:\n{rag_content[:chunk_size]}\n\nUser question: {message}\n\nAnswer based strictly on the document content above:"
        response = generate_response(prompt, max_new_tokens=768)
        history.append((message, response))
    else:
        response = "Invalid mode selected."
        history.append((message, response))

    yield history, ""

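# Gradio treats a generator event handler as a streaming update: each `yield`
# above redraws the Chatbot, which is how the "🔍 Searching..." indicator
# appears before the final answer replaces it.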
# Function for parallel chat
# Note: all threads share the single model instance, so GPU generation is not
# truly parallel; threading mainly overlaps the Wikipedia network I/O.
def parallel_chat(q1, q2, q3, q4, mode, file=None):
    global rag_content

    # Handle file upload for RAG
    if file and mode == "RAG":
        extracted = extract_text(file)
        if not extracted.startswith(("Error", "Unsupported", "Could not")):
            rag_content = extracted

    responses = [None, None, None, None]
    questions = [q1, q2, q3, q4]

    def process(i):
        if questions[i] and questions[i].strip():
            temp_history = []
            result = None
            # Drain the chat generator and keep the final history
            for result, _ in chat(questions[i], temp_history, mode):
                pass
            if result:
                responses[i] = result[-1][1]

    threads = []
    for i in range(4):
        if questions[i] and questions[i].strip():
            t = threading.Thread(target=process, args=(i,))
            t.start()
            threads.append(t)

    for t in threads:
        t.join()

    return (responses[0] or "No question provided",
            responses[1] or "No question provided",
            responses[2] or "No question provided",
            responses[3] or "No question provided")

# Custom CSS for a better UI
custom_css = """
#chatbot {
    height: 600px;
    overflow-y: auto;
}
.message {
    padding: 10px;
    margin: 5px;
    border-radius: 8px;
}
"""

# Gradio interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 🤖 AI Chatbot with Advanced Web Search")
    gr.Markdown("Choose your preferred mode and start chatting! Web search now generates multiple queries for comprehensive results.")

    with gr.Tab("💬 Chat"):
        with gr.Row():
            mode = gr.Dropdown(
                choices=["No think", "Think", "Web search", "RAG"],
                label="Select Mode",
                value="No think",
                info="Web search uses 5 different queries for comprehensive results"
            )
            file = gr.File(
                label="Upload File (PDF/TXT)",
                file_types=[".pdf", ".txt"],
                type="filepath"
            )

        chatbot = gr.Chatbot(
            label="Conversation",
            elem_id="chatbot",
            height=500,
            show_label=True,
            bubble_full_width=False
        )

        with gr.Row():
            input_text = gr.Textbox(
                label="Your Message",
                placeholder="Type your message here...",
                lines=2,
                scale=4
            )
            send_btn = gr.Button("Send 📤", scale=1, variant="primary")

        clear_btn = gr.Button("Clear History 🗑")

        # Chat functionality
        send_btn.click(
            chat,
            inputs=[input_text, chatbot, mode, file],
            outputs=[chatbot, input_text]
        )
        input_text.submit(
            chat,
            inputs=[input_text, chatbot, mode, file],
            outputs=[chatbot, input_text]
        )
        clear_btn.click(lambda: [], None, chatbot)

    with gr.Tab("⚡ Parallel Chat"):
        gr.Markdown("### Ask up to 4 questions simultaneously!")
        mode_parallel = gr.Dropdown(
            choices=["No think", "Think", "Web search", "RAG"],
            label="Select Mode",
            value="No think"
        )
        file_parallel = gr.File(
            label="Upload File for RAG (PDF/TXT)",
            file_types=[".pdf", ".txt"],
            type="filepath"
        )

        with gr.Row():
            with gr.Column():
                q1 = gr.Textbox(label="Question 1", lines=2)
                q2 = gr.Textbox(label="Question 2", lines=2)
            with gr.Column():
                q3 = gr.Textbox(label="Question 3", lines=2)
                q4 = gr.Textbox(label="Question 4", lines=2)

        btn_parallel = gr.Button("Submit All Questions 🚀", variant="primary")

        with gr.Row():
            with gr.Column():
                r1 = gr.Textbox(label="Response 1", lines=8, max_lines=20)
                r2 = gr.Textbox(label="Response 2", lines=8, max_lines=20)
            with gr.Column():
                r3 = gr.Textbox(label="Response 3", lines=8, max_lines=20)
                r4 = gr.Textbox(label="Response 4", lines=8, max_lines=20)

        btn_parallel.click(
            parallel_chat,
            inputs=[q1, q2, q3, q4, mode_parallel, file_parallel],
            outputs=[r1, r2, r3, r4]
        )

    with gr.Tab("ℹ About"):
        gr.Markdown("""
        ## Features:
        - **No think**: Direct, concise answers
        - **Think**: Step-by-step reasoning process
        - **Web search**: 🔥 **NEW!** Generates 5 different search queries and aggregates results for comprehensive answers
        - **RAG**: Answer questions based on uploaded documents

        ## Enhanced Web Search:
        - Automatically generates 5 diverse search queries
        - Runs each query against Wikipedia and collects the results
        - Aggregates and synthesizes information
        - Provides comprehensive, up-to-date answers

        ## Tips:
        - Upload PDF or TXT files for RAG mode
        - Use parallel chat for comparing different questions
        - Clear history to start fresh conversations
        - Web search is best for current events and factual queries

        ## Safety:
        This chatbot includes a system prompt for safe, helpful, and honest responses.
        """)

demo.launch(share=True)
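Note: on Hugging Face Spaces the dependencies above are normally pinned in a separate requirements.txt rather than installed from app.py. A minimal sketch (package names taken from the install line; versions left unpinned as an assumption):

gradio
transformers
torch
wikipedia
PyPDF2
requests
beautifulsoup4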