Update app.py
app.py CHANGED
@@ -7,11 +7,10 @@ import base64
 import tempfile
 import os
 import logging
-import
-import aiohttp
+import time # ADDED - was missing!
 from datetime import datetime
 from concurrent.futures import ThreadPoolExecutor
-from
+from html.parser import HTMLParser

 # Setup logging
 logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
@@ -21,7 +20,7 @@ logger = logging.getLogger(__name__)
 logger.info("Loading Whisper model...")
 whisper_model = WhisperModel("tiny", device="cpu", compute_type="int8")

-logger.info("Loading Qwen 2.5 1.5B-Instruct
+logger.info("Loading Qwen 2.5 1.5B-Instruct...")
 model_name = "Qwen/Qwen2.5-1.5B-Instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForCausalLM.from_pretrained(
@@ -31,28 +30,23 @@ model = AutoModelForCausalLM.from_pretrained(
     low_cpu_mem_usage=True
 )

-logger.info("All models loaded!")
+logger.info("All models loaded successfully!")

-# Search APIs configuration
-TAVILY_API_KEY = os.getenv('TAVILY_API_KEY', '')
+# Search APIs configuration
+TAVILY_API_KEY = os.getenv('TAVILY_API_KEY', '')
 BRAVE_API_KEY = os.getenv('BRAVE_API_KEY', '')

 def search_tavily(query):
-    """Priority 1: Tavily AI search
-    logger.info("[TAVILY] Starting
+    """Priority 1: Tavily AI search"""
+    logger.info("[TAVILY] Starting...")
     if not TAVILY_API_KEY:
-        logger.warning("[TAVILY] No API key
+        logger.warning("[TAVILY] No API key")
         return None

     try:
         response = requests.post(
             'https://api.tavily.com/search',
-            json={
-                'api_key': TAVILY_API_KEY,
-                'query': query,
-                'max_results': 3,
-                'include_answer': True
-            },
+            json={'api_key': TAVILY_API_KEY, 'query': query, 'max_results': 3},
             timeout=3
         )

@@ -69,10 +63,10 @@ def search_tavily(query):
     return None

 def search_brave(query):
-    """Priority 2: Brave Search
-    logger.info("[BRAVE] Starting
+    """Priority 2: Brave Search"""
+    logger.info("[BRAVE] Starting...")
     if not BRAVE_API_KEY:
-        logger.warning("[BRAVE] No API key
+        logger.warning("[BRAVE] No API key")
         return None

     try:
@@ -96,10 +90,9 @@ def search_brave(query):
     return None

 def search_searx(query):
-    """Priority 3: Searx
-    logger.info("[SEARX] Starting
+    """Priority 3: Searx"""
+    logger.info("[SEARX] Starting...")

-    # Try multiple public Searx instances
     searx_instances = [
         'https://searx.be/search',
         'https://searx.work/search',
@@ -110,7 +103,7 @@ def search_searx(query):
         try:
             response = requests.get(
                 instance,
-                params={'q': query, 'format': 'json', 'categories': 'general'
+                params={'q': query, 'format': 'json', 'categories': 'general'},
                 timeout=3
             )

@@ -120,18 +113,16 @@ def search_searx(query):
                 context = ""
                 for i, result in enumerate(results[:3], 1):
                     context += f"\n[Searx {i}] {result.get('title', '')}\n{result.get('content', '')}\n"
-                logger.info(f"[SEARX] Success
+                logger.info(f"[SEARX] Success from {instance}")
                 return context
         except Exception as e:
             logger.warning(f"[SEARX] Failed {instance}: {str(e)}")
-            continue

-    logger.error("[SEARX] All instances failed")
     return None

 def search_duckduckgo_html(query):
-    """Priority 4: DuckDuckGo HTML
-    logger.info("[DDG] Starting
+    """Priority 4: DuckDuckGo HTML"""
+    logger.info("[DDG] Starting...")
     try:
         response = requests.get(
             'https://html.duckduckgo.com/html/',
@@ -141,9 +132,6 @@ def search_duckduckgo_html(query):
         )

         if response.status_code == 200:
-            # Simple HTML parsing (basic extraction)
-            from html.parser import HTMLParser
-
             class DDGParser(HTMLParser):
                 def __init__(self):
                     super().__init__()
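The `DDGParser` body is largely outside this hunk; only the import moved to the top of the file. For reference, a minimal `html.parser`-based extractor of this general shape could look like the sketch below — the tag and class checks are assumptions about DuckDuckGo's markup, not the file's actual logic:

```python
from html.parser import HTMLParser

class SnippetParser(HTMLParser):
    """Collect text from elements whose class contains 'result__snippet' (assumed DDG markup)."""
    def __init__(self):
        super().__init__()
        self.results = []        # extracted snippet strings
        self._in_snippet = False # currently inside a snippet element?
        self._tag = None         # tag that opened the snippet

    def handle_starttag(self, tag, attrs):
        if "result__snippet" in dict(attrs).get("class", ""):
            self._in_snippet = True
            self._tag = tag

    def handle_endtag(self, tag):
        if self._in_snippet and tag == self._tag:
            self._in_snippet = False

    def handle_data(self, data):
        if self._in_snippet and data.strip():
            self.results.append(data.strip())

parser = SnippetParser()
parser.feed('<a class="result__snippet">Example snippet text</a>')
print(parser.results)  # ['Example snippet text']
```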
@@ -173,18 +161,17 @@ def search_duckduckgo_html(query):
                 context += f"\n[DDG {i}] {result}\n"

             if context:
-                logger.info(f"[DDG] Success
+                logger.info(f"[DDG] Success")
                 return context
     except Exception as e:
         logger.error(f"[DDG] Error: {str(e)}")
     return None

 def search_parallel(query):
-    """Execute all searches in parallel
-    logger.info("[PARALLEL
+    """Execute all searches in parallel"""
+    logger.info("[PARALLEL] Starting all engines...")

     with ThreadPoolExecutor(max_workers=4) as executor:
-        # Submit all searches simultaneously
         futures = {
             executor.submit(search_tavily, query): "Tavily",
             executor.submit(search_brave, query): "Brave",
@@ -192,36 +179,32 @@ def search_parallel(query):
             executor.submit(search_duckduckgo_html, query): "DuckDuckGo"
         }

-        # Priority order: Tavily > Brave > Searx > DDG
         priority_order = ["Tavily", "Brave", "Searx", "DuckDuckGo"]
         results = {}

-        # Collect all results
         for future in futures:
             engine = futures[future]
             try:
                 result = future.result(timeout=4)
                 if result:
                     results[engine] = result
-                    logger.info(f"[PARALLEL
+                    logger.info(f"[PARALLEL] {engine} completed")
             except Exception as e:
-                logger.error(f"[PARALLEL
+                logger.error(f"[PARALLEL] {engine} failed: {str(e)}")

-        # Return results by priority
         for engine in priority_order:
             if engine in results and results[engine]:
-                logger.info(f"[PARALLEL
+                logger.info(f"[PARALLEL] Using {engine}")
                 return results[engine], engine

-        logger.error("[PARALLEL
-        return "Unable to fetch search results.
+        logger.error("[PARALLEL] All failed")
+        return "Unable to fetch search results.", "None"

 def transcribe_audio_base64(audio_base64):
-    """Transcribe audio
-    logger.info("[PLUELY STT] Request
+    """Transcribe audio"""
+    logger.info("[PLUELY STT] Request")
     try:
         audio_bytes = base64.b64decode(audio_base64)
-        logger.info(f"[PLUELY STT] Audio size: {len(audio_bytes)} bytes")

         with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_audio:
             temp_audio.write(audio_bytes)
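The shape of `search_parallel` — submit every engine at once, harvest whatever finishes within the timeout, then walk a fixed priority list — is worth seeing in isolation. A minimal sketch with stand-in engines (names and timings are made up; only the pattern matches the app):

```python
import time
from concurrent.futures import ThreadPoolExecutor

def tavily(q): time.sleep(0.2); return f"tavily says: {q}"  # stand-in engines,
def ddg(q): time.sleep(0.1); return f"ddg says: {q}"        # not real API calls

def pick_by_priority(query):
    engines = {"Tavily": tavily, "DuckDuckGo": ddg}
    priority = ["Tavily", "DuckDuckGo"]          # best source wins, not fastest
    results = {}
    with ThreadPoolExecutor(max_workers=len(engines)) as pool:
        futures = {pool.submit(fn, query): name for name, fn in engines.items()}
        for future, name in futures.items():
            try:
                out = future.result(timeout=4)   # per-engine cap, like the app's
                if out:
                    results[name] = out
            except Exception:
                pass                             # a failed engine simply drops out
    for name in priority:                        # highest-priority hit wins
        if results.get(name):
            return results[name], name
    return "Unable to fetch search results.", "None"

print(pick_by_priority("test"))  # ('tavily says: test', 'Tavily')
```

The two-element failure return mirrors the fix in this hunk: it keeps `search_results, search_engine = search_parallel(...)` in `generate_answer` unpackable even when every engine fails.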
@@ -231,7 +214,7 @@ def transcribe_audio_base64(audio_base64):
         transcription = " ".join([seg.text for seg in segments])
         os.unlink(temp_path)

-        logger.info(f"[PLUELY STT] Success
+        logger.info(f"[PLUELY STT] Success")
         return {"text": transcription.strip()}

     except Exception as e:
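`transcribe_audio_base64` takes a base64 string and hands back `{"text": ...}` (or `{"error": ...}` on failure), so a caller only has to encode the raw bytes. A minimal sketch (the file name is hypothetical):

```python
import base64

# Hypothetical recording; any audio faster-whisper can decode works, .wav included.
with open("question.wav", "rb") as f:
    audio_b64 = base64.b64encode(f.read()).decode("ascii")

# This string becomes data[0] of the transcribe_stt call registered further down;
# the server decodes it into a temp .wav and runs the Whisper model on it.
payload = {"data": [audio_b64]}
```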
@@ -239,7 +222,7 @@ def transcribe_audio_base64(audio_base64):
         return {"error": str(e)}

 def generate_answer(text_input):
-    """Generate answer
+    """Generate answer"""
     logger.info(f"[PLUELY AI] Question: {text_input}")
     try:
         if not text_input or not text_input.strip():
@@ -247,42 +230,18 @@ def generate_answer(text_input):

         current_date = datetime.now().strftime("%B %d, %Y")

-
-        logger.info("[PLUELY AI] Starting parallel search...")
+        logger.info("[PLUELY AI] Searching...")
         search_results, search_engine = search_parallel(text_input)
-        logger.info(f"[PLUELY AI] Using {search_engine}
+        logger.info(f"[PLUELY AI] Using {search_engine}")

-        # Enhanced prompt for Qwen 2.5
         messages = [
-            {
-                "role": "system",
-                "content": f"You are a factual assistant. Today is {current_date}. Answer questions using ONLY the provided search results. Be concise (100-120 words)."
-            },
-            {
-                "role": "user",
-                "content": f"""Search Results:
-{search_results}
-
-Question: {text_input}
-
-Instructions:
-1. Answer based STRICTLY on the search results above
-2. Include relevant dates and facts from search results
-3. If search results are insufficient, say so
-4. Keep answer to 100-120 words
-
-Answer:"""
-            }
+            {"role": "system", "content": f"Today is {current_date}. Answer using ONLY the search results. Be concise (100-120 words)."},
+            {"role": "user", "content": f"Search Results:\n{search_results}\n\nQuestion: {text_input}\n\nAnswer based strictly on search results:"}
         ]

-
-        text = tokenizer.apply_chat_template(
-            messages,
-            tokenize=False,
-            add_generation_prompt=True
-        )
+        text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

-        logger.info("[PLUELY AI] Generating
+        logger.info("[PLUELY AI] Generating...")
         inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=1500)

         with torch.no_grad():
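Collapsing `messages` to one dict per line changes only layout; `apply_chat_template(..., tokenize=False, add_generation_prompt=True)` still renders the list into a single prompt string using the chat template shipped with the tokenizer. A standalone sketch (the printed markup is whatever the Qwen template defines, e.g. `<|im_start|>`/`<|im_end|>` blocks):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-1.5B-Instruct")
messages = [
    {"role": "system", "content": "Answer using ONLY the search results."},
    {"role": "user", "content": "Search Results:\n...\n\nQuestion: ..."},
]
# tokenize=False returns the rendered string instead of token ids;
# add_generation_prompt=True appends the assistant header so the model
# continues with the answer rather than starting a new user turn.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```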
@@ -297,11 +256,9 @@ Answer:"""
         )

         answer = tokenizer.decode(outputs[0][inputs['input_ids'].shape[1]:], skip_special_tokens=True).strip()
-
-        # Add source attribution
         answer_with_source = f"{answer}\n\n**Source:** {search_engine}"

-        logger.info(f"[PLUELY AI]
+        logger.info(f"[PLUELY AI] Done")
         return answer_with_source

     except Exception as e:
@@ -312,39 +269,28 @@ def process_audio(audio_path, question_text):
     """Main pipeline"""
     start_time = time.time()
     logger.info("="*50)
-    logger.info("[MAIN] New request")

     if audio_path:
-        logger.info(f"[MAIN] Audio: {audio_path}")
         try:
             segments, _ = whisper_model.transcribe(audio_path, language="en", beam_size=1)
             question = " ".join([seg.text for seg in segments])
-            logger.info(f"[MAIN] Transcribed: {question}")
         except Exception as e:
-            logger.error(f"[MAIN] Error: {str(e)}")
             return f"❌ Error: {str(e)}", 0.0
     else:
         question = question_text
-        logger.info(f"[MAIN] Text: {question}")

     if not question or not question.strip():
         return "❌ No input", 0.0

-    transcription_time = time.time() - start_time
-
-    # Generate (includes parallel search)
-    gen_start = time.time()
     answer = generate_answer(question)
-    gen_time = time.time() - gen_start
-
     total_time = time.time() - start_time
+
     time_emoji = "🟢" if total_time < 4.0 else "🟡" if total_time < 6.0 else "🔴"
+    timing = f"\n\n{time_emoji} **Time:** {total_time:.2f}s"

     logger.info(f"[MAIN] Total: {total_time:.2f}s")
     logger.info("="*50)

-    timing = f"\n\n{time_emoji} **Performance:** Trans={transcription_time:.2f}s | Search+Gen={gen_time:.2f}s | **Total={total_time:.2f}s**"
-
     return answer + timing, total_time

 def audio_handler(audio_path):
@@ -354,83 +300,52 @@ def text_handler(text_input):
     return process_audio(None, text_input)

 # Gradio UI
-with gr.Blocks(title="Fast Q&A - Qwen 1.5B + Multi-Search", theme=gr.themes.Soft()) as demo:
+with gr.Blocks(title="Fast Q&A", theme=gr.themes.Soft()) as demo:
     gr.Markdown("""
-    # ⚡
-    **Parallel multi-search
-
-    **Features:**
-    - Whisper-tiny transcription
-    - 4 search engines running in parallel (uses fastest available)
-    - Qwen 2.5 1.5B-Instruct (2-3s CPU inference)
-    - Search-grounded answers only
+    # ⚡ Fast Political Q&A
+    **Parallel multi-search + Qwen 2.5 1.5B**
     """)

     with gr.Tab("🎙️ Audio"):
         with gr.Row():
             with gr.Column():
-                audio_input = gr.Audio(sources=["microphone", "upload"], type="filepath"
-                audio_submit = gr.Button("🚀 Submit
+                audio_input = gr.Audio(sources=["microphone", "upload"], type="filepath")
+                audio_submit = gr.Button("🚀 Submit", variant="primary", size="lg")
             with gr.Column():
                 audio_output = gr.Textbox(label="Answer", lines=10, show_copy_button=True)
-                audio_time = gr.Number(label="Time (
+                audio_time = gr.Number(label="Time (s)", precision=2)

         audio_submit.click(fn=audio_handler, inputs=[audio_input], outputs=[audio_output, audio_time], api_name="audio_query")

     with gr.Tab("✍️ Text"):
         with gr.Row():
             with gr.Column():
-                text_input = gr.Textbox(label="
-                text_submit = gr.Button("🚀 Submit
+                text_input = gr.Textbox(label="Question", placeholder="Ask anything...", lines=3)
+                text_submit = gr.Button("🚀 Submit", variant="primary", size="lg")
             with gr.Column():
                 text_output = gr.Textbox(label="Answer", lines=10, show_copy_button=True)
-                text_time = gr.Number(label="Time (
+                text_time = gr.Number(label="Time (s)", precision=2)

         text_submit.click(fn=text_handler, inputs=[text_input], outputs=[text_output, text_time], api_name="text_query")

     gr.Examples(
         examples=[
             ["Is internet shut down in Bareilly today?"],
-            ["Who won
-            ["What is current India inflation rate?"],
-            ["Latest Israel Palestine conflict news?"]
+            ["Who won 2024 US election?"]
         ],
         inputs=text_input
     )

-    with gr.Tab("🔌
+    with gr.Tab("🔌 API"):
         gr.Markdown("""
-
-
-        **STT (Audio → Text):**
-        ```
-        curl -X POST https://archcoder-basic-app.hf.space/call/transcribe_stt \\
-        -H "Content-Type: application/json" \\
-        -d '{"data": ["BASE64_AUDIO"]}'
-        ```
-        **Response Path:** `data[0].text`
-
-        **AI (Text → Answer):**
-        ```
-        curl -X POST https://archcoder-basic-app.hf.space/call/answer_ai \\
-        -H "Content-Type: application/json" \\
-        -d '{"data": ["Your question"]}'
-        ```
-        **Response Path:** `data[0]`
-
-        ---
+        **Pluely Endpoints:**

-
+        STT: `https://archcoder-basic-app.hf.space/call/transcribe_stt`
+        AI: `https://archcoder-basic-app.hf.space/call/answer_ai`

-        **
-
-
-        ```
-
-        **Custom AI Provider:**
-        ```
-        curl https://archcoder-basic-app.hf.space/call/answer_ai -H "Content-Type: application/json" -d '{"data": ["{{TEXT}}"]}'
-        ```
+        **Response Paths:**
+        STT: `data[0].text`
+        AI: `data[0]`
         """)

     with gr.Row(visible=False):
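The invisible buttons in the next hunk exist only to register `transcribe_stt` and `answer_ai` as named API routes. Gradio exposes such routes through a two-step `/call` protocol: POST the inputs to receive an `event_id`, then GET the same route with that id and read the result off the `data:` lines of the server-sent-event stream. A minimal Python client sketch (error handling omitted; per the response paths above, the answer is element 0 of the returned list):

```python
import json
import requests

BASE = "https://archcoder-basic-app.hf.space"

def call_space(api_name, *inputs):
    # Step 1: submit the inputs; Gradio replies with {"event_id": "..."}.
    resp = requests.post(f"{BASE}/call/{api_name}", json={"data": list(inputs)}, timeout=30)
    event_id = resp.json()["event_id"]

    # Step 2: fetch the SSE stream for that event; the final "data:" line
    # carries the JSON-encoded output list.
    stream = requests.get(f"{BASE}/call/{api_name}/{event_id}", timeout=120)
    payload = None
    for line in stream.text.splitlines():
        if line.startswith("data:"):
            payload = line[len("data:"):].strip()
    return json.loads(payload) if payload else None

answer = call_space("answer_ai", "Who won 2024 US election?")
print(answer[0])  # the generated answer, with its **Source:** footer
```

`transcribe_stt` works the same way, with the base64 string from the earlier sketch as its single input.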
@@ -442,14 +357,7 @@ with gr.Blocks(title="Fast Q&A - Qwen 1.5B + Multi-Search", theme=gr.themes.Soft()) as demo:
     gr.Button("STT", visible=False).click(fn=transcribe_audio_base64, inputs=[stt_in], outputs=[stt_out], api_name="transcribe_stt")
     gr.Button("AI", visible=False).click(fn=generate_answer, inputs=[ai_in], outputs=[ai_out], api_name="answer_ai")

-    gr.Markdown("""
-    ---
-    **Model:** Qwen 2.5 1.5B-Instruct (fastest quality model for CPU)
-    **Search Strategy:** Parallel execution (Tavily → Brave → Searx → DDG by priority)
-    **All requests logged** - Check Logs tab
-
-    🟢 < 4s | 🟡 4-6s | 🔴 > 6s
-    """)
+    gr.Markdown("🟢 < 4s | 🟡 4-6s | 🔴 > 6s")

 if __name__ == "__main__":
     demo.queue(max_size=5)