Update app.py
app.py CHANGED
@@ -1,3 +1,19 @@
+# Force install sentencepiece
+import sys
+import subprocess
+
+def install_package(package):
+    subprocess.check_call([sys.executable, "-m", "pip", "install", package])
+
+try:
+    import sentencepiece
+    print("SentencePiece is already installed")
+except ImportError:
+    print("Installing SentencePiece...")
+    install_package("sentencepiece==0.1.99")
+    print("SentencePiece installed successfully")
+
+# Import other required libraries
 import gradio as gr
 import os
 import re
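The guard above installs sentencepiece at import time because the ALLaM tokenizer depends on it and Spaces builds occasionally miss the compiled wheel; pinning it in requirements.txt is the usual route, with this as a runtime safety net. A hypothetical generalization of the same pattern (ensure_package is not part of this commit):

import importlib
import subprocess
import sys

def ensure_package(import_name, pip_spec):
    # Import a module, installing the given pip requirement first if the import fails.
    try:
        return importlib.import_module(import_name)
    except ImportError:
        subprocess.check_call([sys.executable, "-m", "pip", "install", pip_spec])
        return importlib.import_module(import_name)

# Usage: sentencepiece = ensure_package("sentencepiece", "sentencepiece==0.1.99")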
@@ -5,20 +21,27 @@ import torch
 import numpy as np
 from pathlib import Path
 import PyPDF2
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM
 from sentence_transformers import SentenceTransformer
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain_community.vectorstores import FAISS
 from langchain.schema import Document
 from langchain.embeddings import HuggingFaceEmbeddings
-import spaces
+import spaces
+
+# Global variables to store model state
+model = None
+tokenizer = None
+assistant = None
+model_type = "primary"  # Track if we're using primary or fallback model
 
 # Create the Vision 2030 Assistant class
 class Vision2030Assistant:
-    def __init__(self, model, tokenizer, vector_store):
+    def __init__(self, model, tokenizer, vector_store, model_type="primary"):
         self.model = model
         self.tokenizer = tokenizer
         self.vector_store = vector_store
+        self.model_type = model_type
         self.conversation_history = []
 
     def answer(self, user_query):
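The module-level globals are what carry model state between independent Gradio callbacks. A hypothetical alternative, not in this commit, that keeps the same state in a single object:

from dataclasses import dataclass

@dataclass
class AppState:
    model: object = None
    tokenizer: object = None
    assistant: object = None
    model_type: str = "primary"  # "primary", "fallback", or "mbart"

state = AppState()  # callbacks would then read and write state.model, state.assistant, ...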
@@ -40,8 +63,11 @@ class Vision2030Assistant:
         # Retrieve relevant contexts
         contexts = retrieve_context(enhanced_query, self.vector_store, top_k=5)
 
-        # Generate response
-
+        # Generate response based on model type
+        if self.model_type == "primary":
+            response = generate_response_primary(user_query, contexts, self.model, self.tokenizer, language)
+        else:
+            response = generate_response_fallback(user_query, contexts, self.model, self.tokenizer, language)
 
         # Add response to conversation history
         self.conversation_history.append({"role": "assistant", "content": response})
@@ -87,9 +113,9 @@ def retrieve_context(query, vector_store, top_k=5):
 
     return contexts
 
-@spaces.GPU
-def
-"""Generate a response using
+@spaces.GPU
+def generate_response_primary(query, contexts, model, tokenizer, language="auto"):
+    """Generate a response using ALLaM model"""
     # Auto-detect language if not specified
     if language == "auto":
         language = detect_language(query)
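detect_language is called here but defined outside the changed hunks. A minimal sketch consistent with these call sites, assuming it only has to separate Arabic from English:

import re

def detect_language(text):
    # Treat any text containing Arabic-block characters as Arabic.
    return "arabic" if re.search(r"[\u0600-\u06FF]", text) else "english"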
@@ -149,6 +175,53 @@ Question: {query} [/INST]</s>"""
         # Fallback response
         return "I apologize, but I encountered an error while generating a response."
 
+@spaces.GPU
+def generate_response_fallback(query, contexts, model, tokenizer, language="auto"):
+    """Generate a response using the fallback model (BLOOM or mBART)"""
+    # Auto-detect language if not specified
+    if language == "auto":
+        language = detect_language(query)
+
+    # Format the prompt based on language
+    if language == "arabic":
+        system_prompt = (
+            "أنت مساعد افتراضي يهتم برؤية السعودية 2030. استخدم السياق التالي للإجابة على السؤال: "
+        )
+    else:
+        system_prompt = (
+            "You are a virtual assistant for Saudi Vision 2030. Use the following context to answer the question: "
+        )
+
+    # Combine retrieved contexts
+    context_text = "\n\n".join([f"Document: {ctx['content']}" for ctx in contexts])
+
+    # Format prompt for fallback model (simpler format)
+    prompt = f"{system_prompt}\n\nContext:\n{context_text}\n\nQuestion: {query}\n\nAnswer:"
+
+    try:
+        # Generate with fallback model
+        inputs = tokenizer(prompt, return_tensors="pt", max_length=1024, truncation=True).to(model.device)
+
+        outputs = model.generate(
+            inputs.input_ids,
+            attention_mask=inputs.attention_mask,
+            max_length=inputs.input_ids.shape[1] + 512,
+            temperature=0.7,
+            top_p=0.9,
+            do_sample=True,
+            pad_token_id=tokenizer.eos_token_id
+        )
+
+        # For most models, this is how we extract the response
+        response = tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
+
+        # Cleanup and return
+        return response.strip()
+
+    except Exception as e:
+        print(f"Error during fallback generation: {e}")
+        return "I apologize, but I encountered an error while generating a response with the fallback model."
+
 def process_pdf_files(pdf_files):
     """Process PDF files and create documents"""
     documents = []
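Note the decode step skips the first input_ids.shape[1] tokens: decoder-only models such as BLOOM echo the prompt, so only the newly generated tail is kept. The function also indexes each context as ctx['content'], so retrieve_context evidently returns plain dicts; a hypothetical reconstruction matching that shape (the real body sits outside this diff):

def retrieve_context(query, vector_store, top_k=5):
    # Return the top-k matching chunks as dicts with "content" and "source" keys.
    results = vector_store.similarity_search_with_score(query, k=top_k)
    return [
        {"content": doc.page_content, "source": doc.metadata.get("source", ""), "score": float(score)}
        for doc, score in results
    ]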
@@ -217,24 +290,67 @@ def create_vector_store(documents):
     vector_store = FAISS.from_documents(chunks, embedding_function)
     return vector_store
 
-#
-
-
-
+# Attempt to create mock documents if none are available yet
+def create_mock_documents():
+    """Create mock documents about Vision 2030"""
+    documents = []
+
+    # Sample content about Vision 2030 in both languages
+    samples = [
+        {
+            "content": "رؤية السعودية 2030 هي خطة استراتيجية تهدف إلى تنويع الاقتصاد السعودي وتقليل الاعتماد على النفط مع تطوير قطاعات مختلفة مثل الصحة والتعليم والسياحة.",
+            "source": "vision2030_overview_ar.txt"
+        },
+        {
+            "content": "Saudi Vision 2030 is a strategic framework aiming to diversify Saudi Arabia's economy and reduce dependence on oil, while developing sectors like health, education, and tourism.",
+            "source": "vision2030_overview_en.txt"
+        },
+        {
+            "content": "تشمل الأهداف الاقتصادية لرؤية 2030 زيادة مساهمة القطاع الخاص من 40% إلى 65% من الناتج المحلي الإجمالي، ورفع نسبة الصادرات غير النفطية من 16% إلى 50% من الناتج المحلي الإجمالي غير النفطي، وخفض البطالة إلى 7%.",
+            "source": "economic_goals_ar.txt"
+        },
+        {
+            "content": "The economic goals of Vision 2030 include increasing private sector contribution from 40% to 65% of GDP, raising non-oil exports from 16% to 50%, and reducing unemployment from 11.6% to 7%.",
+            "source": "economic_goals_en.txt"
+        },
+        {
+            "content": "تركز رؤية 2030 على زيادة مشاركة المرأة في سوق العمل من 22% إلى 30% بحلول عام 2030، مع توفير فرص متساوية في التعليم والعمل.",
+            "source": "women_empowerment_ar.txt"
+        },
+        {
+            "content": "Vision 2030 emphasizes increasing women's participation in the workforce from 22% to 30% by 2030, while providing equal opportunities in education and employment.",
+            "source": "women_empowerment_en.txt"
+        }
+    ]
+
+    # Create documents from samples
+    for sample in samples:
+        doc = Document(
+            page_content=sample["content"],
+            metadata={"source": sample["source"], "filename": sample["source"]}
+        )
+        documents.append(doc)
+
+    print(f"Created {len(documents)} mock documents")
+    return documents
 
-
-
-
-    global model, tokenizer
+@spaces.GPU
+def load_primary_model():
+    """Load the ALLaM-7B model with error handling"""
+    global model, tokenizer, model_type
 
-    if model is not None and tokenizer is not None:
-        return "
+    if model is not None and tokenizer is not None and model_type == "primary":
+        return "Primary model (ALLaM-7B) already loaded"
 
     model_name = "ALLaM-AI/ALLaM-7B-Instruct-preview"
-    print(f"Loading model: {model_name}")
+    print(f"Loading primary model: {model_name}")
 
     try:
-        #
+        # Try to import sentencepiece explicitly first
+        import sentencepiece as spm
+        print("SentencePiece imported successfully")
+
+        # First attempt with AutoTokenizer and explicit trust_remote_code
         tokenizer = AutoTokenizer.from_pretrained(
             model_name,
             trust_remote_code=True,
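The mock documents make it possible to exercise the retrieval path without uploading PDFs or loading an LLM. A hypothetical smoke test along those lines, assuming app.py's create_mock_documents and create_vector_store are importable:

# Build a vector store from the mock documents and query it directly.
docs = create_mock_documents()
store = create_vector_store(docs)
for doc in store.similarity_search("What are the goals of Vision 2030?", k=2):
    print(doc.metadata["source"], "->", doc.page_content[:80])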
@@ -244,32 +360,64 @@ def load_model_and_tokenizer():
         # Load model with appropriate settings for ALLaM
         model = AutoModelForCausalLM.from_pretrained(
             model_name,
-            torch_dtype=torch.bfloat16,
+            torch_dtype=torch.bfloat16,
             trust_remote_code=True,
             device_map="auto",
         )
 
-
+        model_type = "primary"
+        return "Primary model (ALLaM-7B) loaded successfully!"
 
     except Exception as e:
-        error_msg = f"
+        error_msg = f"Primary model loading failed: {e}"
         print(error_msg)
+        return error_msg
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+@spaces.GPU
+def load_fallback_model():
+    """Load the fallback model (BLOOM-7B1) when ALLaM fails"""
+    global model, tokenizer, model_type
+
+    if model is not None and tokenizer is not None and model_type == "fallback":
+        return "Fallback model already loaded"
+
+    try:
+        print("Loading fallback model: BLOOM-7B1...")
+
+        # Use BLOOM model as fallback (it doesn't need SentencePiece)
+        tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-7b1")
+        model = AutoModelForCausalLM.from_pretrained(
+            "bigscience/bloom-7b1",
+            torch_dtype=torch.bfloat16,
+            device_map="auto",
+            load_in_8bit=True  # Reduce memory usage
+        )
+
+        model_type = "fallback"
+        return "Fallback model (BLOOM-7B1) loaded successfully!"
+    except Exception as e:
+        return f"Fallback model loading failed: {e}"
+
+def load_mbart_model():
+    """Load mBART as a second fallback option"""
+    global model, tokenizer, model_type
+
+    try:
+        print("Loading mBART multilingual model...")
+
+        model_name = "facebook/mbart-large-50-many-to-many-mmt"
+        tokenizer = AutoTokenizer.from_pretrained(model_name)
+        model = AutoModelForSeq2SeqLM.from_pretrained(
+            model_name,
+            torch_dtype=torch.float16,
+            device_map="auto",
+            load_in_8bit=True
+        )
+
+        model_type = "mbart"
+        return "mBART multilingual model loaded successfully!"
+    except Exception as e:
+        return f"mBART model loading failed: {e}"
 
 # Gradio Interface Functions
 def process_pdfs(pdf_files):
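The three loaders share a contract: each returns a human-readable status string and, on success, sets the model, tokenizer, and model_type globals. The UI wires them to separate buttons, but a hypothetical wrapper (not in this commit) could chain them automatically:

def load_any_model():
    # Try each loader in order until one reports success or an already-loaded model.
    for loader in (load_primary_model, load_fallback_model, load_mbart_model):
        status = loader()
        if "successfully" in status.lower() or "already loaded" in status.lower():
            return status
    return "All model loading attempts failed."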
@@ -285,27 +433,44 @@ def process_pdfs(pdf_files):
 
     # Ensure model is loaded
     if model is None or tokenizer is None:
-
-        if "successfully" not in load_status.lower():
-            return f"Model loading failed: {load_status}"
+        return "Please load a model first (primary or fallback) before processing documents."
 
     # Create vector store
     vector_store = create_vector_store(documents)
 
     # Initialize assistant
-    assistant = Vision2030Assistant(model, tokenizer, vector_store)
+    assistant = Vision2030Assistant(model, tokenizer, vector_store, model_type)
 
     return f"Successfully processed {len(documents)} documents. The assistant is ready to use!"
 
-
+def use_mock_documents():
+    """Use mock documents when no PDFs are available"""
+    documents = create_mock_documents()
+
+    global assistant, model, tokenizer
+
+    # Ensure model is loaded
+    if model is None or tokenizer is None:
+        return "Please load a model first (primary or fallback) before using mock documents."
+
+    # Create vector store
+    vector_store = create_vector_store(documents)
+
+    # Initialize assistant
+    assistant = Vision2030Assistant(model, tokenizer, vector_store, model_type)
+
+    return "Successfully initialized with mock Vision 2030 documents. The assistant is ready for testing!"
+
+@spaces.GPU
 def answer_query(message, history):
     global assistant
 
     if assistant is None:
-        return "Please
+        return [(message, "Please load a model and process documents first (or use mock documents for testing).")]
 
     response = assistant.answer(message)
+    history.append((message, response))
+    return history
 
 def reset_chat():
     global assistant
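answer_query now returns the full history as a list of (user, assistant) tuples, the structure this gr.Chatbot expects as its output value. A sketch of the resulting flow, assuming a model and documents have already been loaded:

history = []
history = answer_query("What are the main goals of Vision 2030?", history)
# history == [("What are the main goals of Vision 2030?", "<assistant reply>")]
history = answer_query("And the tourism targets?", history)  # appends a second pair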
@@ -316,31 +481,62 @@ def reset_chat():
     reset_message = assistant.reset_conversation()
     return reset_message
 
+def restart_factory():
+    return "Restarting the application... Please reload the page in a few seconds."
+
 # Create Gradio interface
 with gr.Blocks(title="Vision 2030 Virtual Assistant") as demo:
     gr.Markdown("# Vision 2030 Virtual Assistant")
     gr.Markdown("Ask questions about Saudi Vision 2030 goals, projects, and progress in Arabic or English.")
 
     with gr.Tab("Setup"):
-        gr.Markdown("## Step 1: Load
-
-
-
+        gr.Markdown("## Step 1: Load a Model")
+        with gr.Row():
+            with gr.Column():
+                primary_btn = gr.Button("Load ALLaM-7B Model (Primary)", variant="primary")
+                primary_output = gr.Textbox(label="Primary Model Status")
+                primary_btn.click(load_primary_model, inputs=[], outputs=primary_output)
+
+            with gr.Column():
+                fallback_btn = gr.Button("Load BLOOM-7B1 (Fallback)", variant="secondary")
+                fallback_output = gr.Textbox(label="Fallback Model Status")
+                fallback_btn.click(load_fallback_model, inputs=[], outputs=fallback_output)
+
+            with gr.Column():
+                mbart_btn = gr.Button("Load mBART (Alternative)", variant="secondary")
+                mbart_output = gr.Textbox(label="mBART Model Status")
+                mbart_btn.click(load_mbart_model, inputs=[], outputs=mbart_output)
+
+        gr.Markdown("## Step 2: Prepare Documents")
+        with gr.Row():
+            with gr.Column():
+                pdf_files = gr.File(file_types=[".pdf"], file_count="multiple", label="Upload PDF Documents")
+                process_btn = gr.Button("Process Documents", variant="primary")
+                process_output = gr.Textbox(label="Processing Status")
+                process_btn.click(process_pdfs, inputs=[pdf_files], outputs=process_output)
+
+            with gr.Column():
+                mock_btn = gr.Button("Use Mock Documents (for testing)", variant="secondary")
+                mock_output = gr.Textbox(label="Mock Documents Status")
+                mock_btn.click(use_mock_documents, inputs=[], outputs=mock_output)
 
-        gr.Markdown("##
-
-
-
-
+        gr.Markdown("## Troubleshooting")
+        restart_btn = gr.Button("Restart Application", variant="secondary")
+        restart_output = gr.Textbox(label="Restart Status")
+        restart_btn.click(restart_factory, inputs=[], outputs=restart_output)
+        restart_btn.click(None, [], None, _js="() => {setTimeout(() => {location.reload()}, 5000)}")
 
     with gr.Tab("Chat"):
-        chatbot = gr.Chatbot(label="Conversation")
-
-
-
-
-
-
+        chatbot = gr.Chatbot(label="Conversation", height=500)
+
+        with gr.Row():
+            message = gr.Textbox(
+                label="Ask a question about Vision 2030 (in Arabic or English)",
+                placeholder="What are the main goals of Vision 2030?",
+                lines=2
+            )
+            submit_btn = gr.Button("Submit", variant="primary")
+
         reset_btn = gr.Button("Reset Conversation")
 
         gr.Markdown("### Example Questions")
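The restart button pairs a Python status callback with a `_js` page reload; `_js` is the Gradio 3.x spelling, and under Gradio 4.x the same wiring would presumably use the renamed keyword:

# Gradio 4.x equivalent (the keyword was renamed from _js to js):
restart_btn.click(None, [], None, js="() => {setTimeout(() => {location.reload()}, 5000)}")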
@@ -375,7 +571,7 @@ with gr.Blocks(title="Vision 2030 Virtual Assistant") as demo:
     submit_btn.click(answer_query, inputs=[message, chatbot], outputs=[chatbot])
     message.submit(answer_query, inputs=[message, chatbot], outputs=[chatbot])
     reset_btn.click(reset_chat, inputs=[], outputs=[reset_output])
-    reset_btn.click(lambda: None, inputs=[], outputs=[chatbot], postprocess=
+    reset_btn.click(lambda: None, inputs=[], outputs=[chatbot], postprocess=lambda: [])
 
 # Launch the app
 demo.launch()
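One caveat on that last wiring: Gradio's postprocess argument expects a boolean, so passing a lambda is unlikely to do anything useful. The conventional way to clear the chat display is to return an empty history from the callback, roughly:

# Clearing the Chatbot by returning an empty history (sketch, not in this commit):
reset_btn.click(lambda: [], inputs=[], outputs=[chatbot])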