abdull4h committed
Commit 260ec72 · verified · 1 Parent(s): 6750126

Update app.py

Files changed (1): app.py (+292 -216)

app.py CHANGED
@@ -1,4 +1,4 @@
-import streamlit as st
+import gradio as gr
 import os
 import re
 import torch
@@ -12,138 +12,64 @@ from langchain_community.vectorstores import FAISS
 from langchain.schema import Document
 from langchain.embeddings import HuggingFaceEmbeddings
 
-# Set page configuration
-st.set_page_config(
-    page_title="Vision 2030 Virtual Assistant",
-    page_icon="🇸🇦",
-    layout="wide"
-)
-
-# App title and description
-st.title("Vision 2030 Virtual Assistant")
-st.markdown("Ask questions about Saudi Vision 2030 goals, projects, and progress in Arabic or English.")
-
-# Function definitions
-@st.cache_resource
-def load_model_and_tokenizer():
-    """Load the ALLaM-7B model and tokenizer with error handling"""
-    model_name = "ALLaM-AI/ALLaM-7B-Instruct-preview"
-    st.info(f"Loading model: {model_name} (this may take a few minutes)")
-
-    try:
-        # First attempt with AutoTokenizer
-        tokenizer = AutoTokenizer.from_pretrained(
-            model_name,
-            trust_remote_code=True,
-            use_fast=False
-        )
-
-        # Load model with appropriate settings for ALLaM
-        model = AutoModelForCausalLM.from_pretrained(
-            model_name,
-            torch_dtype=torch.bfloat16,
-            trust_remote_code=True,
-            device_map="auto",
-        )
-
-        st.success("Model loaded successfully!")
-
-    except Exception as e:
-        st.error(f"First loading attempt failed: {e}")
-        st.info("Trying alternative loading approach...")
-
-        # Try with specific tokenizer class if the first attempt fails
-        from transformers import LlamaTokenizer
-
-        tokenizer = LlamaTokenizer.from_pretrained(model_name)
-        model = AutoModelForCausalLM.from_pretrained(
-            model_name,
-            torch_dtype=torch.float16,
-            trust_remote_code=True,
-            device_map="auto",
-        )
-
-        st.success("Model loaded successfully with LlamaTokenizer!")
-
-    return model, tokenizer
-
+# Create the Vision 2030 Assistant class
+class Vision2030Assistant:
+    def __init__(self, model, tokenizer, vector_store):
+        self.model = model
+        self.tokenizer = tokenizer
+        self.vector_store = vector_store
+        self.conversation_history = []
+
+    def answer(self, user_query):
+        # Detect language
+        language = detect_language(user_query)
+
+        # Add user query to conversation history
+        self.conversation_history.append({"role": "user", "content": user_query})
+
+        # Get the full conversation context
+        conversation_context = "\n".join([
+            f"{'User' if msg['role'] == 'user' else 'Assistant'}: {msg['content']}"
+            for msg in self.conversation_history[-6:]  # Keep last 3 turns (6 messages)
+        ])
+
+        # Enhance query with conversation context for better retrieval
+        enhanced_query = f"{conversation_context}\n{user_query}"
+
+        # Retrieve relevant contexts
+        contexts = retrieve_context(enhanced_query, self.vector_store, top_k=5)
+
+        # Generate response
+        response = generate_response(user_query, contexts, self.model, self.tokenizer, language)
+
+        # Add response to conversation history
+        self.conversation_history.append({"role": "assistant", "content": response})
+
+        # Also return sources for transparency
+        sources = [ctx.get("source", "Unknown") for ctx in contexts]
+        unique_sources = list(set(sources))
+
+        # Format the response with sources
+        if unique_sources:
+            source_text = "\n\nSources: " + ", ".join([os.path.basename(src) for src in unique_sources])
+            response_with_sources = response + source_text
+        else:
+            response_with_sources = response
+
+        return response_with_sources
+
+    def reset_conversation(self):
+        """Reset the conversation history"""
+        self.conversation_history = []
+        return "Conversation has been reset."
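+
+# Usage sketch (assumes model, tokenizer and vector_store already exist):
+#   assistant = Vision2030Assistant(model, tokenizer, vector_store)
+#   reply = assistant.answer("What is Saudi Vision 2030?")  # ends with a "Sources:" line
+#   assistant.reset_conversation()
+# Retrieval sees the last three exchanges (history[-6:]) prepended to the new
+# query, so follow-ups like "What about its economic goals?" can still match.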
+# Helper functions
 def detect_language(text):
     """Detect if text is primarily Arabic or English"""
     arabic_chars = re.findall(r'[\u0600-\u06FF]', text)
     is_arabic = len(arabic_chars) > len(text) * 0.5
     return "arabic" if is_arabic else "english"
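+# Character-ratio heuristic only: Arabic codepoints (U+0600-U+06FF) must exceed
+# half the text length, so short mixed queries like "Explain رؤية 2030" resolve
+# to "english"; swap in a library such as langdetect if that matters.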
 
-def process_pdfs():
-    """Process uploaded PDF documents"""
-    documents = []
-
-    if 'uploaded_pdfs' in st.session_state and st.session_state.uploaded_pdfs:
-        for pdf_file in st.session_state.uploaded_pdfs:
-            try:
-                # Save the uploaded file temporarily
-                pdf_path = f"temp_{pdf_file.name}"
-                with open(pdf_path, "wb") as f:
-                    f.write(pdf_file.getbuffer())
-
-                # Extract text
-                text = ""
-                with open(pdf_path, 'rb') as file:
-                    reader = PyPDF2.PdfReader(file)
-                    for page in reader.pages:
-                        text += page.extract_text() + "\n\n"
-
-                # Remove temporary file
-                os.remove(pdf_path)
-
-                if text.strip():  # If we got some text
-                    doc = Document(
-                        page_content=text,
-                        metadata={"source": pdf_file.name, "filename": pdf_file.name}
-                    )
-                    documents.append(doc)
-                    st.info(f"Successfully processed: {pdf_file.name}")
-                else:
-                    st.warning(f"No text extracted from {pdf_file.name}")
-            except Exception as e:
-                st.error(f"Error processing {pdf_file.name}: {e}")
-
-    st.success(f"Processed {len(documents)} PDF documents")
-    return documents
-
-def create_vector_store(documents):
-    """Split documents into chunks and create a FAISS vector store"""
-    # Text splitter for breaking documents into chunks
-    text_splitter = RecursiveCharacterTextSplitter(
-        chunk_size=500,
-        chunk_overlap=50,
-        separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""]
-    )
-
-    # Split documents into chunks
-    chunks = []
-    for doc in documents:
-        doc_chunks = text_splitter.split_text(doc.page_content)
-        # Preserve metadata for each chunk
-        chunks.extend([
-            Document(page_content=chunk, metadata=doc.metadata)
-            for chunk in doc_chunks
-        ])
-
-    st.info(f"Created {len(chunks)} chunks from {len(documents)} documents")
-
-    # Create a proper embedding function for LangChain
-    embedding_function = HuggingFaceEmbeddings(
-        model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
-    )
-
-    # Create FAISS index
-    vector_store = FAISS.from_documents(
-        chunks,
-        embedding_function
-    )
-
-    return vector_store
-
 def retrieve_context(query, vector_store, top_k=5):
     """Retrieve most relevant document chunks for a given query"""
     # Search the vector store using similarity search
@@ -160,10 +86,11 @@ def retrieve_context(query, vector_store, top_k=5):
 
     return contexts
 
-def generate_response(query, contexts, model, tokenizer):
+def generate_response(query, contexts, model, tokenizer, language="auto"):
     """Generate a response using retrieved contexts with ALLaM-specific formatting"""
-    # Auto-detect language
-    language = detect_language(query)
+    # Auto-detect language if not specified
+    if language == "auto":
+        language = detect_language(query)
 
     # Format the prompt based on language
     if language == "arabic":
@@ -189,113 +116,262 @@ Context:
 Question: {query} [/INST]</s>"""
 
     try:
-        with st.spinner("Generating response..."):
-            # Generate response with appropriate parameters for ALLaM
-            inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
-
-            # Generate with appropriate parameters
-            outputs = model.generate(
-                inputs.input_ids,
-                attention_mask=inputs.attention_mask,
-                max_new_tokens=512,
-                temperature=0.7,
-                top_p=0.9,
-                do_sample=True,
-                repetition_penalty=1.1
-            )
-
-            # Decode the response
-            full_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
-
-            # Extract just the answer part (after the instruction)
-            response = full_output.split("[/INST]")[-1].strip()
-
-            # If response is empty for some reason, return the full output
-            if not response:
-                response = full_output
-
-            return response, [ctx.get("source", "Unknown") for ctx in contexts]
+        # Generate response with appropriate parameters for ALLaM
+        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+        # Generate with appropriate parameters
+        outputs = model.generate(
+            inputs.input_ids,
+            attention_mask=inputs.attention_mask,
+            max_new_tokens=512,
+            temperature=0.7,
+            top_p=0.9,
+            do_sample=True,
+            repetition_penalty=1.1
+        )
+
+        # Decode the response
+        full_output = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+        # Extract just the answer part (after the instruction)
+        response = full_output.split("[/INST]")[-1].strip()
+
+        # If response is empty for some reason, return the full output
+        if not response:
+            response = full_output
+
+        return response
 
     except Exception as e:
-        st.error(f"Error during generation: {e}")
+        print(f"Error during generation: {e}")
         # Fallback response
-        return "I apologize, but I encountered an error while generating a response.", []
-
-# Initialize the app state
-if 'conversation_history' not in st.session_state:
-    st.session_state.conversation_history = []
-
-if 'vector_store' not in st.session_state:
-    st.session_state.vector_store = None
-
-if 'uploaded_pdfs' not in st.session_state:
-    st.session_state.uploaded_pdfs = None
-
-# PDF upload section
-st.header("1. Upload Vision 2030 Documents")
-uploaded_files = st.file_uploader("Upload PDF documents about Vision 2030",
-                                  type=["pdf"],
-                                  accept_multiple_files=True,
-                                  help="Upload one or more PDF documents containing information about Vision 2030")
-
-if uploaded_files:
-    st.session_state.uploaded_pdfs = uploaded_files
-    if st.button("Process PDFs"):
-        documents = process_pdfs()
-        if documents:
-            with st.spinner("Creating vector database..."):
-                st.session_state.vector_store = create_vector_store(documents)
-            st.success("Vector database created successfully!")
-
-# Load the model (cached)
-model, tokenizer = load_model_and_tokenizer()
-
-# Chat interface
-st.header("2. Chat with the Vision 2030 Assistant")
-
-# Display conversation history
-for message in st.session_state.conversation_history:
-    if message["role"] == "user":
-        st.markdown(f"**You:** {message['content']}")
-    else:
-        st.markdown(f"**Assistant:** {message['content']}")
-        if 'sources' in message and message['sources']:
-            st.markdown(f"*Sources: {', '.join([os.path.basename(src) for src in message['sources']])}*")
-    st.divider()
-
-# Input for new question
-user_input = st.text_input("Ask a question about Vision 2030 (in Arabic or English):", key="user_query")
-
-# Examples
-st.markdown("**Example questions:**")
-examples_col1, examples_col2 = st.columns(2)
-with examples_col1:
-    st.markdown("- What is Saudi Vision 2030?")
-    st.markdown("- What are the economic goals of Vision 2030?")
-    st.markdown("- How does Vision 2030 support women's empowerment?")
-with examples_col2:
-    st.markdown("- ما هي رؤية السعودية 2030؟")
-    st.markdown("- ما هي الأهداف الاقتصادية لرؤية 2030؟")
-    st.markdown("- كيف تدعم رؤية 2030 تمكين المرأة السعودية؟")
-
-# Process the user input
-if user_input and st.session_state.vector_store:
-    # Add user message to history
-    st.session_state.conversation_history.append({"role": "user", "content": user_input})
-
-    # Get response
-    response, sources = generate_response(user_input, retrieve_context(user_input, st.session_state.vector_store), model, tokenizer)
-
-    # Add assistant message to history
-    st.session_state.conversation_history.append({"role": "assistant", "content": response, "sources": sources})
-
-    # Rerun to update the UI
-    st.experimental_rerun()
-
-elif user_input and not st.session_state.vector_store:
-    st.warning("Please upload and process Vision 2030 PDF documents first")
-
-# Reset conversation button
-if st.button("Reset Conversation") and len(st.session_state.conversation_history) > 0:
-    st.session_state.conversation_history = []
-    st.experimental_rerun()
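+# Note: decoding uses sampling (do_sample=True, temperature=0.7, top_p=0.9), so
+# answers vary across runs; repetition_penalty=1.1 discourages loops. Splitting
+# on "[/INST]" assumes the Llama-style prompt template built above.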
 
+def process_pdf_files(pdf_files):
+    """Process PDF files and create documents"""
+    documents = []
+
+    for pdf_file in pdf_files:
+        try:
+            # Gradio delivers uploads as temp files (3.x) or plain paths (4.x)
+            pdf_path = pdf_file if isinstance(pdf_file, str) else pdf_file.name
+
+            # Extract text
+            text = ""
+            with open(pdf_path, 'rb') as file:
+                reader = PyPDF2.PdfReader(file)
+                for page in reader.pages:
+                    page_text = page.extract_text()
+                    if page_text:
+                        text += page_text + "\n\n"
+
+            if text.strip():  # If we got some text
+                doc = Document(
+                    page_content=text,
+                    metadata={"source": pdf_path, "filename": os.path.basename(pdf_path)}
+                )
+                documents.append(doc)
+                print(f"Successfully processed: {os.path.basename(pdf_path)}")
+            else:
+                print(f"Warning: No text extracted from {os.path.basename(pdf_path)}")
+        except Exception as e:
+            print(f"Error processing {pdf_file}: {e}")
+
+    print(f"Processed {len(documents)} PDF documents")
+    return documents
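+# Assumption: gr.File passes temp-file objects whose .name is a filesystem path
+# (Gradio 3.x); on Gradio 4.x the items are plain path strings. The isinstance
+# check above handles both, and the PDF is read straight from disk with no
+# intermediate copy.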
+
+def create_vector_store(documents):
+    """Create a vector store from documents"""
+    # Text splitter for breaking documents into chunks
+    text_splitter = RecursiveCharacterTextSplitter(
+        chunk_size=500,
+        chunk_overlap=50,
+        separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""]
+    )
+
+    # Split documents into chunks
+    chunks = []
+    for doc in documents:
+        doc_chunks = text_splitter.split_text(doc.page_content)
+        # Preserve metadata for each chunk
+        chunks.extend([
+            Document(page_content=chunk, metadata=doc.metadata)
+            for chunk in doc_chunks
+        ])
+
+    print(f"Created {len(chunks)} chunks from {len(documents)} documents")
+
+    # Create embedding function
+    embedding_function = HuggingFaceEmbeddings(
+        model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2"
+    )
+
+    # Create FAISS index
+    vector_store = FAISS.from_documents(chunks, embedding_function)
+    return vector_store
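+# Optional persistence sketch (standard LangChain FAISS API), so PDFs are not
+# re-embedded on every restart; "vision2030_index" is an illustrative name:
+#   vector_store.save_local("vision2030_index")
+#   vector_store = FAISS.load_local("vision2030_index", embedding_function)
+# (newer LangChain releases also require allow_dangerous_deserialization=True)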
+
+# Variables to store state
+model = None
+tokenizer = None
+assistant = None
+
+# Load the model and tokenizer
+def load_model_and_tokenizer():
+    global model, tokenizer
+
+    if model is not None and tokenizer is not None:
+        return "Model already loaded"
+
+    model_name = "ALLaM-AI/ALLaM-7B-Instruct-preview"
+    print(f"Loading model: {model_name}")
+
+    try:
+        # First attempt with AutoTokenizer
+        tokenizer = AutoTokenizer.from_pretrained(
+            model_name,
+            trust_remote_code=True,
+            use_fast=False
+        )
+
+        # Load model with appropriate settings for ALLaM
+        model = AutoModelForCausalLM.from_pretrained(
+            model_name,
+            torch_dtype=torch.bfloat16,  # Use bfloat16 for better compatibility
+            trust_remote_code=True,
+            device_map="auto",
+        )
+
+        return "Model loaded successfully with AutoTokenizer!"
+
+    except Exception as e:
+        error_msg = f"First loading attempt failed: {e}"
+        print(error_msg)
+
+        try:
+            # Try with specific tokenizer class if the first attempt fails
+            from transformers import LlamaTokenizer
+
+            tokenizer = LlamaTokenizer.from_pretrained(model_name)
+            model = AutoModelForCausalLM.from_pretrained(
+                model_name,
+                torch_dtype=torch.float16,
+                trust_remote_code=True,
+                device_map="auto",
+            )
+
+            return "Model loaded successfully with LlamaTokenizer!"
+        except Exception as e2:
+            return f"Both loading attempts failed. Error 1: {e}. Error 2: {e2}"
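+# Sizing note: 7B parameters in bfloat16 is roughly 14 GB of weights alone
+# (7e9 params x 2 bytes), so device_map="auto" may shard or offload layers to
+# CPU on smaller GPUs, at a real cost in generation speed.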
+
+# Gradio Interface Functions
+def process_pdfs(pdf_files):
+    if not pdf_files:
+        return "No files uploaded. Please upload PDF documents about Vision 2030."
+
+    documents = process_pdf_files(pdf_files)
+
+    if not documents:
+        return "Failed to extract text from the uploaded PDFs."
+
+    global assistant, model, tokenizer
+
+    # Ensure model is loaded
+    if model is None or tokenizer is None:
+        load_status = load_model_and_tokenizer()
+        if "successfully" not in load_status.lower():
+            return f"Model loading failed: {load_status}"
+
+    # Create vector store
+    vector_store = create_vector_store(documents)
+
+    # Initialize assistant
+    assistant = Vision2030Assistant(model, tokenizer, vector_store)
+
+    return f"Successfully processed {len(documents)} documents. The assistant is ready to use!"
+
+def answer_query(message, history):
+    global assistant
+    history = history or []
+
+    if assistant is None:
+        history.append((message, "Please upload and process Vision 2030 PDF documents first."))
+        return history
+
+    response = assistant.answer(message)
+    history.append((message, response))
+    return history
+
+def reset_chat():
+    global assistant
+
+    if assistant is None:
+        return "No active conversation to reset."
+
+    reset_message = assistant.reset_conversation()
+    return reset_message
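+# Note: answer_query returns the full list of (user, bot) pairs because a
+# tuple-style gr.Chatbot replaces its displayed value with whatever its handler
+# returns; a bare string would not render as a conversation.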
+
+# Create Gradio interface
+with gr.Blocks(title="Vision 2030 Virtual Assistant") as demo:
+    gr.Markdown("# Vision 2030 Virtual Assistant")
+    gr.Markdown("Ask questions about Saudi Vision 2030 goals, projects, and progress in Arabic or English.")
+
+    with gr.Tab("Setup"):
+        gr.Markdown("## Step 1: Load the Model")
+        load_btn = gr.Button("Load ALLaM-7B Model", variant="primary")
+        load_output = gr.Textbox(label="Load Status")
+        load_btn.click(load_model_and_tokenizer, inputs=[], outputs=load_output)
+
+        gr.Markdown("## Step 2: Upload Vision 2030 Documents")
+        pdf_files = gr.File(file_types=[".pdf"], file_count="multiple", label="Upload PDF Documents")
+        process_btn = gr.Button("Process Documents", variant="primary")
+        process_output = gr.Textbox(label="Processing Status")
+        process_btn.click(process_pdfs, inputs=[pdf_files], outputs=process_output)
+
+    with gr.Tab("Chat"):
+        chatbot = gr.Chatbot(label="Conversation")
+        message = gr.Textbox(
+            label="Ask a question about Vision 2030 (in Arabic or English)",
+            placeholder="What are the main goals of Vision 2030?",
+            lines=2
+        )
+        submit_btn = gr.Button("Submit", variant="primary")
+        reset_btn = gr.Button("Reset Conversation")
+
+        gr.Markdown("### Example Questions")
+        with gr.Row():
+            with gr.Column():
+                gr.Markdown("**English Questions:**")
+                en_examples = gr.Examples(
+                    examples=[
+                        "What is Saudi Vision 2030?",
+                        "What are the economic goals of Vision 2030?",
+                        "How does Vision 2030 support women's empowerment?",
+                        "What environmental initiatives are part of Vision 2030?",
+                        "What is the role of the Public Investment Fund in Vision 2030?"
+                    ],
+                    inputs=message
+                )
+
+            with gr.Column():
+                gr.Markdown("**Arabic Questions:**")
+                ar_examples = gr.Examples(
+                    examples=[
+                        "ما هي رؤية السعودية 2030؟",
+                        "ما هي الأهداف الاقتصادية لرؤية 2030؟",
+                        "كيف تدعم رؤية 2030 تمكين المرأة السعودية؟",
+                        "ما هي مبادرات رؤية 2030 للحفاظ على البيئة؟",
+                        "ما هي استراتيجية صندوق الاستثمارات العامة في رؤية 2030؟"
+                    ],
+                    inputs=message
+                )
+
+        reset_output = gr.Textbox(label="Reset Status", visible=False)
+        submit_btn.click(answer_query, inputs=[message, chatbot], outputs=[chatbot])
+        message.submit(answer_query, inputs=[message, chatbot], outputs=[chatbot])
+        reset_btn.click(reset_chat, inputs=[], outputs=[reset_output])
+        reset_btn.click(lambda: [], inputs=[], outputs=[chatbot])  # clear the chat display
+
+# Launch the app
+demo.launch()
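+# demo.launch() binds to localhost by default; the standard Gradio options
+# launch(server_name="0.0.0.0") or launch(share=True) expose it externally.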