heerjtdev committed
Commit 2c19d14 · verified · 1 parent: df562ea

Update app.py

Files changed (1):
  1. app.py +198 -316
app.py CHANGED
@@ -1,325 +1,207 @@
- # import gradio as gr
- # print("GRADIO VERSION:", gr.__version__)
- # import json
- # import os
- # import tempfile
- # from pathlib import Path
-
- # # NOTE: You must ensure that 'working_yolo_pipeline.py' exists
- # # and defines the following items correctly:
- # from working_yolo_pipeline import run_document_pipeline, DEFAULT_LAYOUTLMV3_MODEL_PATH, WEIGHTS_PATH
- # # Since I don't have this file, I am assuming the imports are correct.
-
- # # Define placeholders for assumed constants if the pipeline file isn't present
- # # You should replace these with your actual definitions if they are missing
- # try:
- #     from working_yolo_pipeline import run_document_pipeline, DEFAULT_LAYOUTLMV3_MODEL_PATH, WEIGHTS_PATH
- # except ImportError:
- #     print("Warning: 'working_yolo_pipeline.py' not found. Using dummy paths.")
- #     def run_document_pipeline(*args):
- #         return {"error": "Placeholder pipeline function called."}
- #     DEFAULT_LAYOUTLMV3_MODEL_PATH = "./models/layoutlmv3_model"
- #     WEIGHTS_PATH = "./weights/yolo_weights.pt"
-
-
- # def process_pdf(pdf_file, layoutlmv3_model_path=None):
- #     """
- #     Wrapper function for Gradio interface.
-
- #     Args:
- #         pdf_file: Gradio UploadButton file object
- #         layoutlmv3_model_path: Optional custom model path
-
- #     Returns:
- #         Tuple of (JSON string, download file path)
- #     """
- #     if pdf_file is None:
- #         return "❌ Error: No PDF file uploaded.", None
-
- #     # Use default model path if not provided
- #     if not layoutlmv3_model_path:
- #         layoutlmv3_model_path = DEFAULT_LAYOUTLMV3_MODEL_PATH
-
- #     # Verify model and weights exist
- #     if not os.path.exists(layoutlmv3_model_path):
- #         return f"❌ Error: LayoutLMv3 model not found at {layoutlmv3_model_path}", None
-
- #     if not os.path.exists(WEIGHTS_PATH):
- #         return f"❌ Error: YOLO weights not found at {WEIGHTS_PATH}", None
-
- #     try:
- #         # Get the uploaded PDF path
- #         pdf_path = pdf_file.name
-
- #         # Run the pipeline
- #         result = run_document_pipeline(pdf_path, layoutlmv3_model_path, 'label_studio_import.json')
-
- #         if result is None:
- #             return "❌ Error: Pipeline failed to process the PDF. Check console for details.", None
-
- #         # Create a temporary file for download
- #         output_filename = f"{Path(pdf_path).stem}_analysis.json"
- #         temp_output = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.json', prefix='analysis_')
-
- #         # Dump results to the temporary file
- #         with open(temp_output.name, 'w', encoding='utf-8') as f:
- #             json.dump(result, f, indent=2, ensure_ascii=False)
-
- #         # Format JSON for display
- #         json_display = json.dumps(result, indent=2, ensure_ascii=False)
-
- #         return json_display, temp_output.name
-
- #     except Exception as e:
- #         return f"❌ Error during processing: {str(e)}", None
-
-
- # # Create Gradio interface
- # # FIX APPLIED: Removed 'theme=gr.themes.Soft()' which caused the TypeError
- # with gr.Blocks(title="Document Analysis Pipeline") as demo:
- #     gr.Markdown("""
- #     # 📄 Document Analysis Pipeline
-
- #     Upload a PDF document to extract structured data including questions, options, answers, passages, and embedded images.
-
- #     **Pipeline Steps:**
- #     1. 🔍 YOLO/OCR Preprocessing (word extraction + figure/equation detection)
- #     2. 🤖 LayoutLMv3 Inference (BIO tagging)
- #     3. 📊 Structured JSON Decoding
- #     4. 🖼️ Base64 Image Embedding
- #     """)
-
- #     with gr.Row():
- #         with gr.Column(scale=1):
- #             pdf_input = gr.File(
- #                 label="Upload PDF Document",
- #                 file_types=[".pdf"],
- #                 type="filepath"
- #             )
-
- #             model_path_input = gr.Textbox(
- #                 label="LayoutLMv3 Model Path (optional)",
- #                 placeholder=DEFAULT_LAYOUTLMV3_MODEL_PATH,
- #                 value=DEFAULT_LAYOUTLMV3_MODEL_PATH,
- #                 interactive=True
- #             )
-
- #             process_btn = gr.Button("🚀 Process Document", variant="primary", size="lg")
-
- #             gr.Markdown("""
- #             ### ℹ️ Notes:
- #             - Processing may take several minutes depending on PDF size
- #             - Figures and equations will be extracted and embedded as Base64
- #             - The output JSON includes structured questions, options, and answers
- #             """)
-
- #         with gr.Column(scale=2):
- #             json_output = gr.Code(
- #                 label="Structured JSON Output",
- #                 language="json",
- #                 lines=25
- #             )
-
- #             download_output = gr.File(
- #                 label="Download Full JSON",
- #                 interactive=False
- #             )
-
- #     # Status/Examples section
- #     with gr.Row():
- #         gr.Markdown("""
- #         ### 📋 Output Format
- #         The pipeline generates JSON with the following structure:
- #         - **Questions**: Extracted question text
- #         - **Options**: Multiple choice options (A, B, C, D, etc.)
- #         - **Answers**: Correct answer(s)
- #         - **Passages**: Associated reading passages
- #         - **Images**: Base64-encoded figures and equations (embedded with keys like `figure1`, `equation2`)
- #         """)
-
- #     # Connect the button to the processing function
- #     process_btn.click(
- #         fn=process_pdf,
- #         inputs=[pdf_input, model_path_input],
- #         outputs=[json_output, download_output],
- #         api_name="process_document"
- #     )
-
- #     # Example section (optional - add example PDFs if available)
- #     # gr.Examples(
- #     #     examples=[
- #     #         ["examples/sample1.pdf"],
- #     #         ["examples/sample2.pdf"],
- #     #     ],
- #     #     inputs=pdf_input,
- #     # )
-
- # # Launch the app
- # if __name__ == "__main__":
- #     demo.launch(
- #         server_name="0.0.0.0",
- #         server_port=7860,
- #         share=False,
- #         show_error=True
- #     )
-
-
-
-
-
  import gradio as gr
- print("GRADIO VERSION:", gr.__version__)
- import json
  import os
- import tempfile
- from pathlib import Path
-
- # ==============================
- # WRITE CUSTOM CSS FOR FONTS
- # ==============================
-
- # CUSTOM_CSS = """
- # @font-face {
- #     font-family: 'NotoSansMath';
- #     src: url('./NotoSansMath-Regular.ttf') format('truetype');
- #     font-weight: normal;
- #     font-style: normal;
- # }
-
- # html, body, * {
- #     font-family: 'NotoSansMath', sans-serif !important;
- # }
- # """
-
- # # Optionally write the CSS file if needed (not required for inline css)
- # if not os.path.exists("custom.css"):
- #     with open("custom.css", "w") as f:
- #         f.write(CUSTOM_CSS)
- # ==============================
-
- try:
-     from working_yolo_pipeline import run_document_pipeline, DEFAULT_LAYOUTLMV3_MODEL_PATH, WEIGHTS_PATH
- except ImportError:
-     print("Warning: 'working_yolo_pipeline.py' not found. Using dummy paths.")
-     def run_document_pipeline(*args):
-         return {"error": "Placeholder pipeline function called."}
-     DEFAULT_LAYOUTLMV3_MODEL_PATH = "./models/layoutlmv3_model"
-     WEIGHTS_PATH = "./weights/yolo_weights.pt"
-
-
- def process_pdf(pdf_file, layoutlmv3_model_path=None):
-     if pdf_file is None:
-         return "❌ Error: No PDF file uploaded.", None
-
-     if not layoutlmv3_model_path:
-         layoutlmv3_model_path = DEFAULT_LAYOUTLMV3_MODEL_PATH
-
-     if not os.path.exists(layoutlmv3_model_path):
-         return f"❌ Error: LayoutLMv3 model not found at {layoutlmv3_model_path}", None
-
-     if not os.path.exists(WEIGHTS_PATH):
-         return f"❌ Error: YOLO weights not found at {WEIGHTS_PATH}", None
-
-     try:
-         pdf_path = pdf_file.name
-
-         result = run_document_pipeline(pdf_path, layoutlmv3_model_path, 'label_studio_import.json')
-
-         if result is None:
-             return "❌ Error: Pipeline failed to process the PDF. Check console for details.", None
-
-         output_filename = f"{Path(pdf_path).stem}_analysis.json"
-         temp_output = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.json', prefix='analysis_')
-
-         with open(temp_output.name, 'w', encoding='utf-8') as f:
-             json.dump(result, f, indent=2, ensure_ascii=False)
-
-         json_display = json.dumps(result, indent=2, ensure_ascii=False)
-
-         return json_display, temp_output.name
-
-     except Exception as e:
-         return f"❌ Error during processing: {str(e)}", None
-
-
- with gr.Blocks(
-     title="Document Analysis Pipeline"
- ) as demo:
-
-
-     gr.HTML()
-
-     gr.Markdown("""
-     # 📄 Document Analysis Pipeline
-
-     Upload a PDF document to extract structured data including questions, options, answers, passages, and embedded images.
-
-     **Pipeline Steps:**
-     1. 🔍 YOLO/OCR Preprocessing (word extraction + figure/equation detection)
-     2. 🤖 LayoutLMv3 Inference (BIO tagging)
-     3. 📊 Structured JSON Decoding
-     4. 🖼️ Base64 Image Embedding
-     """)

      with gr.Row():
          with gr.Column(scale=1):
-             pdf_input = gr.File(
-                 label="Upload PDF Document",
-                 file_types=[".pdf"],
-                 type="filepath"
-             )
-
-             model_path_input = gr.Textbox(
-                 label="LayoutLMv3 Model Path (optional)",
-                 placeholder=DEFAULT_LAYOUTLMV3_MODEL_PATH,
-                 value=DEFAULT_LAYOUTLMV3_MODEL_PATH,
-                 interactive=True
-             )
-
-             process_btn = gr.Button("🚀 Process Document", variant="primary", size="lg")
-
-             gr.Markdown("""
-             ### ℹ️ Notes:
-             - Processing may take several minutes depending on PDF size
-             - Figures and equations will be extracted and embedded as Base64
-             - The output JSON includes structured questions, options, and answers
-             """)

          with gr.Column(scale=2):
-             json_output = gr.Code(
-                 label="Structured JSON Output",
-                 language="json",
-                 lines=25
-             )
-
-             download_output = gr.File(
-                 label="Download Full JSON",
-                 interactive=False
-             )
-
-     with gr.Row():
-         gr.Markdown("""
-         ### 📋 Output Format
-         The pipeline generates JSON with the following structure:
-         - **Questions**: Extracted question text
-         - **Options**: Multiple choice options
-         - **Answers**: Correct answer(s)
-         - **Passages**: Associated reading passages
-         - **Images**: Base64-encoded figures and equations
-         """)
-
-     process_btn.click(
-         fn=process_pdf,
-         inputs=[pdf_input, model_path_input],
-         outputs=[json_output, download_output],
-         api_name="process_document"
-     )
-

  if __name__ == "__main__":
-     demo.launch(
-         server_name="0.0.0.0",
-         server_port=7860,
-         share=False,
-         show_error=True
-     )
  import gradio as gr
+ import fitz  # PyMuPDF
+ import torch
  import os

+ # --- LANGCHAIN & RAG IMPORTS ---
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from langchain_community.vectorstores import FAISS
+ from langchain_core.embeddings import Embeddings
+
+ # --- ONNX & MODEL IMPORTS ---
+ from transformers import AutoTokenizer
+ from optimum.onnxruntime import ORTModelForFeatureExtraction, ORTModelForCausalLM
+ from huggingface_hub import snapshot_download
+ import onnxruntime as ort
+
+ # Check available hardware accelerators
+ PROVIDERS = ort.get_available_providers()
+ print(f"⚡ Hardware Acceleration Providers: {PROVIDERS}")
+
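+ # ort.get_available_providers() lists execution providers in ONNX Runtime's
+ # priority order (CPU always last), e.g. ['CUDAExecutionProvider',
+ # 'CPUExecutionProvider'] on a CUDA machine, so PROVIDERS[0] used below is
+ # the fastest backend available on this host.
+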
+ # ---------------------------------------------------------
+ # 1. OPTIMIZED EMBEDDINGS (BGE-SMALL)
+ # ---------------------------------------------------------
+ class OnnxBgeEmbeddings(Embeddings):
+     # CHANGE 1: Switched to 'bge-small' (3x faster than large, similar accuracy)
+     def __init__(self, model_name="BAAI/bge-small-en-v1.5"):
+         print(f"🔄 Loading Faster Embeddings: {model_name}...")
+         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+         self.model = ORTModelForFeatureExtraction.from_pretrained(
+             model_name,
+             export=False,
+             provider=PROVIDERS[0]  # Auto-select best hardware (CUDA/CoreML)
+         )
+
+     def _process_batch(self, texts):
+         inputs = self.tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors="pt")
+
+         # Move inputs to same device as model if needed (mostly handled by Optimum)
+         device = self.model.device
+         inputs = {k: v.to(device) for k, v in inputs.items()}
+
+         with torch.no_grad():
+             outputs = self.model(**inputs)
+
+         embeddings = outputs.last_hidden_state[:, 0]
+         embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
+         # Detach from graph before converting to numpy
+         return embeddings.cpu().numpy().tolist()
+
+     def embed_documents(self, texts):
+         return self._process_batch(texts)
+
+     def embed_query(self, text):
+         return self._process_batch(["Represent this sentence for searching relevant passages: " + text])[0]
+
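+ # The "Represent this sentence..." prefix in embed_query is the query
+ # instruction documented for the BGE v1.5 models; documents are embedded
+ # without it. Hypothetical sanity check (not wired into the app):
+ #   emb = OnnxBgeEmbeddings()
+ #   vec = emb.embed_query("What is photosynthesis?")
+ #   len(vec)  # -> 384, the hidden size of bge-small-en-v1.5
+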
+ # ---------------------------------------------------------
+ # 2. OPTIMIZED LLM (Qwen 2.5 - 0.5B)
+ # ---------------------------------------------------------
+ class LLMEvaluator:
+     def __init__(self):
+         # CHANGE 2: Switched to Qwen 2.5 0.5B (half the size of Llama 1B, very smart)
+         self.repo_id = "Xenova/Qwen2.5-0.5B-Instruct"
+         self.local_dir = "onnx_qwen_local"
+
+         print(f"🔄 Preparing Ultra-Fast LLM: {self.repo_id}...")
+
+         if not os.path.exists(self.local_dir):
+             print(f"📥 Downloading Model to {self.local_dir}...")
+             # Note: Xenova repos usually have the ONNX ready, no complex wildcard needed
+             snapshot_download(repo_id=self.repo_id, local_dir=self.local_dir)
+             print("✅ Download complete.")
+
+         self.tokenizer = AutoTokenizer.from_pretrained(self.local_dir)
+
+         # CHANGE 3: Enabled IO Binding + explicit provider
+         self.model = ORTModelForCausalLM.from_pretrained(
+             self.local_dir,
+             use_cache=True,
+             use_io_binding=True,  # CHANGE: Major speedup on GPU
+             provider=PROVIDERS[0]
+         )
+
+     def evaluate(self, context, question, student_answer, max_marks):
+         # Qwen uses ChatML format implicitly via tokenizer
+         messages = [
+             {"role": "system", "content": "You are a strict academic grader. Verify the student answer against the context. Be harsh. Do not hallucinate."},
+             {"role": "user", "content": f"""
+ CONTEXT: {context}
+ QUESTION: {question}
+ ANSWER: {student_answer}
+
+ TASK: Grade out of {max_marks}.
+ RULES:
+ 1. If wrong, 0 marks.
+ 2. Be strict.
+ 3. Format: 'Score: X/{max_marks} \n Feedback: ...'
+ """}
+         ]
+
+         input_text = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+         inputs = self.tokenizer(input_text, return_tensors="pt")
+
+         # Move inputs for IO Binding
+         device = self.model.device
+         inputs = {k: v.to(device) for k, v in inputs.items()}
+
+         with torch.no_grad():
+             outputs = self.model.generate(
+                 **inputs,
+                 max_new_tokens=75,  # CHANGE 4: Reduced tokens (we only need a short score/feedback)
+                 temperature=0.1,
+                 do_sample=False
+             )
+
+         # 'inputs' is a plain dict after the .to(device) comprehension above,
+         # so index it; attribute access (inputs.input_ids) would raise AttributeError.
+         response = self.tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
+         return response
+
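+ # Because do_sample=False, decoding is greedy and the temperature value is
+ # effectively ignored. Rough standalone usage (hypothetical values):
+ #   grader = LLMEvaluator()
+ #   print(grader.evaluate("Plants make glucose via photosynthesis.",
+ #                         "What do plants produce?", "Glucose", 5))
+ #   # -> "Score: 5/5 \n Feedback: ..."
+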
+ # ---------------------------------------------------------
+ # 3. Main Application Logic (unchanged, but uses the new classes)
+ # ---------------------------------------------------------
+ class VectorSystem:
+     def __init__(self):
+         self.vector_store = None
+         self.embeddings = OnnxBgeEmbeddings()  # Uses new BGE-Small
+         self.llm = LLMEvaluator()              # Uses new Qwen 0.5B
+         self.all_chunks = []
+         self.total_chunks = 0
+
+     def process_file(self, file_obj):
+         if file_obj is None: return "No file uploaded."
+         try:
+             text = ""
+             if file_obj.name.endswith('.pdf'):
+                 doc = fitz.open(file_obj.name)
+                 for page in doc: text += page.get_text()
+             elif file_obj.name.endswith('.txt'):
+                 with open(file_obj.name, 'r', encoding='utf-8') as f: text = f.read()
+             else:
+                 return "❌ Error: Only .pdf and .txt supported."
+
+             text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=100)
+             self.all_chunks = text_splitter.split_text(text)
+             self.total_chunks = len(self.all_chunks)
+
+             if not self.all_chunks: return "File empty."
+
+             metadatas = [{"id": i} for i in range(self.total_chunks)]
+             self.vector_store = FAISS.from_texts(self.all_chunks, self.embeddings, metadatas=metadatas)
+
+             return f"✅ Indexed {self.total_chunks} chunks."
+         except Exception as e:
+             return f"Error: {str(e)}"
+
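+     # Each chunk is indexed with metadata {"id": i} so a FAISS hit can be
+     # mapped back to its position in all_chunks and expanded with its
+     # neighbours below. With chunk_size=800 / chunk_overlap=100 (characters),
+     # consecutive chunks share roughly 100 characters of text.
+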
+     def process_query(self, question, student_answer, max_marks):
+         if not self.vector_store: return "⚠️ Please upload a file first.", ""
+         if not question: return "⚠️ Enter a question.", ""
+
+         results = self.vector_store.similarity_search_with_score(question, k=1)
+         top_doc, score = results[0]
+
+         center_id = top_doc.metadata['id']
+         start_id = max(0, center_id - 1)
+         end_id = min(self.total_chunks - 1, center_id + 1)
+
+         expanded_context = ""
+         for i in range(start_id, end_id + 1):
+             expanded_context += self.all_chunks[i] + "\n"
+
+         evidence_display = f"### 📚 Expanded Context (Chunks {start_id} to {end_id}):\n"
+         evidence_display += f"> ... {expanded_context} ..."
+
+         llm_feedback = "Please enter a student answer to grade."
+         if student_answer:
+             llm_feedback = self.llm.evaluate(expanded_context, question, student_answer, max_marks)
+
+         return evidence_display, llm_feedback
+
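+ # Note on retrieval: process_query fetches only the top-1 chunk (k=1) and
+ # widens the window to its immediate neighbours (a hit on chunk 7 yields
+ # chunks 6-8); the max/min clamps handle hits at the very start or end of
+ # the document.
+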
+ system = VectorSystem()
+
+ with gr.Blocks(title="EduGenius AI Grader") as demo:
+     gr.Markdown("# ⚡ EduGenius: Ultra-Fast RAG")
+     gr.Markdown("Powered by **Qwen-2.5-0.5B** and **BGE-Small** (ONNX Optimized)")
+
      with gr.Row():
          with gr.Column(scale=1):
+             pdf_input = gr.File(label="1. Upload Chapter")
+             upload_btn = gr.Button("Index Content", variant="primary")
+             status_msg = gr.Textbox(label="Status", interactive=False)

          with gr.Column(scale=2):
+             with gr.Row():
+                 q_input = gr.Textbox(label="Question", scale=2)
+                 max_marks = gr.Slider(minimum=1, maximum=20, value=5, step=1, label="Max Marks")
+
+             a_input = gr.TextArea(label="Student Answer")
+             run_btn = gr.Button("Retrieve & Grade", variant="secondary")
+
+     with gr.Row():
+         evidence_box = gr.Markdown(label="Context Used")
+         grade_box = gr.Markdown(label="Grading Result")
+
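+     # Event wiring: "Index Content" builds the FAISS index; "Retrieve & Grade"
+     # runs retrieval and, when a student answer is provided, the LLM grader.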
+     upload_btn.click(system.process_file, inputs=[pdf_input], outputs=[status_msg])
+     run_btn.click(system.process_query, inputs=[q_input, a_input, max_marks], outputs=[evidence_box, grade_box])

  if __name__ == "__main__":
+     demo.launch()