Ferdinann committed on
Commit
bde8d2f
Β·
verified Β·
1 Parent(s): 498a0c8

Upload sentiment_app.py

Browse files
Files changed (1) hide show
  1. sentiment_app.py +469 -0
sentiment_app.py ADDED
@@ -0,0 +1,469 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
4
+ import pandas as pd
5
+ import matplotlib.pyplot as plt
6
+ import seaborn as sns
7
+ from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
8
+ import numpy as np
9
+ from datetime import datetime
10
+ import io
11
+ import base64
12
+
13
+ # Setup plotting style
14
+ sns.set_style("whitegrid")
15
+ plt.rcParams['figure.figsize'] = (10, 6)
16
+
17
class SentimentAnalyzer:
    """Indonesian sentiment analyzer wrapping a pre-trained RoBERTa pipeline.

    The model ``w11wo/indonesian-roberta-base-sentiment-classifier`` was
    chosen because:
    - it is already fine-tuned for sentiment analysis (no extra training),
    - RoBERTa is faster and more efficient than BERT,
    - it tolerates slang and informal Indonesian.
    """

    def __init__(self, model_name="w11wo/indonesian-roberta-base-sentiment-classifier"):
        """Load the Hugging Face sentiment pipeline and the label mapping.

        Args:
            model_name: Hub id of a sequence-classification sentiment model.
        """
        print(f"Loading model: {model_name}")
        # HF pipeline convention: device 0 = first GPU, -1 = CPU.
        self.device = 0 if torch.cuda.is_available() else -1

        # Truncate long inputs at the 512-token limit instead of erroring.
        self.sentiment_pipeline = pipeline(
            "sentiment-analysis",
            model=model_name,
            device=self.device,
            truncation=True,
            max_length=512
        )

        # Map raw model labels to the Indonesian category names shown in the UI.
        self.label_mapping = {
            "POSITIVE": "Positif/Pujian",
            "NEGATIVE": "Keluhan/Kritik",
            "NEUTRAL": "Netral/Pertanyaan"
        }

        print("Model loaded successfully!")

    def analyze(self, text):
        """Analyze sentiment of a single text.

        Returns:
            dict with keys ``label``, ``kategori``, ``confidence``,
            ``confidence_level`` and ``interpretation``.
        """
        if not text or text.strip() == "":
            # FIX: include "confidence_level" in the invalid-input result too;
            # analyze_single_text reads result['confidence_level'] and
            # previously raised KeyError on empty input.
            return {
                "label": "Invalid",
                "kategori": "Input kosong",
                "confidence": 0.0,
                "confidence_level": "N/A",
                "interpretation": "Silakan masukkan teks untuk dianalisis"
            }

        result = self.sentiment_pipeline(text)[0]
        # Model may emit lowercase labels; normalize to the mapping's keys.
        label = result['label'].upper()
        score = result['score']

        # Confidence bucket shown to the operator.
        if score >= 0.8:
            confidence_level = "Sangat Yakin"
        elif score >= 0.6:
            confidence_level = "Yakin"
        else:
            confidence_level = "Kurang Yakin"

        # Triage interpretation for the disaster-response admin.
        if label == "NEGATIVE":
            if score >= 0.8:
                interpretation = "⚠️ PRIORITAS TINGGI - Keluhan serius yang memerlukan tindakan segera"
            else:
                interpretation = "⚡ Keluhan yang perlu ditindaklanjuti"
        elif label == "POSITIVE":
            interpretation = "✅ Feedback positif atau apresiasi"
        else:
            interpretation = "ℹ️ Pertanyaan atau informasi netral"

        return {
            "label": label,
            "kategori": self.label_mapping.get(label, label),
            "confidence": score,
            "confidence_level": confidence_level,
            "interpretation": interpretation
        }

    def batch_analyze(self, texts):
        """Analyze multiple texts; returns one result dict per input."""
        return [self.analyze(text) for text in texts]

    def evaluate_model(self, test_texts, true_labels):
        """Evaluate model performance on a labeled sample.

        Args:
            test_texts: list of texts.
            true_labels: list of ground-truth labels
                (POSITIVE / NEGATIVE / NEUTRAL).

        Returns:
            dict with ``accuracy``, ``classification_report`` (dict),
            ``confusion_matrix``, ``predictions`` and ``labels`` (the
            deterministic label order used by report and matrix).
        """
        predictions = []
        pred_labels = []

        for text in test_texts:
            result = self.analyze(text)
            predictions.append(result)
            pred_labels.append(result['label'])

        # FIX: list(set(...)) has arbitrary ordering while sklearn sorts
        # classes internally, so passing it as target_names could mislabel
        # every row of the report and both axes of the confusion matrix.
        # Use one explicit, sorted label order everywhere instead (including
        # any label the model predicted but that is absent from the truth).
        labels = sorted(set(true_labels) | set(pred_labels))

        accuracy = accuracy_score(true_labels, pred_labels)
        report = classification_report(
            true_labels,
            pred_labels,
            labels=labels,
            output_dict=True,
            zero_division=0
        )

        cm = confusion_matrix(true_labels, pred_labels, labels=labels)

        return {
            'accuracy': accuracy,
            'classification_report': report,
            'confusion_matrix': cm,
            'predictions': predictions,
            'labels': labels
        }
130
+
131
# Initialize the analyzer once at import time so every Gradio handler below
# shares a single loaded model (loading weights is expensive).
analyzer = SentimentAnalyzer()

# Sample data for testing: example disaster complaints and community feedback
# in Indonesian, including informal/slang phrasing.
# SAMPLE_DATA["labels"][i] is the ground-truth sentiment of SAMPLE_DATA["texts"][i].
SAMPLE_DATA = {
    "texts": [
        "Bantuan bencana sangat lambat, kami sudah 3 hari belum dapat makanan!",
        "Terima kasih banyak atas bantuan yang cepat, sangat membantu kami",
        "Kapan bantuan akan tiba di lokasi kami?",
        "Posko pengungsian penuh, tidak ada tempat untuk tidur!",
        "Tim relawan sangat baik dan peduli",
        "Mohon info jalur evakuasi terdekat",
        "Air bersih habis, kondisi sangat memprihatinkan",
        "Koordinasi tim bantuan sangat bagus",
        "Gimana cara daftar bantuan sosial?",
        "Hadeh parah banget nih pelayanan, gak jelas!",
        "Mantap jiwa pelayanannya, cepet banget",
        "Mana nih bantuan yang dijanjikan? Udah lama nungguin!",
        "Alhamdulillah bantuan sudah sampai dengan selamat",
        "Tempat pengungsian kotor dan tidak layak!",
        "Bagaimana prosedur mendapatkan bantuan medis?"
    ],
    # Ground-truth labels, three per line, in the same order as "texts".
    "labels": [
        "NEGATIVE", "POSITIVE", "NEUTRAL",
        "NEGATIVE", "POSITIVE", "NEUTRAL",
        "NEGATIVE", "POSITIVE", "NEUTRAL",
        "NEGATIVE", "POSITIVE", "NEGATIVE",
        "POSITIVE", "NEGATIVE", "NEUTRAL"
    ]
}
161
+
162
def analyze_single_text(text):
    """Gradio handler: analyse one text and return a markdown summary."""
    # Delegate the actual classification to the shared module-level analyzer.
    res = analyzer.analyze(text)

    # Render the result fields into the markdown card shown in the UI.
    return f"""
    🎯 **Hasil Analisis:**

    📊 **Kategori**: {res['kategori']}
    📈 **Confidence**: {res['confidence']:.2%} ({res['confidence_level']})
    💡 **Interpretasi**: {res['interpretation']}
    """
176
+
177
def analyze_batch_texts(text_input):
    """Gradio handler: analyse one message per line, return a markdown report."""
    # Guard clause: nothing to analyse.
    if not text_input or not text_input.strip():
        return "Silakan masukkan teks (satu per baris)"

    texts = [line.strip() for line in text_input.split('\n') if line.strip()]
    results = analyzer.batch_analyze(texts)

    def _priority(res):
        # Red = high-confidence complaint, yellow = other complaint, green = rest.
        if res['label'] == 'NEGATIVE':
            return '🔴' if res['confidence'] >= 0.8 else '🟡'
        return '🟢'

    # One table row per message; long texts are truncated for display.
    rows = [
        {
            'Teks': (msg[:50] + '...') if len(msg) > 50 else msg,
            'Kategori': res['kategori'],
            'Confidence': f"{res['confidence']:.2%}",
            'Prioritas': _priority(res)
        }
        for msg, res in zip(texts, results)
    ]
    df = pd.DataFrame(rows)

    # Tally predictions per label in a single pass.
    total = len(results)
    tally = {}
    for res in results:
        tally[res['label']] = tally.get(res['label'], 0) + 1
    keluhan = tally.get('NEGATIVE', 0)
    positif = tally.get('POSITIVE', 0)
    netral = tally.get('NEUTRAL', 0)

    stats = f"""
    📊 **Ringkasan Analisis:**
    - Total pesan: {total}
    - Keluhan/Kritik: {keluhan} ({keluhan/total*100:.1f}%)
    - Positif/Pujian: {positif} ({positif/total*100:.1f}%)
    - Netral/Pertanyaan: {netral} ({netral/total*100:.1f}%)
    """

    return stats + "\n\n" + df.to_markdown(index=False)
213
+
214
def run_evaluation():
    """Run model evaluation on SAMPLE_DATA and build a 2x2 summary figure.

    Returns:
        (fig, summary): a matplotlib Figure with four panels (confusion
        matrix, per-class metrics, confidence histogram, prediction pie)
        and a plain-text report string for the Gradio Textbox.
    """
    eval_results = analyzer.evaluate_model(
        SAMPLE_DATA['texts'],
        SAMPLE_DATA['labels']
    )

    # Create visualizations: a 2x2 grid of evaluation panels.
    fig, axes = plt.subplots(2, 2, figsize=(15, 12))

    # 1. Confusion matrix, annotated with raw counts per cell.
    cm = eval_results['confusion_matrix']
    labels = eval_results['labels']
    sns.heatmap(
        cm,
        annot=True,
        fmt='d',
        cmap='Blues',
        xticklabels=[analyzer.label_mapping.get(l, l) for l in labels],
        yticklabels=[analyzer.label_mapping.get(l, l) for l in labels],
        ax=axes[0, 0]
    )
    axes[0, 0].set_title('Confusion Matrix', fontsize=14, fontweight='bold')
    axes[0, 0].set_ylabel('True Label')
    axes[0, 0].set_xlabel('Predicted Label')

    # 2. Per-class metrics as grouped bars (precision / recall / F1).
    #    NOTE(review): assumes report keys are the raw label strings in
    #    `labels` — holds for the dict returned by evaluate_model.
    report = eval_results['classification_report']
    metrics_data = []
    for label in labels:
        if label in report:
            metrics_data.append({
                'Class': analyzer.label_mapping.get(label, label),
                'Precision': report[label]['precision'],
                'Recall': report[label]['recall'],
                'F1-Score': report[label]['f1-score']
            })

    df_metrics = pd.DataFrame(metrics_data)
    x = np.arange(len(df_metrics))
    width = 0.25  # bar width: three bars fit in each class slot

    axes[0, 1].bar(x - width, df_metrics['Precision'], width, label='Precision', alpha=0.8)
    axes[0, 1].bar(x, df_metrics['Recall'], width, label='Recall', alpha=0.8)
    axes[0, 1].bar(x + width, df_metrics['F1-Score'], width, label='F1-Score', alpha=0.8)
    axes[0, 1].set_xlabel('Class')
    axes[0, 1].set_ylabel('Score')
    axes[0, 1].set_title('Metrics per Class', fontsize=14, fontweight='bold')
    axes[0, 1].set_xticks(x)
    axes[0, 1].set_xticklabels(df_metrics['Class'], rotation=15)
    axes[0, 1].legend()
    axes[0, 1].set_ylim([0, 1.1])
    axes[0, 1].grid(axis='y', alpha=0.3)

    # 3. Confidence distribution across all predictions, with the mean marked.
    confidences = [p['confidence'] for p in eval_results['predictions']]
    axes[1, 0].hist(confidences, bins=20, color='skyblue', edgecolor='black', alpha=0.7)
    axes[1, 0].axvline(np.mean(confidences), color='red', linestyle='--',
                       label=f'Mean: {np.mean(confidences):.3f}', linewidth=2)
    axes[1, 0].set_xlabel('Confidence Score')
    axes[1, 0].set_ylabel('Frequency')
    axes[1, 0].set_title('Confidence Distribution', fontsize=14, fontweight='bold')
    axes[1, 0].legend()
    axes[1, 0].grid(axis='y', alpha=0.3)

    # 4. Distribution of the model's predicted labels as a pie chart.
    pred_labels = [p['label'] for p in eval_results['predictions']]
    label_counts = pd.Series(pred_labels).value_counts()
    colors = {'POSITIVE': '#4CAF50', 'NEGATIVE': '#F44336', 'NEUTRAL': '#FFC107'}
    plot_colors = [colors.get(l, '#999999') for l in label_counts.index]

    axes[1, 1].pie(
        label_counts.values,
        labels=[analyzer.label_mapping.get(l, l) for l in label_counts.index],
        autopct='%1.1f%%',
        colors=plot_colors,
        startangle=90
    )
    axes[1, 1].set_title('Prediction Distribution', fontsize=14, fontweight='bold')

    plt.tight_layout()

    # Plain-text summary shown next to the figure.
    summary = f"""
    ╔══════════════════════════════════════════════════╗
    ║        EVALUASI MODEL SENTIMENT ANALYSIS         ║
    ╚══════════════════════════════════════════════════╝

    📊 Overall Accuracy: {eval_results['accuracy']:.2%}

    📈 Detailed Metrics:
    """

    # Append one metrics section per class present in the report.
    for label in labels:
        if label in report:
            summary += f"""
    {analyzer.label_mapping.get(label, label)}:
    - Precision: {report[label]['precision']:.3f}
    - Recall: {report[label]['recall']:.3f}
    - F1-Score: {report[label]['f1-score']:.3f}
    - Support: {report[label]['support']}
    """

    summary += f"""

    💡 Interpretasi:
    - Model menunjukkan performa {'BAIK' if eval_results['accuracy'] > 0.8 else 'CUKUP BAIK' if eval_results['accuracy'] > 0.6 else 'PERLU DITINGKATKAN'}
    - Confidence rata-rata: {np.mean(confidences):.3f}
    - Cocok untuk filtering keluhan masyarakat secara otomatis
    - Dapat menangani slang dan variasi bahasa Indonesia

    Waktu Evaluasi: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
    """

    return fig, summary
329
+
330
# Create the Gradio interface. The top-level `with` block only builds the UI
# graph; nothing is served until demo.launch() at the bottom of the file.
with gr.Blocks(title="Analisis Sentimen Keluhan Masyarakat", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""
    # 🎯 Sistem Analisis Sentimen Keluhan Masyarakat

    **Model**: Indonesian RoBERTa Sentiment Classifier

    Sistem ini menggunakan model `w11wo/indonesian-roberta-base-sentiment-classifier` yang:
    - ✅ Sudah pre-trained untuk analisis sentimen Bahasa Indonesia
    - ⚡ Cepat dan efisien (berbasis RoBERTa)
    - 🎭 Tahan terhadap slang dan variasi bahasa informal
    - 🎯 Akurat untuk membedakan keluhan, pujian, dan pertanyaan

    ---
    """)

    with gr.Tabs():
        # Tab 1: single-text analysis, wired to analyze_single_text.
        with gr.Tab("📝 Analisis Teks Tunggal"):
            gr.Markdown("### Analisis sentimen untuk satu teks")
            with gr.Row():
                with gr.Column():
                    input_text = gr.Textbox(
                        label="Masukkan Teks",
                        placeholder="Contoh: Bantuan sangat lambat, sudah 3 hari belum dapat makanan!",
                        lines=5
                    )
                    analyze_btn = gr.Button("🔍 Analisis", variant="primary")
                with gr.Column():
                    output_single = gr.Markdown(label="Hasil Analisis")

            # Clickable example inputs shown below the textbox.
            gr.Examples(
                examples=[
                    ["Bantuan bencana sangat lambat, kami sudah 3 hari belum dapat makanan!"],
                    ["Terima kasih banyak atas bantuan yang cepat, sangat membantu kami"],
                    ["Kapan bantuan akan tiba di lokasi kami?"],
                    ["Hadeh parah banget nih pelayanan, gak jelas!"],
                    ["Mantap jiwa pelayanannya, cepet banget"],
                ],
                inputs=input_text
            )

            analyze_btn.click(analyze_single_text, inputs=input_text, outputs=output_single)

        # Tab 2: batch analysis (one message per line), wired to analyze_batch_texts.
        with gr.Tab("📊 Analisis Batch"):
            gr.Markdown("### Analisis sentimen untuk multiple teks (satu per baris)")
            with gr.Row():
                with gr.Column():
                    input_batch = gr.Textbox(
                        label="Masukkan Teks (satu per baris)",
                        placeholder="Contoh:\nBantuan sangat lambat!\nTerima kasih banyak\nKapan bantuan tiba?",
                        lines=10
                    )
                    batch_btn = gr.Button("🔍 Analisis Batch", variant="primary")

                    load_sample_btn = gr.Button("📋 Load Sample Data", variant="secondary")
                with gr.Column():
                    output_batch = gr.Markdown(label="Hasil Analisis Batch")

            batch_btn.click(analyze_batch_texts, inputs=input_batch, outputs=output_batch)
            # Fill the textbox with the bundled sample messages for a quick demo.
            load_sample_btn.click(
                lambda: '\n'.join(SAMPLE_DATA['texts']),
                outputs=input_batch
            )

        # Tab 3: model evaluation over SAMPLE_DATA, wired to run_evaluation.
        with gr.Tab("📈 Evaluasi Model"):
            gr.Markdown("""
            ### Evaluasi Performa Model

            Menggunakan dataset sample untuk mengevaluasi performa model dengan berbagai metrik.
            """)
            eval_btn = gr.Button("🚀 Jalankan Evaluasi", variant="primary", size="lg")

            with gr.Row():
                eval_plot = gr.Plot(label="Visualisasi Evaluasi")

            eval_summary = gr.Textbox(label="Ringkasan Evaluasi", lines=20)

            # run_evaluation returns (fig, summary) matching [eval_plot, eval_summary].
            eval_btn.click(run_evaluation, outputs=[eval_plot, eval_summary])

        # Tab 4: static documentation / about page (markdown only, no handlers).
        with gr.Tab("ℹ️ Informasi"):
            gr.Markdown("""
            ## 📚 Tentang Sistem

            ### Model yang Digunakan
            **w11wo/indonesian-roberta-base-sentiment-classifier**

            #### Kenapa Model Ini?
            1. **Pre-trained & Siap Pakai**: Tidak perlu training tambahan
            2. **Berbasis RoBERTa**: Lebih cepat dan efisien dibanding BERT
            3. **Bahasa Indonesia**: Dilatih khusus untuk teks Bahasa Indonesia
            4. **Tahan Slang**: Mampu memahami variasi bahasa informal dan slang
            5. **Akurat**: Presisi tinggi untuk klasifikasi sentimen

            ### Output Labels
            - **POSITIVE**: Feedback positif, pujian, apresiasi
            - **NEGATIVE**: Keluhan, kritik, masalah yang perlu ditangani
            - **NEUTRAL**: Pertanyaan, informasi netral, inquiry

            ### Use Case: Admin Bencana
            Sistem ini sangat cocok untuk:
            - ✅ Filtering keluhan prioritas tinggi dari ribuan pesan
            - ✅ Identifikasi masalah urgent yang perlu tindakan segera
            - ✅ Monitoring sentimen masyarakat terhadap bantuan
            - ✅ Analisis feedback untuk perbaikan layanan

            ### Perbandingan Model (yang dipilih vs alternatif)

            | Model | Kecepatan | Akurasi | Tahan Slang | Siap Pakai |
            |-------|-----------|---------|-------------|------------|
            | **w11wo/roberta-sentiment** ✅ | ⚡⚡⚡ | ⭐⭐⭐⭐ | ✅ | ✅ |
            | indobert-base-p1 | ⚡⚡ | ⭐⭐⭐⭐ | ⚠️ | ❌ (perlu fine-tune) |
            | indobart-v2 | ⚡ | ⭐⭐⭐ | ✅ | ❌ (untuk summarization) |
            | mdhugol/indobert | ⚡⚡ | ⭐⭐⭐⭐⭐ | ✅ | ✅ |

            ### Tech Stack
            - 🤗 Transformers (Hugging Face)
            - 🎨 Gradio (Interface)
            - 📊 Scikit-learn (Evaluation)
            - 📈 Matplotlib & Seaborn (Visualization)
            - 🐳 Docker (Deployment)

            ### Tips Penggunaan
            1. Untuk analisis cepat 1-2 teks → gunakan tab "Analisis Teks Tunggal"
            2. Untuk filtering ribuan pesan → gunakan tab "Analisis Batch"
            3. Untuk validasi model → gunakan tab "Evaluasi Model"
            4. Confidence ≥ 80% → sangat yakin, prioritaskan untuk keluhan
            5. Confidence < 60% → review manual disarankan

            ---

            **Dibuat dengan ❤️ untuk membantu admin bencana melayani masyarakat dengan lebih efisien**
            """)

if __name__ == "__main__":
    # 0.0.0.0 binds all interfaces (required inside Docker); 7860 is the
    # default Gradio / Hugging Face Spaces port. share=False keeps it local.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)