LovnishVerma commited on
Commit
8f5a0a3
·
verified ยท
1 Parent(s): eeb6db3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -370
app.py CHANGED
@@ -1,94 +1,49 @@
1
- # =========================================================
2
- # MeetGenius AI โ€“ LOCAL STABLE VERSION (100% FIXED)
3
- # =========================================================
4
-
5
  import os
6
- import re
7
- import uuid
8
  import shutil
9
  import threading
10
  import torch
11
- from io import BytesIO
12
- import numpy as np
13
- import subprocess # Replacing os.system for stability
14
- import time
15
  from flask import Flask, render_template, request, jsonify, send_file
16
  import yt_dlp
17
  import whisper
18
  from sentence_transformers import SentenceTransformer
19
  from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
20
  from docx import Document
21
- from reportlab.lib.pagesizes import A4
22
  from reportlab.pdfgen import canvas
23
- from reportlab.lib.utils import simpleSplit
 
 
24
 
25
  app = Flask(__name__)
26
  LOCK = threading.Lock()
27
 
28
  # ---- CONFIGURATION ----
29
- JOB_TTL_SECONDS = 30 * 60 # 30 minutes
30
- CLEANUP_INTERVAL = 10 * 60 # 10 minutes
31
- BASE_DIR = "jobs"
32
  os.makedirs(BASE_DIR, exist_ok=True)
33
  JOB_STORE = {}
34
 
35
  # --- DEVICE SETUP ---
36
- DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
37
  print(f"๐Ÿš€ Device selected: {DEVICE}")
38
 
39
- if DEVICE == "cuda":
40
- print(f"๐ŸŽฎ GPU: {torch.cuda.get_device_name(0)}")
41
- torch.cuda.empty_cache() # Clear old cache
42
-
43
- # --- FFMPEG AUTO-DETECTION (FIXED) ---
44
- # Ye ab khud dhundega ki FFmpeg kahan install hai
45
- if shutil.which("ffmpeg") is None:
46
- print("โš ๏ธ CRITICAL ERROR: FFmpeg is not installed or not in PATH!")
47
- print("๐Ÿ‘‰ Please install FFmpeg from https://ffmpeg.org/download.html and add to System Variables.")
48
- # Fallback for Windows default path if user forgot to add to PATH
49
- possible_path = r"C:\ffmpeg\bin"
50
- if os.path.exists(possible_path):
51
- os.environ["PATH"] += os.pathsep + possible_path
52
- print(f"โœ… Found FFmpeg at {possible_path}, added to path.")
53
- else:
54
- print("โŒ FFmpeg not found. Audio processing will fail.")
55
-
56
-
57
-
58
-
59
- # --- LOAD MODELS ---
60
- print("โณ Loading AI models (One-time setup)...")
61
- try:
62
- whisper_model = whisper.load_model("base", device=DEVICE)
63
- embedder = SentenceTransformer("all-MiniLM-L6-v2", device=DEVICE)
64
-
65
- qa_model_name = "google/flan-t5-base"
66
- tokenizer = AutoTokenizer.from_pretrained(qa_model_name)
67
- qa_model = AutoModelForSeq2SeqLM.from_pretrained(qa_model_name).to(DEVICE)
68
- print("โœ… All Models loaded successfully")
69
- except Exception as e:
70
- print(f"โŒ Model Loading Error: {e}")
71
- exit(1)
72
 
73
  # --- HELPER FUNCTIONS ---
74
-
75
  def extract_audio(input_path, output_path):
76
- """
77
- Uses subprocess for safe execution.
78
- Converts video/audio to 16kHz WAV mono for Whisper.
79
- """
80
- command = [
81
- "ffmpeg", "-y", "-i", input_path,
82
- "-ac", "1", "-ar", "16000", "-vn", output_path
83
- ]
84
- # subprocess.run is safer than os.system
85
- try:
86
- subprocess.run(command, check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
87
- except subprocess.CalledProcessError as e:
88
- raise Exception(f"FFmpeg conversion failed: {e}")
89
 
90
  def clean_sentences(text):
91
- text = re.sub(r'\b(\w+)( \1\b){2,}', r'\1', text, flags=re.I) # Remove repetitions
 
92
  raw = re.split(r'(?<=[.!?]) +', text)
93
  return [re.sub(r'\s+', ' ', s.strip()) for s in raw if len(s.split()) > 2]
94
 
@@ -96,346 +51,81 @@ def to_paragraphs(sentences, n=4):
96
  return "\n\n".join(" ".join(sentences[i:i+n]) for i in range(0, len(sentences), n))
97
 
98
  def summarize(sentences):
99
- if not sentences:
100
- return "No content.", "No conclusion."
101
-
102
- # 1. Embeddings for Summary
103
- with torch.no_grad():
104
- vecs = embedder.encode(sentences, normalize_embeddings=True, batch_size=8)
105
 
 
 
106
  mean_vec = np.mean(vecs, axis=0)
107
-
108
- # FIX: Store index (i) to remember order
109
- # Format: (Score, Index, Sentence)
110
- scored_with_index = []
111
- for i, (s, v) in enumerate(zip(sentences, vecs)):
112
- score = np.dot(v, mean_vec)
113
- scored_with_index.append((score, i, s))
114
-
115
- # Step A: Pick Top 3 most important sentences
116
- top_3 = sorted(scored_with_index, key=lambda x: x[0], reverse=True)[:3]
117
-
118
- # Step B: Sort those Top 3 back by Index (Time Order)
119
- # Isse pehli line pehle aayegi, aur aakhri line baad mein
120
- top_3_chronological = sorted(top_3, key=lambda x: x[1])
121
-
122
- summary = " ".join(s for _, _, s in top_3_chronological)
123
-
124
- # 2. Abstractive Conclusion (Same as before)
125
- context_text = " ".join(sentences[:20])
126
- prompt = f"Summarize the main point of this text in one professional paragraph:\n\n{context_text}"
127
-
128
- input_ids = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True).input_ids.to(DEVICE)
129
-
130
- with torch.no_grad():
131
- output = qa_model.generate(input_ids, max_length=150, num_beams=4, early_stopping=True)
132
-
133
  conclusion = tokenizer.decode(output[0], skip_special_tokens=True)
134
- return summary, conclusion
135
-
136
- def build_vectors(job_id):
137
- JOB_STORE[job_id]["vectors"] = []
138
- chunks = JOB_STORE[job_id]["notes"].split("\n\n")
139
- if not chunks or chunks == ['']: return
140
-
141
- with torch.no_grad():
142
- vecs = embedder.encode(chunks, normalize_embeddings=True)
143
 
144
- for c, v in zip(chunks, vecs):
145
- JOB_STORE[job_id]["vectors"].append({"text": c, "vector": v})
146
 
147
  def transcribe(path):
148
- # Move to GPU, transcribe, then clear cache
149
- with torch.no_grad():
150
- res = whisper_model.transcribe(path, fp16=(DEVICE=="cuda"), verbose=False)
151
-
152
- if DEVICE == "cuda":
153
- torch.cuda.empty_cache() # IMPORTANT: Free up VRAM after transcription
154
-
155
- return " ".join(s["text"] for s in res["segments"])
156
 
157
- def generate_ai_answer(question, context):
158
- input_text = f"answer based on context: {context} question: {question}"
159
- input_ids = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True).input_ids.to(DEVICE)
160
-
161
- with torch.no_grad():
162
- outputs = qa_model.generate(input_ids, max_length=200, num_beams=4, early_stopping=True)
163
-
164
- return tokenizer.decode(outputs[0], skip_special_tokens=True)
165
-
166
- # --- FLASK ROUTES ---
167
 
 
168
  @app.route("/")
169
  def index():
170
  return render_template("landing.html")
171
 
172
- def new_job():
173
- # Memory Safety: Remove oldest job if > 20
174
- if len(JOB_STORE) >= 20:
175
- oldest = min(JOB_STORE.keys(), key=lambda k: JOB_STORE[k]['created_at'])
176
- JOB_STORE.pop(oldest)
177
-
178
- jid = str(uuid.uuid4())
179
- JOB_STORE[jid] = {
180
- "summary": "", "notes": "", "conclusion": "",
181
- "vectors": [], "created_at": time.time()
182
- }
183
- return jid
184
-
185
  @app.route("/dashboard", methods=["GET", "POST"])
186
  def dashboard():
187
- active_tab = request.args.get('tab', 'youtube')
188
  context = {}
189
-
 
190
  if request.method == "POST":
191
- # Changed to Blocking Lock: Wait instead of error
192
- with LOCK:
193
- job_id = new_job()
194
- job_dir = os.path.join(BASE_DIR, job_id)
195
- os.makedirs(job_dir, exist_ok=True)
196
-
197
  try:
198
  url = request.form.get("youtube_url")
199
- if not url: raise Exception("URL is missing")
200
 
201
- print(f"๐Ÿ“ฅ Processing: {url}")
 
 
202
 
 
203
  ydl_opts = {
204
- "format": "bestaudio/best",
205
- "outtmpl": os.path.join(job_dir, "audio.%(ext)s"),
206
- "postprocessors": [{"key": "FFmpegExtractAudio", "preferredcodec": "wav"}],
207
- "quiet": True
208
  }
209
-
210
  with yt_dlp.YoutubeDL(ydl_opts) as ydl:
211
  ydl.download([url])
212
 
213
- # Find the wav file
214
- audio_files = [f for f in os.listdir(job_dir) if f.endswith(".wav")]
215
- if not audio_files: raise Exception("Download failed, no audio file found.")
216
- audio_path = os.path.join(job_dir, audio_files[0])
217
-
218
- # Transcribe & Process
219
- text = transcribe(audio_path)
220
  sents = clean_sentences(text)
221
- if not sents: raise Exception("Audio transcribed but text was empty.")
222
-
223
  summary, conclusion = summarize(sents)
224
  notes = to_paragraphs(sents)
225
 
226
  JOB_STORE[job_id].update({"summary": summary, "notes": notes, "conclusion": conclusion})
227
- build_vectors(job_id)
228
 
229
- context = {
230
- "job_id": job_id, "summary": summary,
231
- "transcript": notes, "conclusion": conclusion
232
- }
233
 
234
  except Exception as e:
235
- print(f"โŒ Error: {e}")
236
- context["error"] = f"Processing Failed: {str(e)}"
237
- finally:
238
- shutil.rmtree(job_dir, ignore_errors=True)
239
-
240
  return render_template("dashboard.html", active_tab=active_tab, **context)
241
 
242
- @app.route("/enterprise", methods=["POST"])
243
- def enterprise_submit():
244
- with LOCK: # Blocking Lock
245
- file = request.files.get("audio_file")
246
- if not file:
247
- return render_template("dashboard.html", active_tab="enterprise", error="No file uploaded")
248
-
249
- job_id = new_job()
250
- job_dir = os.path.join(BASE_DIR, job_id)
251
- os.makedirs(job_dir, exist_ok=True)
252
-
253
- try:
254
- original_path = os.path.join(job_dir, file.filename)
255
- file.save(original_path)
256
- wav_path = os.path.join(job_dir, "clean.wav")
257
-
258
- # Convert to safe format
259
- extract_audio(original_path, wav_path)
260
-
261
- text = transcribe(wav_path)
262
- sents = clean_sentences(text)
263
-
264
- if not sents: raise Exception("No speech detected.")
265
-
266
- summary, conclusion = summarize(sents)
267
- notes = to_paragraphs(sents)
268
-
269
- JOB_STORE[job_id].update({"summary": summary, "notes": notes, "conclusion": conclusion})
270
- build_vectors(job_id)
271
-
272
- return render_template("dashboard.html", active_tab="enterprise", job_id=job_id, summary=summary, transcript=notes, conclusion=conclusion)
273
-
274
- except Exception as e:
275
- return render_template("dashboard.html", active_tab="enterprise", error=str(e))
276
- finally:
277
- shutil.rmtree(job_dir, ignore_errors=True)
278
-
279
- @app.route("/live_submit", methods=["POST"])
280
- def live_submit():
281
- with LOCK: # Locking taaki dusra process clash na kare
282
- file = request.files.get("audio_file")
283
- if not file:
284
- return jsonify({"error": "No audio received"}), 400
285
-
286
- job_id = new_job()
287
- job_dir = os.path.join(BASE_DIR, job_id)
288
- os.makedirs(job_dir, exist_ok=True)
289
-
290
- try:
291
- # Live recording aksar .webm ya .ogg format mein hoti hai
292
- original_path = os.path.join(job_dir, "live_input.webm")
293
- file.save(original_path)
294
-
295
- wav_path = os.path.join(job_dir, "clean.wav")
296
-
297
- # Convert WebM/OGG to WAV for Whisper
298
- extract_audio(original_path, wav_path)
299
-
300
- # --- Process Audio (Same as Enterprise) ---
301
- text = transcribe(wav_path)
302
- sents = clean_sentences(text)
303
-
304
- if not sents: raise Exception("No speech detected in live recording.")
305
-
306
- summary, conclusion = summarize(sents)
307
- notes = to_paragraphs(sents)
308
-
309
- JOB_STORE[job_id].update({"summary": summary, "notes": notes, "conclusion": conclusion})
310
- build_vectors(job_id)
311
-
312
- # JSON response return karein kyuki ye AJAX call hoga
313
- return jsonify({
314
- "job_id": job_id,
315
- "summary": summary,
316
- "transcript": notes,
317
- "conclusion": conclusion,
318
- "status": "success"
319
- })
320
-
321
- except Exception as e:
322
- print(f"โŒ Live Error: {e}")
323
- return jsonify({"error": str(e)}), 500
324
- finally:
325
- shutil.rmtree(job_dir, ignore_errors=True)
326
-
327
- @app.route("/ask", methods=["POST"])
328
- def ask():
329
- data = request.json if request.is_json else request.form
330
- jid = data.get("job_id")
331
- question = data.get("question")
332
-
333
- if not jid or jid not in JOB_STORE:
334
- return jsonify({"answer": "Session expired or invalid. Please upload again."})
335
-
336
- store = JOB_STORE[jid].get("vectors", [])
337
- if not store:
338
- return jsonify({"answer": "No content available to answer from."})
339
-
340
- try:
341
- q_vec = embedder.encode(question, normalize_embeddings=True)
342
- scored = sorted([(np.dot(q_vec, i["vector"]), i["text"]) for i in store], reverse=True)[:3]
343
- context_text = " ".join([t for _, t in scored])
344
-
345
- answer = generate_ai_answer(question, context_text)
346
- return jsonify({"answer": answer})
347
- except Exception as e:
348
- return jsonify({"answer": "I couldn't generate an answer due to an error."})
349
-
350
- # --- DOWNLOAD ROUTES (UNCHANGED BUT SAFE) ---
351
- @app.route("/download/word/<job_id>")
352
- def download_word(job_id):
353
- d = JOB_STORE.get(job_id)
354
- if not d: return "Expired ID", 404
355
- doc = Document()
356
- doc.add_heading("AI Summary Report", 0)
357
- doc.add_paragraph(f"Generated on: {time.ctime()}")
358
- doc.add_heading("Summary", 1)
359
- doc.add_paragraph(d["summary"])
360
- doc.add_heading("Conclusion", 1)
361
- doc.add_paragraph(d["conclusion"])
362
- doc.add_heading("Full Transcript", 1)
363
- doc.add_paragraph(d["notes"])
364
-
365
- path = f"{job_id}.docx"
366
- doc.save(path)
367
- return send_file(path, as_attachment=True)
368
-
369
- @app.route("/download/pdf/<job_id>")
370
- def download_pdf(job_id):
371
- d = JOB_STORE.get(job_id)
372
- if not d:
373
- return "Expired ID", 404
374
-
375
- pdf_path = f"{job_id}_MeetGenius_Report.pdf"
376
-
377
- c = canvas.Canvas(pdf_path, pagesize=A4)
378
- width, height = A4
379
-
380
- x_margin = 40
381
- y = height - 50
382
-
383
- def draw_text(title, text):
384
- nonlocal y
385
- c.setFont("Helvetica-Bold", 14)
386
- c.drawString(x_margin, y, title)
387
- y -= 25
388
-
389
- c.setFont("Helvetica", 11)
390
- lines = simpleSplit(text, "Helvetica", 11, width - 80)
391
-
392
- for line in lines:
393
- if y < 50:
394
- c.showPage()
395
- y = height - 50
396
- c.setFont("Helvetica", 11)
397
- c.drawString(x_margin, y, line)
398
- y -= 15
399
-
400
- y -= 20
401
-
402
- draw_text("AI Summary Report", f"Generated on: {time.ctime()}")
403
- draw_text("Summary", d["summary"])
404
- draw_text("Conclusion", d["conclusion"])
405
- draw_text("Full Transcript", d["notes"])
406
-
407
- c.save()
408
-
409
- return send_file(
410
- pdf_path,
411
- as_attachment=True,
412
- download_name="MeetGenius_AI_Report.pdf",
413
- mimetype="application/pdf"
414
- )
415
-
416
-
417
- # ---- CLEANUP THREAD ----
418
- def cleanup_old_jobs():
419
- while True:
420
- time.sleep(CLEANUP_INTERVAL)
421
- now = time.time()
422
- with LOCK:
423
- # Clean Dictionary
424
- keys_to_remove = [k for k, v in JOB_STORE.items() if now - v['created_at'] > JOB_TTL_SECONDS]
425
- for k in keys_to_remove:
426
- del JOB_STORE[k]
427
-
428
- # Clean Files
429
- for f in os.listdir("."):
430
- if f.endswith((".pdf", ".docx", ".wav")):
431
- try:
432
- if now - os.path.getmtime(f) > JOB_TTL_SECONDS:
433
- os.remove(f)
434
- except: pass
435
- print("๐Ÿงน Cleanup cycle completed.")
436
 
437
  if __name__ == "__main__":
438
- t = threading.Thread(target=cleanup_old_jobs, daemon=True)
439
- t.start()
440
- # Debug=True mat rakhna production/heavy files ke sath
441
- app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 7860)), debug=False)
 
 
 
 
 
1
import os
import re
import shutil
import subprocess
import threading
import time
import uuid

import numpy as np
import torch
import whisper
import yt_dlp
from docx import Document
from flask import Flask, render_template, request, jsonify, send_file
from reportlab.lib.pagesizes import A4
from reportlab.pdfgen import canvas
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
16
 
17
  app = Flask(__name__)
18
  LOCK = threading.Lock()
19
 
20
  # ---- CONFIGURATION ----
21
+ # Create a writable directory specifically for Hugging Face
22
+ BASE_DIR = "jobs"
 
23
  os.makedirs(BASE_DIR, exist_ok=True)
24
  JOB_STORE = {}
25
 
26
  # --- DEVICE SETUP ---
27
+ DEVICE = "cpu" # Force CPU for free tier stability
28
  print(f"๐Ÿš€ Device selected: {DEVICE}")
29
 
30
+ # --- MODEL LOADING (Cached) ---
31
+ # Models will download once and be cached in the Docker image if configured right,
32
+ # but for simplicity, they will download on first run (takes 30s).
33
+ print("โณ Loading AI models...")
34
+ whisper_model = whisper.load_model("base", device=DEVICE)
35
+ embedder = SentenceTransformer("all-MiniLM-L6-v2", device=DEVICE)
36
+ tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-base")
37
+ qa_model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-base").to(DEVICE)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38
 
39
  # --- HELPER FUNCTIONS ---
 
40
  def extract_audio(input_path, output_path):
41
+ command = ["ffmpeg", "-y", "-i", input_path, "-ac", "1", "-ar", "16000", "-vn", output_path]
42
+ subprocess.run(command, check=True)
 
 
 
 
 
 
 
 
 
 
 
43
 
44
  def clean_sentences(text):
45
+ import re
46
+ text = re.sub(r'\b(\w+)( \1\b){2,}', r'\1', text, flags=re.I)
47
  raw = re.split(r'(?<=[.!?]) +', text)
48
  return [re.sub(r'\s+', ' ', s.strip()) for s in raw if len(s.split()) > 2]
49
 
 
51
  return "\n\n".join(" ".join(sentences[i:i+n]) for i in range(0, len(sentences), n))
52
 
53
  def summarize(sentences):
54
+ if not sentences: return "No content.", "No conclusion."
 
 
 
 
 
55
 
56
+ # Simple Extractive Summary (Top 3)
57
+ vecs = embedder.encode(sentences, normalize_embeddings=True)
58
  mean_vec = np.mean(vecs, axis=0)
59
+ scores = np.dot(vecs, mean_vec)
60
+ top_indices = np.argsort(scores)[-3:]
61
+ summary = " ".join([sentences[i] for i in sorted(top_indices)])
62
+
63
+ # Abstractive Conclusion
64
+ context = " ".join(sentences[:15])
65
+ input_ids = tokenizer(f"summarize: {context}", return_tensors="pt", truncation=True).input_ids.to(DEVICE)
66
+ output = qa_model.generate(input_ids, max_length=100)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
67
  conclusion = tokenizer.decode(output[0], skip_special_tokens=True)
 
 
 
 
 
 
 
 
 
68
 
69
+ return summary, conclusion
 
70
 
71
  def transcribe(path):
72
+ result = whisper_model.transcribe(path, fp16=False)
73
+ return result["text"]
 
 
 
 
 
 
74
 
75
+ def new_job():
76
+ import uuid
77
+ jid = str(uuid.uuid4())
78
+ JOB_STORE[jid] = {"created_at": time.time()}
79
+ return jid
 
 
 
 
 
80
 
81
+ # --- ROUTES ---
82
  @app.route("/")
83
  def index():
84
  return render_template("landing.html")
85
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  @app.route("/dashboard", methods=["GET", "POST"])
87
  def dashboard():
 
88
  context = {}
89
+ active_tab = request.args.get('tab', 'youtube')
90
+
91
  if request.method == "POST":
92
+ with LOCK:
 
 
 
 
 
93
  try:
94
  url = request.form.get("youtube_url")
95
+ if not url: raise Exception("No URL provided")
96
 
97
+ job_id = new_job()
98
+ job_dir = os.path.join(BASE_DIR, job_id)
99
+ os.makedirs(job_dir, exist_ok=True)
100
 
101
+ # Download
102
  ydl_opts = {
103
+ 'format': 'bestaudio/best',
104
+ 'outtmpl': os.path.join(job_dir, 'audio.%(ext)s'),
105
+ 'postprocessors': [{'key': 'FFmpegExtractAudio','preferredcodec': 'wav'}],
 
106
  }
 
107
  with yt_dlp.YoutubeDL(ydl_opts) as ydl:
108
  ydl.download([url])
109
 
110
+ # Process
111
+ wav_path = os.path.join(job_dir, "audio.wav")
112
+ text = transcribe(wav_path)
 
 
 
 
113
  sents = clean_sentences(text)
 
 
114
  summary, conclusion = summarize(sents)
115
  notes = to_paragraphs(sents)
116
 
117
  JOB_STORE[job_id].update({"summary": summary, "notes": notes, "conclusion": conclusion})
 
118
 
119
+ context = {"job_id": job_id, "summary": summary, "transcript": notes, "conclusion": conclusion}
120
+ shutil.rmtree(job_dir, ignore_errors=True)
 
 
121
 
122
  except Exception as e:
123
+ context["error"] = str(e)
124
+
 
 
 
125
  return render_template("dashboard.html", active_tab=active_tab, **context)
126
 
127
+ # ... (Add your other routes like /enterprise, /download here, same logic) ...
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
128
 
129
  if __name__ == "__main__":
130
+ # Hugging Face Spaces expects port 7860
131
+ app.run(host="0.0.0.0", port=7860)