Ellie5757575757 committed on
Commit
e1e69e9
·
verified ·
1 Parent(s): 28fc64c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +186 -226
app.py CHANGED
@@ -1,88 +1,98 @@
1
  #!/usr/bin/env python3
2
  """
3
- Pure Flask App for Aphasia Classification
4
- No Gradio dependency - works anywhere!
5
  """
6
 
7
- from flask import Flask, request, render_template_string, jsonify, send_from_directory
8
  import os
9
  import tempfile
10
  import logging
11
  import json
 
 
12
  from pathlib import Path
13
- import traceback
14
 
15
  # Set up logging
16
- logging.basicConfig(level=logging.INFO)
17
  logger = logging.getLogger(__name__)
18
 
19
  app = Flask(__name__)
20
- app.config['MAX_CONTENT_LENGTH'] = 100 * 1024 * 1024 # 100MB max
21
 
22
- print("πŸš€ Starting Aphasia Classification System (Flask)")
23
 
24
- def safe_import_modules():
25
- """Import pipeline modules safely"""
26
- modules = {}
 
 
 
 
 
27
 
28
- try:
29
- from utils_audio import convert_to_wav
30
- modules['convert_to_wav'] = convert_to_wav
31
- logger.info("βœ“ utils_audio imported")
32
- except Exception as e:
33
- logger.error(f"βœ— utils_audio failed: {e}")
34
- modules['convert_to_wav'] = None
35
 
36
  try:
 
 
 
 
 
 
 
 
37
  from to_cha import to_cha_from_wav
38
- modules['to_cha_from_wav'] = to_cha_from_wav
39
- logger.info("βœ“ to_cha imported")
40
- except Exception as e:
41
- logger.error(f"βœ— to_cha failed: {e}")
42
- modules['to_cha_from_wav'] = None
43
-
44
- try:
45
  from cha_json import cha_to_json_file
46
- modules['cha_to_json_file'] = cha_to_json_file
47
- logger.info("βœ“ cha_json imported")
48
- except Exception as e:
49
- logger.error(f"βœ— cha_json failed: {e}")
50
- modules['cha_to_json_file'] = None
51
-
52
- try:
53
  from output import predict_from_chajson
54
- modules['predict_from_chajson'] = predict_from_chajson
55
- logger.info("βœ“ output imported")
 
 
 
 
 
 
56
  except Exception as e:
57
- logger.error(f"βœ— output failed: {e}")
58
- modules['predict_from_chajson'] = None
59
-
60
- return modules
 
 
 
 
61
 
62
- # Import modules
63
- MODULES = safe_import_modules()
64
- MODEL_DIR = "."
65
 
66
- # HTML Template
67
  HTML_TEMPLATE = """
68
  <!DOCTYPE html>
69
  <html lang="en">
70
  <head>
71
  <meta charset="UTF-8">
72
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
73
- <title>🧠 Aphasia Classification System</title>
74
  <style>
75
- * {
76
- margin: 0;
77
- padding: 0;
78
- box-sizing: border-box;
79
- }
80
-
81
  body {
82
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
83
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
84
  min-height: 100vh;
85
  padding: 20px;
 
86
  }
87
 
88
  .container {
@@ -101,17 +111,6 @@ HTML_TEMPLATE = """
101
  text-align: center;
102
  }
103
 
104
- .header h1 {
105
- font-size: 2.5em;
106
- margin-bottom: 10px;
107
- font-weight: 700;
108
- }
109
-
110
- .header p {
111
- font-size: 1.1em;
112
- opacity: 0.9;
113
- }
114
-
115
  .content {
116
  padding: 40px 30px;
117
  }
@@ -124,24 +123,20 @@ HTML_TEMPLATE = """
124
  border-left: 4px solid #28a745;
125
  }
126
 
127
- .status h3 {
128
- color: #28a745;
129
- margin-bottom: 10px;
 
 
 
130
  }
131
 
132
  .upload-section {
133
  background: #f8f9fa;
134
  border-radius: 15px;
135
  padding: 30px;
136
- margin-bottom: 30px;
137
- border: 2px dashed #dee2e6;
138
  text-align: center;
139
- transition: all 0.3s ease;
140
- }
141
-
142
- .upload-section:hover {
143
- border-color: #667eea;
144
- background: #f0f4ff;
145
  }
146
 
147
  .file-input {
@@ -155,7 +150,6 @@ HTML_TEMPLATE = """
155
  padding: 15px 30px;
156
  border-radius: 50px;
157
  cursor: pointer;
158
- font-size: 1.1em;
159
  font-weight: 600;
160
  transition: transform 0.2s ease;
161
  }
@@ -170,22 +164,15 @@ HTML_TEMPLATE = """
170
  border: none;
171
  padding: 15px 40px;
172
  border-radius: 50px;
173
- font-size: 1.1em;
174
  font-weight: 600;
175
  cursor: pointer;
176
  margin-top: 20px;
177
  transition: all 0.2s ease;
178
  }
179
 
180
- .analyze-btn:hover {
181
- background: #218838;
182
- transform: translateY(-2px);
183
- }
184
-
185
  .analyze-btn:disabled {
186
  background: #6c757d;
187
  cursor: not-allowed;
188
- transform: none;
189
  }
190
 
191
  .results {
@@ -194,15 +181,8 @@ HTML_TEMPLATE = """
194
  padding: 30px;
195
  margin-top: 30px;
196
  display: none;
197
- }
198
-
199
- .results.success {
200
- border-left: 4px solid #28a745;
201
- }
202
-
203
- .results.error {
204
- border-left: 4px solid #dc3545;
205
- background: #fff5f5;
206
  }
207
 
208
  .loading {
@@ -226,30 +206,14 @@ HTML_TEMPLATE = """
226
  100% { transform: rotate(360deg); }
227
  }
228
 
229
- .supported-formats {
230
- text-align: center;
231
- color: #6c757d;
232
- margin-top: 15px;
233
- font-size: 0.9em;
234
- }
235
-
236
- .about {
237
- background: #fff;
238
- border-radius: 15px;
239
- padding: 30px;
240
- margin-top: 30px;
241
- border: 1px solid #dee2e6;
242
- }
243
-
244
- .about h3 {
245
- color: #333;
246
- margin-bottom: 15px;
247
- }
248
-
249
- .about p {
250
- color: #666;
251
- line-height: 1.6;
252
- margin-bottom: 10px;
253
  }
254
  </style>
255
  </head>
@@ -257,18 +221,19 @@ HTML_TEMPLATE = """
257
  <div class="container">
258
  <div class="header">
259
  <h1>🧠 Aphasia Classification</h1>
260
- <p>Advanced AI-powered speech analysis for aphasia type identification</p>
261
  </div>
262
 
263
  <div class="content">
264
- <div class="status">
265
- <h3>{{ status_title }}</h3>
266
- <div>{{ status_details | safe }}</div>
 
267
  </div>
268
 
269
  <div class="upload-section">
270
  <h3>πŸ“ Upload Audio File</h3>
271
- <p>Select an audio file containing speech for analysis</p>
272
 
273
  <form id="uploadForm" enctype="multipart/form-data">
274
  <input type="file" id="audioFile" name="audio" class="file-input" accept="audio/*" required>
@@ -281,47 +246,72 @@ HTML_TEMPLATE = """
281
  </button>
282
  </form>
283
 
284
- <div class="supported-formats">
285
- Supported: MP3, WAV, MP4, M4A, FLAC, OGG
286
- </div>
287
  </div>
288
 
289
  <div class="loading" id="loading">
290
  <div class="spinner"></div>
291
- <h3>πŸ”„ Analyzing Audio...</h3>
292
- <p>This may take 1-3 minutes depending on file size</p>
293
  </div>
294
 
295
- <div class="results" id="results">
296
- <div id="resultsContent"></div>
297
- </div>
298
-
299
- <div class="about">
300
- <h3>About This System</h3>
301
- <p>This AI system analyzes speech patterns to classify different types of aphasia, including:</p>
302
- <p><strong>β€’ Broca's Aphasia:</strong> Non-fluent speech with preserved comprehension</p>
303
- <p><strong>β€’ Wernicke's Aphasia:</strong> Fluent but often meaningless speech</p>
304
- <p><strong>β€’ Anomic Aphasia:</strong> Word-finding difficulties</p>
305
- <p><strong>β€’ Conduction Aphasia:</strong> Fluent speech with poor repetition</p>
306
- <p><strong>β€’ Global Aphasia:</strong> Severe impairment in all language areas</p>
307
- <br>
308
- <p><em>Note: This tool is for research and educational purposes. Always consult healthcare professionals for clinical decisions.</em></p>
309
- </div>
310
  </div>
311
  </div>
312
 
313
  <script>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
314
  document.getElementById('uploadForm').addEventListener('submit', async function(e) {
315
  e.preventDefault();
316
 
317
  const fileInput = document.getElementById('audioFile');
318
- const analyzeBtn = document.getElementById('analyzeBtn');
319
  const loading = document.getElementById('loading');
320
  const results = document.getElementById('results');
321
- const resultsContent = document.getElementById('resultsContent');
322
 
323
  if (!fileInput.files[0]) {
324
- alert('Please select an audio file first');
 
 
 
 
 
 
 
 
 
325
  return;
326
  }
327
 
@@ -329,7 +319,7 @@ HTML_TEMPLATE = """
329
  loading.style.display = 'block';
330
  results.style.display = 'none';
331
  analyzeBtn.disabled = true;
332
- analyzeBtn.textContent = 'πŸ”„ Processing...';
333
 
334
  try {
335
  const formData = new FormData();
@@ -342,32 +332,30 @@ HTML_TEMPLATE = """
342
 
343
  const data = await response.json();
344
 
345
- // Hide loading
346
  loading.style.display = 'none';
347
 
348
  if (data.success) {
349
- resultsContent.innerHTML = '<pre style="white-space: pre-wrap; font-family: inherit;">' + data.result + '</pre>';
350
- results.className = 'results success';
351
  } else {
352
- resultsContent.innerHTML = '<h3 style="color: #dc3545;">❌ Error</h3><p>' + data.error + '</p>';
353
- results.className = 'results error';
354
  }
355
 
356
  results.style.display = 'block';
357
 
358
  } catch (error) {
359
  loading.style.display = 'none';
360
- resultsContent.innerHTML = '<h3 style="color: #dc3545;">❌ Network Error</h3><p>Failed to process request: ' + error.message + '</p>';
361
- results.className = 'results error';
362
  results.style.display = 'block';
363
  }
364
 
365
- // Reset button
366
  analyzeBtn.disabled = false;
367
  analyzeBtn.textContent = 'πŸ” Analyze Speech';
368
  });
369
 
370
- // Update file label when file is selected
371
  document.getElementById('audioFile').addEventListener('change', function(e) {
372
  const label = document.querySelector('.file-label');
373
  if (e.target.files[0]) {
@@ -384,26 +372,29 @@ HTML_TEMPLATE = """
384
  @app.route('/')
385
  def index():
386
  """Main page"""
387
- # Check system status
388
- modules_ready = all(MODULES.values())
389
- missing_modules = [k for k, v in MODULES.items() if v is None]
390
-
391
- if modules_ready:
392
- status_title = "🟒 System Ready"
393
- status_details = "All components loaded successfully. Ready to process audio files."
394
- else:
395
- status_title = "🟑 System Loading"
396
- status_details = f"Missing modules: {', '.join(missing_modules)}<br>Some features may not be available."
397
-
398
- return render_template_string(HTML_TEMPLATE,
399
- status_title=status_title,
400
- status_details=status_details)
401
 
402
  @app.route('/analyze', methods=['POST'])
403
  def analyze_audio():
404
- """Process uploaded audio file"""
405
  try:
406
- # Check if file was uploaded
 
 
 
 
 
 
 
407
  if 'audio' not in request.files:
408
  return jsonify({'success': False, 'error': 'No audio file uploaded'})
409
 
@@ -411,37 +402,32 @@ def analyze_audio():
411
  if audio_file.filename == '':
412
  return jsonify({'success': False, 'error': 'No file selected'})
413
 
414
- # Check if modules are available
415
- if not all(MODULES.values()):
416
- missing = [k for k, v in MODULES.items() if v is None]
417
- return jsonify({'success': False, 'error': f'System not ready. Missing: {", ".join(missing)}'})
418
-
419
- # Save uploaded file temporarily
420
  with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(audio_file.filename)[1]) as tmp_file:
421
  audio_file.save(tmp_file.name)
422
- temp_audio_path = tmp_file.name
423
 
424
  try:
425
- logger.info("🎡 Starting audio processing pipeline...")
426
 
427
  # Step 1: Convert to WAV
428
- wav_path = MODULES['convert_to_wav'](temp_audio_path, sr=16000, mono=True)
429
- logger.info("βœ“ Audio converted to WAV")
430
 
431
- # Step 2: Generate CHA file
 
432
  cha_path = MODULES['to_cha_from_wav'](wav_path, lang="eng")
433
- logger.info("βœ“ CHA file generated")
434
 
435
- # Step 3: Convert CHA to JSON
 
436
  json_path, _ = MODULES['cha_to_json_file'](cha_path)
437
- logger.info("βœ“ JSON conversion completed")
438
 
439
- # Step 4: Run classification
440
- results = MODULES['predict_from_chajson'](MODEL_DIR, json_path, output_file=None)
441
- logger.info("βœ“ Classification completed")
442
 
443
- # Clean up temporary files
444
- for temp_file in [temp_audio_path, wav_path, cha_path, json_path]:
445
  try:
446
  os.unlink(temp_file)
447
  except:
@@ -451,79 +437,53 @@ def analyze_audio():
451
  if "predictions" in results and results["predictions"]:
452
  pred = results["predictions"][0]
453
 
454
- if "error" in pred:
455
- return jsonify({'success': False, 'error': f'Classification error: {pred["error"]}'})
456
-
457
- # Format the result text
458
  classification = pred["prediction"]["predicted_class"]
459
  confidence = pred["prediction"]["confidence_percentage"]
460
- type_name = pred["class_description"]["name"]
461
- description = pred["class_description"]["description"]
462
  severity = pred["additional_predictions"]["predicted_severity_level"]
463
  fluency = pred["additional_predictions"]["fluency_rating"]
464
 
465
  result_text = f"""🧠 APHASIA CLASSIFICATION RESULTS
466
 
467
- 🎯 Primary Classification: {classification}
468
  πŸ“Š Confidence: {confidence}
469
- πŸ“‹ Type: {type_name}
470
- πŸ“ˆ Severity Level: {severity}/3
471
- πŸ—£οΈ Fluency Rating: {fluency}
472
 
473
- πŸ“Š Top 3 Probability Rankings:"""
474
 
475
- # Add probability distribution
476
  prob_dist = pred["probability_distribution"]
477
  for i, (atype, info) in enumerate(list(prob_dist.items())[:3], 1):
478
  result_text += f"\n{i}. {atype}: {info['percentage']}"
479
 
480
  result_text += f"""
481
 
482
- πŸ“ Clinical Description:
483
- {description}
484
 
485
- πŸ“Š Processing Summary:
486
- β€’ Total sentences analyzed: {results.get('total_sentences', 'N/A')}
487
- β€’ Average confidence: {results.get('summary', {}).get('average_confidence', 'N/A')}
488
- β€’ Processing completed successfully
489
  """
490
 
491
  return jsonify({'success': True, 'result': result_text})
492
  else:
493
- return jsonify({'success': False, 'error': 'No predictions generated from the audio file'})
494
 
495
  except Exception as e:
496
- # Clean up temp file on error
497
  try:
498
- os.unlink(temp_audio_path)
499
  except:
500
  pass
501
  raise e
502
 
503
  except Exception as e:
504
  logger.error(f"Processing error: {e}")
505
- traceback.print_exc()
506
- return jsonify({'success': False, 'error': f'Processing failed: {str(e)}'})
507
-
508
- @app.route('/health')
509
- def health_check():
510
- """Health check endpoint"""
511
- modules_ready = all(MODULES.values())
512
- return jsonify({
513
- 'status': 'healthy' if modules_ready else 'degraded',
514
- 'modules': {k: v is not None for k, v in MODULES.items()},
515
- 'ready': modules_ready
516
- })
517
 
518
  if __name__ == '__main__':
519
- # Get port from environment (for Hugging Face Spaces)
520
  port = int(os.environ.get('PORT', 7860))
521
- host = os.environ.get('HOST', '0.0.0.0')
522
-
523
- print(f"πŸš€ Starting Flask app on {host}:{port}")
524
- print("πŸ“‹ Modules status:")
525
- for name, module in MODULES.items():
526
- status = "βœ“" if module else "❌"
527
- print(f" {status} {name}")
528
 
529
- app.run(host=host, port=port, debug=False)
 
1
  #!/usr/bin/env python3
2
  """
3
+ Lightweight Aphasia Classification App
4
+ Optimized for Hugging Face Spaces with lazy loading and fallbacks
5
  """
6
 
7
+ from flask import Flask, request, render_template_string, jsonify
8
  import os
9
  import tempfile
10
  import logging
11
  import json
12
+ import threading
13
+ import time
14
  from pathlib import Path
 
15
 
16
  # Set up logging
17
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
18
  logger = logging.getLogger(__name__)
19
 
20
  app = Flask(__name__)
21
+ app.config['MAX_CONTENT_LENGTH'] = 50 * 1024 * 1024 # 50MB max (reduced)
22
 
23
+ print("πŸš€ Starting Lightweight Aphasia Classification System")
24
 
25
# Global state shared between the Flask request handlers and the
# background loader thread. Simple reads/writes of these names are
# safe enough for status reporting under CPython's GIL.
MODULES = {}                        # maps step name -> imported pipeline callable
MODELS_LOADED = False               # flipped to True once every import succeeds
LOADING_STATUS = "Starting up..."   # human-readable progress shown by /status
+
30
def lazy_import_modules():
    """Import the processing pipeline on first use.

    Populates the module-level ``MODULES`` dict with the four pipeline
    callables and flips ``MODELS_LOADED`` when everything imported.
    ``LOADING_STATUS`` is updated step by step so the /status endpoint
    can report progress to the polling front-end.

    Returns:
        bool: True when all modules are (or already were) loaded,
        False when any import failed.
    """
    global MODULES, MODELS_LOADED, LOADING_STATUS

    # Idempotent: repeated calls after a successful load are free.
    if MODELS_LOADED:
        return True

    try:
        LOADING_STATUS = "Loading audio processing..."
        logger.info("Importing utils_audio...")
        from utils_audio import convert_to_wav
        MODULES['convert_to_wav'] = convert_to_wav
        logger.info("✓ Audio processing loaded")

        LOADING_STATUS = "Loading speech analysis..."
        logger.info("Importing to_cha...")
        from to_cha import to_cha_from_wav
        MODULES['to_cha_from_wav'] = to_cha_from_wav
        logger.info("✓ Speech analysis loaded")

        LOADING_STATUS = "Loading data conversion..."
        logger.info("Importing cha_json...")
        from cha_json import cha_to_json_file
        MODULES['cha_to_json_file'] = cha_to_json_file
        logger.info("✓ Data conversion loaded")

        LOADING_STATUS = "Loading AI model..."
        logger.info("Importing output...")
        from output import predict_from_chajson
        MODULES['predict_from_chajson'] = predict_from_chajson
        logger.info("✓ AI model loaded")

        MODELS_LOADED = True
        LOADING_STATUS = "Ready!"
        logger.info("🎉 All modules loaded successfully!")
        return True

    except Exception as e:
        # logger.exception keeps the full traceback; str(e) alone hides
        # where in the import chain the failure happened.
        logger.exception(f"Failed to load modules: {e}")
        LOADING_STATUS = f"Error: {str(e)}"
        return False
71
+
72
def background_loader():
    """Thread target: run the heavy imports off the request path."""
    logger.info("Starting background module loading...")
    lazy_import_modules()

# Kick off module loading immediately so the first page render is not
# blocked on model imports; daemon=True lets the process exit cleanly.
loading_thread = threading.Thread(target=background_loader, daemon=True)
loading_thread.start()
79
+ loading_thread.start()
80
 
81
+ # HTML Template (simplified)
82
  HTML_TEMPLATE = """
83
  <!DOCTYPE html>
84
  <html lang="en">
85
  <head>
86
  <meta charset="UTF-8">
87
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
88
+ <title>🧠 Aphasia Classification</title>
89
  <style>
 
 
 
 
 
 
90
  body {
91
  font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, sans-serif;
92
  background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
93
  min-height: 100vh;
94
  padding: 20px;
95
+ margin: 0;
96
  }
97
 
98
  .container {
 
111
  text-align: center;
112
  }
113
 
 
 
 
 
 
 
 
 
 
 
 
114
  .content {
115
  padding: 40px 30px;
116
  }
 
123
  border-left: 4px solid #28a745;
124
  }
125
 
126
+ .status.loading {
127
+ border-left-color: #ffc107;
128
+ }
129
+
130
+ .status.error {
131
+ border-left-color: #dc3545;
132
  }
133
 
134
  .upload-section {
135
  background: #f8f9fa;
136
  border-radius: 15px;
137
  padding: 30px;
 
 
138
  text-align: center;
139
+ margin-bottom: 30px;
 
 
 
 
 
140
  }
141
 
142
  .file-input {
 
150
  padding: 15px 30px;
151
  border-radius: 50px;
152
  cursor: pointer;
 
153
  font-weight: 600;
154
  transition: transform 0.2s ease;
155
  }
 
164
  border: none;
165
  padding: 15px 40px;
166
  border-radius: 50px;
 
167
  font-weight: 600;
168
  cursor: pointer;
169
  margin-top: 20px;
170
  transition: all 0.2s ease;
171
  }
172
 
 
 
 
 
 
173
  .analyze-btn:disabled {
174
  background: #6c757d;
175
  cursor: not-allowed;
 
176
  }
177
 
178
  .results {
 
181
  padding: 30px;
182
  margin-top: 30px;
183
  display: none;
184
+ white-space: pre-wrap;
185
+ font-family: monospace;
 
 
 
 
 
 
 
186
  }
187
 
188
  .loading {
 
206
  100% { transform: rotate(360deg); }
207
  }
208
 
209
+ .refresh-btn {
210
+ background: #17a2b8;
211
+ color: white;
212
+ border: none;
213
+ padding: 10px 20px;
214
+ border-radius: 25px;
215
+ cursor: pointer;
216
+ margin-left: 10px;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
217
  }
218
  </style>
219
  </head>
 
221
  <div class="container">
222
  <div class="header">
223
  <h1>🧠 Aphasia Classification</h1>
224
+ <p>AI-powered speech analysis for aphasia identification</p>
225
  </div>
226
 
227
  <div class="content">
228
+ <div class="status" id="statusBox">
229
+ <h3 id="statusTitle">πŸ”„ System Status</h3>
230
+ <div id="statusText">{{ status_message }}</div>
231
+ <button class="refresh-btn" onclick="checkStatus()">Refresh Status</button>
232
  </div>
233
 
234
  <div class="upload-section">
235
  <h3>πŸ“ Upload Audio File</h3>
236
+ <p>Upload speech audio for aphasia classification</p>
237
 
238
  <form id="uploadForm" enctype="multipart/form-data">
239
  <input type="file" id="audioFile" name="audio" class="file-input" accept="audio/*" required>
 
246
  </button>
247
  </form>
248
 
249
+ <p style="color: #666; margin-top: 15px; font-size: 0.9em;">
250
+ Supported: MP3, WAV, M4A (max 50MB)
251
+ </p>
252
  </div>
253
 
254
  <div class="loading" id="loading">
255
  <div class="spinner"></div>
256
+ <h3>πŸ”„ Processing Audio...</h3>
257
+ <p>This may take 2-5 minutes. Please be patient.</p>
258
  </div>
259
 
260
+ <div class="results" id="results"></div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
261
  </div>
262
  </div>
263
 
264
  <script>
265
+ // Check status periodically
266
+ function checkStatus() {
267
+ fetch('/status')
268
+ .then(response => response.json())
269
+ .then(data => {
270
+ const statusBox = document.getElementById('statusBox');
271
+ const statusTitle = document.getElementById('statusTitle');
272
+ const statusText = document.getElementById('statusText');
273
+
274
+ if (data.ready) {
275
+ statusBox.className = 'status';
276
+ statusTitle.textContent = '🟒 System Ready';
277
+ statusText.textContent = 'All components loaded. Ready to process audio files.';
278
+ } else {
279
+ statusBox.className = 'status loading';
280
+ statusTitle.textContent = '🟑 Loading...';
281
+ statusText.textContent = data.status;
282
+ }
283
+ })
284
+ .catch(error => {
285
+ const statusBox = document.getElementById('statusBox');
286
+ statusBox.className = 'status error';
287
+ document.getElementById('statusTitle').textContent = 'πŸ”΄ Error';
288
+ document.getElementById('statusText').textContent = 'Failed to check status';
289
+ });
290
+ }
291
+
292
+ // Check status every 5 seconds
293
+ setInterval(checkStatus, 5000);
294
+
295
+ // Form submission
296
  document.getElementById('uploadForm').addEventListener('submit', async function(e) {
297
  e.preventDefault();
298
 
299
  const fileInput = document.getElementById('audioFile');
 
300
  const loading = document.getElementById('loading');
301
  const results = document.getElementById('results');
302
+ const analyzeBtn = document.getElementById('analyzeBtn');
303
 
304
  if (!fileInput.files[0]) {
305
+ alert('Please select an audio file');
306
+ return;
307
+ }
308
+
309
+ // Check if system is ready
310
+ const statusCheck = await fetch('/status');
311
+ const status = await statusCheck.json();
312
+
313
+ if (!status.ready) {
314
+ alert('System is still loading. Please wait and try again.');
315
  return;
316
  }
317
 
 
319
  loading.style.display = 'block';
320
  results.style.display = 'none';
321
  analyzeBtn.disabled = true;
322
+ analyzeBtn.textContent = 'Processing...';
323
 
324
  try {
325
  const formData = new FormData();
 
332
 
333
  const data = await response.json();
334
 
 
335
  loading.style.display = 'none';
336
 
337
  if (data.success) {
338
+ results.textContent = data.result;
339
+ results.style.borderLeft = '4px solid #28a745';
340
  } else {
341
+ results.textContent = 'Error: ' + data.error;
342
+ results.style.borderLeft = '4px solid #dc3545';
343
  }
344
 
345
  results.style.display = 'block';
346
 
347
  } catch (error) {
348
  loading.style.display = 'none';
349
+ results.textContent = 'Network error: ' + error.message;
350
+ results.style.borderLeft = '4px solid #dc3545';
351
  results.style.display = 'block';
352
  }
353
 
 
354
  analyzeBtn.disabled = false;
355
  analyzeBtn.textContent = 'πŸ” Analyze Speech';
356
  });
357
 
358
+ // File selection feedback
359
  document.getElementById('audioFile').addEventListener('change', function(e) {
360
  const label = document.querySelector('.file-label');
361
  if (e.target.files[0]) {
 
372
@app.route('/')
def index():
    """Render the single-page UI, seeded with the current loading status."""
    message = LOADING_STATUS
    return render_template_string(HTML_TEMPLATE, status_message=message)
376
+
377
@app.route('/status')
def status():
    """JSON readiness probe polled by the front-end every few seconds."""
    payload = {
        'ready': MODELS_LOADED,
        'status': LOADING_STATUS,
        'modules_loaded': len(MODULES),
    }
    return jsonify(payload)
 
 
 
 
385
 
386
@app.route('/analyze', methods=['POST'])
def analyze_audio():
    """Process uploaded audio - only if models are loaded.

    Runs the four-stage pipeline (WAV conversion -> CHA transcript ->
    JSON -> classification) on the uploaded file.

    Returns:
        A JSON response: ``{'success': True, 'result': <text>}`` on
        success, or ``{'success': False, 'error': <message>}`` on any
        validation or processing failure (always HTTP 200 so the
        front-end can render the error inline).
    """
    try:
        # Refuse work until the background loader has finished.
        if not MODELS_LOADED:
            return jsonify({
                'success': False,
                'error': f'System still loading: {LOADING_STATUS}'
            })

        # Validate the upload.
        if 'audio' not in request.files:
            return jsonify({'success': False, 'error': 'No audio file uploaded'})

        audio_file = request.files['audio']
        if audio_file.filename == '':
            return jsonify({'success': False, 'error': 'No file selected'})

        # Persist the upload to disk; keep the original extension so the
        # converter can detect the container format.
        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(audio_file.filename)[1]) as tmp_file:
            audio_file.save(tmp_file.name)
            temp_path = tmp_file.name

        # Track every intermediate artifact so cleanup runs even when a
        # pipeline step raises (the old code leaked wav/cha/json on error).
        temp_files = [temp_path]
        try:
            logger.info("🎵 Starting audio processing...")

            # Step 1: Convert to WAV (16 kHz mono, as the model expects).
            logger.info("Converting to WAV...")
            wav_path = MODULES['convert_to_wav'](temp_path, sr=16000, mono=True)
            temp_files.append(wav_path)

            # Step 2: Generate CHA transcript.
            logger.info("Generating CHA file...")
            cha_path = MODULES['to_cha_from_wav'](wav_path, lang="eng")
            temp_files.append(cha_path)

            # Step 3: Convert CHA to JSON.
            logger.info("Converting to JSON...")
            json_path, _ = MODULES['cha_to_json_file'](cha_path)
            temp_files.append(json_path)

            # Step 4: Classification (model weights are looked up in CWD).
            logger.info("Running classification...")
            results = MODULES['predict_from_chajson'](".", json_path, output_file=None)

            if "predictions" in results and results["predictions"]:
                pred = results["predictions"][0]

                classification = pred["prediction"]["predicted_class"]
                confidence = pred["prediction"]["confidence_percentage"]
                description = pred["class_description"]["name"]
                severity = pred["additional_predictions"]["predicted_severity_level"]
                fluency = pred["additional_predictions"]["fluency_rating"]

                result_text = f"""🧠 APHASIA CLASSIFICATION RESULTS

🎯 Classification: {classification}
📊 Confidence: {confidence}
📋 Type: {description}
📈 Severity: {severity}/3
🗣️ Fluency: {fluency}

📊 Top 3 Probabilities:"""

                prob_dist = pred["probability_distribution"]
                for i, (atype, info) in enumerate(list(prob_dist.items())[:3], 1):
                    result_text += f"\n{i}. {atype}: {info['percentage']}"

                result_text += f"""

📝 Description:
{pred["class_description"]["description"]}

✅ Processing completed successfully!
"""

                return jsonify({'success': True, 'result': result_text})
            else:
                return jsonify({'success': False, 'error': 'No predictions generated'})
        finally:
            # Best-effort cleanup of all artifacts, on success and failure.
            # OSError only - a bare except here would also swallow e.g.
            # KeyboardInterrupt.
            for temp_file in temp_files:
                try:
                    os.unlink(temp_file)
                except OSError:
                    pass

    except Exception as e:
        # logger.exception preserves the traceback that str(e) drops.
        logger.exception(f"Processing error: {e}")
        return jsonify({'success': False, 'error': str(e)})
 
 
 
 
 
 
 
 
 
 
 
483
 
484
if __name__ == '__main__':
    # Hugging Face Spaces injects PORT; fall back to the Spaces default.
    serve_port = int(os.environ.get('PORT', 7860))
    print(f"🚀 Starting on port {serve_port}")
    print("🔄 Models loading in background...")

    # threaded=True lets /status polls be answered while /analyze is busy.
    app.run(host='0.0.0.0', port=serve_port, debug=False, threaded=True)