Ellie5757575757 committed on
Commit
3f2b9ca
·
verified ·
1 Parent(s): a97ba2d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +153 -104
app.py CHANGED
@@ -1,17 +1,16 @@
1
  import gradio as gr
2
- import json
3
  import os
4
  import tempfile
5
  import logging
6
- import traceback
7
- from pathlib import Path
8
- print("Gradio version:", gr.__version__)
9
 
10
  # Set up logging
11
  logging.basicConfig(level=logging.INFO)
12
  logger = logging.getLogger(__name__)
13
 
14
- # Configuration - Use current directory for model files
15
  MODEL_DIR = "."
16
  SUPPORTED_AUDIO_FORMATS = [".mp3", ".mp4", ".wav", ".m4a", ".flac", ".ogg"]
17
 
@@ -70,12 +69,7 @@ def check_model_files():
70
  if not os.path.exists(os.path.join(MODEL_DIR, file)):
71
  missing_files.append(file)
72
 
73
- if missing_files:
74
- logger.error(f"Missing model files: {missing_files}")
75
- return False, missing_files
76
-
77
- logger.info("βœ“ All required model files found")
78
- return True, []
79
 
80
  def run_complete_pipeline(audio_file_path: str) -> dict:
81
  """Complete pipeline: Audio β†’ WAV β†’ CHA β†’ JSON β†’ Model Prediction"""
@@ -128,7 +122,8 @@ def run_complete_pipeline(audio_file_path: str) -> dict:
128
 
129
  except Exception as e:
130
  logger.error(f"Pipeline error: {str(e)}")
131
- logger.error(traceback.format_exc())
 
132
  return {
133
  "success": False,
134
  "error": str(e),
@@ -143,13 +138,15 @@ def process_audio_input(audio_file):
143
 
144
  # Check if pipeline is available
145
  if not all(MODULES.values()):
146
- return "❌ Error: Audio processing pipeline not available. Missing required modules."
 
147
 
148
  # Check file format
149
  file_path = audio_file
150
  if hasattr(audio_file, 'name'):
151
  file_path = audio_file.name
152
 
 
153
  file_ext = Path(file_path).suffix.lower()
154
  if file_ext not in SUPPORTED_AUDIO_FORMATS:
155
  return f"❌ Error: Unsupported file format {file_ext}. Supported: {', '.join(SUPPORTED_AUDIO_FORMATS)}"
@@ -185,8 +182,7 @@ def process_audio_input(audio_file):
185
  prob_dist = first_pred["probability_distribution"]
186
  top_3 = list(prob_dist.items())[:3]
187
 
188
- result_text = f"""
189
- 🧠 **APHASIA CLASSIFICATION RESULTS**
190
 
191
  🎯 **Primary Classification:** {predicted_class}
192
  πŸ“Š **Confidence:** {confidence}
@@ -218,7 +214,8 @@ def process_audio_input(audio_file):
218
 
219
  except Exception as e:
220
  logger.error(f"Processing error: {str(e)}")
221
- logger.error(traceback.format_exc())
 
222
  return f"❌ Processing Error: {str(e)}\n\nPlease check the logs for more details."
223
 
224
  def process_text_input(text_input):
@@ -232,6 +229,7 @@ def process_text_input(text_input):
232
  return "❌ Error: Text analysis not available. Missing prediction module."
233
 
234
  # Create a simple JSON structure for text-only input
 
235
  temp_json = {
236
  "sentences": [{
237
  "sentence_id": "S1",
@@ -274,8 +272,7 @@ def process_text_input(text_input):
274
  severity = first_pred["additional_predictions"]["predicted_severity_level"]
275
  fluency = first_pred["additional_predictions"]["fluency_rating"]
276
 
277
- return f"""
278
- 🧠 **TEXT ANALYSIS RESULTS**
279
 
280
  🎯 **Predicted:** {predicted_class}
281
  πŸ“Š **Confidence:** {confidence}
@@ -294,107 +291,159 @@ def process_text_input(text_input):
294
  logger.error(f"Text processing error: {str(e)}")
295
  return f"❌ Error: {str(e)}"
296
 
297
def detect_environment(port: int = 7860):
    """Detect whether we are running in a cloud environment.

    Args:
        port: Local TCP port to probe for an already-running server.
            Defaults to 7860 (Gradio's default port); parameterized so the
            probe follows a non-default server port.

    Returns:
        Tuple ``(is_cloud, localhost_accessible)``:
        - ``is_cloud``: True if any known cloud-platform env var is set.
        - ``localhost_accessible``: True if 127.0.0.1:``port`` accepts
          a TCP connection within 1 second.
    """
    # Env vars that identify common hosted/serverless platforms.
    cloud_indicators = [
        'SPACE_ID',                     # Hugging Face Spaces
        'PAPERSPACE_NOTEBOOK_REPO_ID',  # Paperspace
        'COLAB_GPU',                    # Google Colab
        'KAGGLE_KERNEL_RUN_TYPE',       # Kaggle
        'AWS_LAMBDA_FUNCTION_NAME',     # AWS Lambda
    ]
    is_cloud = any(os.getenv(indicator) for indicator in cloud_indicators)

    # Probe localhost; connect_ex returns 0 when the port is open.
    import socket
    localhost_accessible = False
    try:
        # BUGFIX: use a context manager so the socket is always closed;
        # the original leaked the file descriptor if connect_ex raised.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            sock.settimeout(1)
            localhost_accessible = sock.connect_ex(('127.0.0.1', port)) == 0
    except OSError:
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Socket errors are OSError.
        localhost_accessible = False

    return is_cloud, localhost_accessible
323
-
324
def create_interface():
    """Build the single-tab Gradio audio interface.

    Collects the system status (model files + pipeline modules) and embeds
    it into the interface's HTML article so users can see missing pieces.

    Returns:
        gr.Interface wired to ``process_audio_input``.
    """
    # Gather system readiness before constructing the UI.
    model_available, missing_files = check_model_files()
    pipeline_available = all(MODULES.values())

    ready = model_available and pipeline_available
    problems = []
    if not model_available:
        problems.append(f"\n❌ Missing model files: {', '.join(missing_files)}")
    if not pipeline_available:
        missing_modules = [k for k, v in MODULES.items() if v is None]
        problems.append(f"\n❌ Missing modules: {', '.join(missing_modules)}")
    status_message = (
        "🟒 **System Status: Ready**" if ready
        else "πŸ”΄ **System Status: Issues Detected**"
    ) + "".join(problems)

    # Plain gr.Interface (not Blocks) to sidestep JSON-schema issues.
    file_input = gr.File(
        label="Upload Audio File (MP3, MP4, WAV, M4A, FLAC, OGG)",
        file_types=["audio"]
    )
    text_output = gr.Textbox(
        label="Analysis Results",
        lines=25,
        max_lines=50
    )
    audio_interface = gr.Interface(
        fn=process_audio_input,
        inputs=file_input,
        outputs=text_output,
        title="🧠 Aphasia Classification System",
        description="Upload audio files to analyze speech patterns and classify aphasia types",
        article=f"""
<div style="margin-top: 20px;">
<h3>System Status</h3>
<p>{status_message}</p>
<h3>About</h3>
<p><strong>Pipeline:</strong> Audio β†’ WAV β†’ CHA β†’ JSON β†’ Classification</p>
<p><strong>Supported formats:</strong> MP3, MP4, WAV, M4A, FLAC, OGG</p>
<p><em>For research and clinical assessment purposes.</em></p>
</div>
"""
    )

    return audio_interface
367
 
368
- if __name__ == "__main__":
369
- try:
370
- logger.info("Starting Aphasia Classification System...")
371
-
372
- # Detect environment
373
- is_cloud, localhost_accessible = detect_environment()
374
- logger.info(f"Environment - Cloud: {is_cloud}, Localhost accessible: {localhost_accessible}")
375
-
376
- # Create and launch interface
377
- demo = create_interface()
 
 
 
 
 
 
 
 
 
 
378
 
379
- # Configure launch parameters based on environment
380
- launch_kwargs = {
381
- "server_name": "0.0.0.0",
382
- "server_port": 7860,
383
- "show_error": True,
384
- "quiet": False,
385
  }
386
-
387
- # Set share parameter based on environment
388
- if is_cloud or not localhost_accessible:
389
- launch_kwargs["share"] = True
390
- logger.info("Running in cloud environment or localhost not accessible - enabling share")
391
- else:
392
- launch_kwargs["share"] = False
393
- logger.info("Running locally - share disabled")
394
-
395
- demo.launch(**launch_kwargs)
396
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
  except Exception as e:
398
- logger.error(f"Failed to launch app: {e}")
399
- logger.error(traceback.format_exc())
400
- print(f"❌ Application startup failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from flask import Flask
3
  import os
4
  import tempfile
5
  import logging
6
+ import threading
7
+ import time
 
8
 
9
  # Set up logging
10
  logging.basicConfig(level=logging.INFO)
11
  logger = logging.getLogger(__name__)
12
 
13
+ # Configuration
14
  MODEL_DIR = "."
15
  SUPPORTED_AUDIO_FORMATS = [".mp3", ".mp4", ".wav", ".m4a", ".flac", ".ogg"]
16
 
 
69
  if not os.path.exists(os.path.join(MODEL_DIR, file)):
70
  missing_files.append(file)
71
 
72
+ return len(missing_files) == 0, missing_files
 
 
 
 
 
73
 
74
  def run_complete_pipeline(audio_file_path: str) -> dict:
75
  """Complete pipeline: Audio β†’ WAV β†’ CHA β†’ JSON β†’ Model Prediction"""
 
122
 
123
  except Exception as e:
124
  logger.error(f"Pipeline error: {str(e)}")
125
+ import traceback
126
+ traceback.print_exc()
127
  return {
128
  "success": False,
129
  "error": str(e),
 
138
 
139
  # Check if pipeline is available
140
  if not all(MODULES.values()):
141
+ missing_modules = [k for k, v in MODULES.items() if v is None]
142
+ return f"❌ Error: Audio processing pipeline not available. Missing required modules: {', '.join(missing_modules)}"
143
 
144
  # Check file format
145
  file_path = audio_file
146
  if hasattr(audio_file, 'name'):
147
  file_path = audio_file.name
148
 
149
+ from pathlib import Path
150
  file_ext = Path(file_path).suffix.lower()
151
  if file_ext not in SUPPORTED_AUDIO_FORMATS:
152
  return f"❌ Error: Unsupported file format {file_ext}. Supported: {', '.join(SUPPORTED_AUDIO_FORMATS)}"
 
182
  prob_dist = first_pred["probability_distribution"]
183
  top_3 = list(prob_dist.items())[:3]
184
 
185
+ result_text = f"""🧠 **APHASIA CLASSIFICATION RESULTS**
 
186
 
187
  🎯 **Primary Classification:** {predicted_class}
188
  πŸ“Š **Confidence:** {confidence}
 
214
 
215
  except Exception as e:
216
  logger.error(f"Processing error: {str(e)}")
217
+ import traceback
218
+ traceback.print_exc()
219
  return f"❌ Processing Error: {str(e)}\n\nPlease check the logs for more details."
220
 
221
  def process_text_input(text_input):
 
229
  return "❌ Error: Text analysis not available. Missing prediction module."
230
 
231
  # Create a simple JSON structure for text-only input
232
+ import json
233
  temp_json = {
234
  "sentences": [{
235
  "sentence_id": "S1",
 
272
  severity = first_pred["additional_predictions"]["predicted_severity_level"]
273
  fluency = first_pred["additional_predictions"]["fluency_rating"]
274
 
275
+ return f"""🧠 **TEXT ANALYSIS RESULTS**
 
276
 
277
  🎯 **Predicted:** {predicted_class}
278
  πŸ“Š **Confidence:** {confidence}
 
291
  logger.error(f"Text processing error: {str(e)}")
292
  return f"❌ Error: {str(e)}"
293
 
294
def create_gradio_app():
    """Create the Gradio interface (audio tab + text tab).

    Checks model/module availability and surfaces that status in the UI.

    Returns:
        gr.TabbedInterface combining the audio and text analysis tabs.
    """
    # Check system status
    model_available, missing_files = check_model_files()
    pipeline_available = all(MODULES.values())

    status_message = "🟒 System Ready" if model_available and pipeline_available else "πŸ”΄ System Issues"

    status_details = []
    if not model_available:
        status_details.append(f"Missing model files: {', '.join(missing_files)}")
    if not pipeline_available:
        missing_modules = [k for k, v in MODULES.items() if v is None]
        status_details.append(f"Missing modules: {', '.join(missing_modules)}")

    # BUGFIX: status_message/status_details were computed but never used,
    # so users had no visibility into a degraded system. Surface them in
    # the tab descriptions.
    status_text = status_message
    if status_details:
        status_text += " - " + "; ".join(status_details)

    # Create simple interfaces to avoid JSON schema issues
    audio_demo = gr.Interface(
        fn=process_audio_input,
        inputs=gr.File(label="Upload Audio File", file_types=["audio"]),
        outputs=gr.Textbox(label="Analysis Results", lines=25),
        title="🎡 Audio Analysis",
        description=f"Upload MP3, MP4, WAV, M4A, FLAC, or OGG files\n\n{status_text}"
    )

    text_demo = gr.Interface(
        fn=process_text_input,
        inputs=gr.Textbox(label="Enter Text", lines=5, placeholder="Enter speech transcription..."),
        outputs=gr.Textbox(label="Analysis Results", lines=15),
        title="πŸ“ Text Analysis",
        description=f"Enter text for direct analysis (less accurate than audio)\n\n{status_text}"
    )

    # Combine interfaces using TabbedInterface
    demo = gr.TabbedInterface(
        [audio_demo, text_demo],
        ["Audio Analysis", "Text Analysis"],
        title="🧠 Aphasia Classification System",
        theme=gr.themes.Soft()
    )

    return demo
336
 
337
def create_flask_app():
    """Build the Flask side-car app and the Gradio app.

    NOTE(review): despite the original comments, Gradio is NOT mounted
    inside Flask -- the two apps are separate servers. Flask only exposes
    auxiliary JSON endpoints (/health, /info); Gradio serves the UI on its
    own port via launch().

    Returns:
        tuple: (flask_app, gradio_app)
    """
    flask_app = Flask(__name__)

    gradio_app = create_gradio_app()

    # Enable request queuing so long-running predictions don't block.
    gradio_app.queue()

    # BUGFIX: removed the unused `gradio_fastapi_app = gradio_app.app`
    # assignment -- it implied a FastAPI mount that never happened and only
    # misled readers about the architecture.

    @flask_app.route('/health')
    def health_check():
        # Health probe: reports model-file and pipeline-module availability
        # as JSON (Flask serializes the returned dict).
        model_available, missing_files = check_model_files()
        pipeline_available = all(MODULES.values())

        return {
            "status": "healthy" if model_available and pipeline_available else "unhealthy",
            "model_available": model_available,
            "pipeline_available": pipeline_available,
            "missing_files": missing_files if not model_available else [],
            "missing_modules": [k for k, v in MODULES.items() if v is None] if not pipeline_available else []
        }

    @flask_app.route('/info')
    def info():
        # Static service description and endpoint map.
        return {
            "title": "Aphasia Classification System",
            "description": "AI-powered aphasia type classification from audio",
            "supported_formats": SUPPORTED_AUDIO_FORMATS,
            "endpoints": {
                "/": "Main Gradio interface",
                "/health": "Health check",
                "/info": "System information"
            }
        }

    return flask_app, gradio_app
381
+
382
def run_gradio_on_flask():
    """Run the Gradio UI plus the Flask auxiliary endpoints.

    Gradio runs in a background thread on PORT (default 7860); Flask is
    served from the main thread on FLASK_PORT (default PORT+1) so the two
    servers don't collide. Blocks until interrupted.
    """
    logger.info("Starting Aphasia Classification System with Flask + Gradio...")

    # Create Flask and Gradio apps
    flask_app, gradio_app = create_flask_app()

    # Detect environment
    port = int(os.environ.get('PORT', 7860))
    host = os.environ.get('HOST', '0.0.0.0')
    # Flask cannot share Gradio's port; give it its own (override with
    # FLASK_PORT). NOTE(review): cloud platforms often expose only one
    # port, in which case only the Gradio UI is reachable externally.
    flask_port = int(os.environ.get('FLASK_PORT', port + 1))

    # Check if we're in a cloud environment
    is_cloud = any(os.getenv(indicator) for indicator in [
        'SPACE_ID', 'PAPERSPACE_NOTEBOOK_REPO_ID',
        'COLAB_GPU', 'KAGGLE_KERNEL_RUN_TYPE'
    ])

    logger.info(f"Environment - Cloud: {is_cloud}, Host: {host}, Port: {port}")

    def run_gradio():
        """Run Gradio in a separate thread."""
        try:
            gradio_app.launch(
                server_name=host,
                server_port=port,
                share=is_cloud,  # Auto-enable share in cloud environments
                show_error=True,
                quiet=False,
                prevent_thread_lock=True  # Important for running with Flask
            )
        except Exception as e:
            logger.error(f"Failed to start Gradio: {e}")

    # Start Gradio in background thread
    gradio_thread = threading.Thread(target=run_gradio, daemon=True)
    gradio_thread.start()

    # Give Gradio time to start
    time.sleep(2)

    logger.info(f"βœ“ Gradio app started on {host}:{port}")
    logger.info(f"βœ“ Flask health endpoints available at http://{host}:{flask_port}/health and /info")

    # BUGFIX: the original kept the main thread in a `while True: sleep(1)`
    # loop and never served flask_app, so /health and /info were
    # unreachable. Serve Flask here; flask_app.run() blocks, keeping the
    # process alive (replacing the sleep loop).
    try:
        flask_app.run(host=host, port=flask_port)
    except KeyboardInterrupt:
        logger.info("Shutting down...")
+
433
+ if __name__ == "__main__":
434
+ try:
435
+ run_gradio_on_flask()
436
  except Exception as e:
437
+ logger.error(f"Failed to start application: {e}")
438
+ import traceback
439
+ traceback.print_exc()
440
+
441
+ # Fallback to basic Gradio if Flask setup fails
442
+ logger.info("Falling back to basic Gradio interface...")
443
+ demo = create_gradio_app()
444
+ demo.launch(
445
+ server_name="0.0.0.0",
446
+ server_port=7860,
447
+ share=True,
448
+ show_error=True
449
+ )