# Lung-sound prediction CLI: classifies a single cough recording as healthy/sick
# using HeAR embeddings and a Keras classifier.
# --- Dependencies ---
import os
import sys

import numpy as np
import librosa
import tensorflow as tf
from tensorflow.keras.models import load_model

# Make the project-local ``utils`` package importable regardless of whether
# the script is launched from the project root or from a subdirectory.
sys.path.append(os.getcwd())
try:
    from utils.hear_extractor import HeARExtractor
    from utils.audio_preprocessor import advanced_preprocess
except ImportError:
    # Fallback: assume the process was started one level below the project
    # root, so the parent directory holds the ``utils`` package.
    sys.path.append(os.path.dirname(os.getcwd()))
    from utils.hear_extractor import HeARExtractor
    from utils.audio_preprocessor import advanced_preprocess

# --- Configuration (absolute Windows paths; adjust for your machine) ---
MODEL_PATH = r"c:\Users\ASUS\lung_ai_project\models\hear_classifier_advanced.h5"
CLASSES_PATH = r"c:\Users\ASUS\lung_ai_project\models\hear_classes_advanced.npy"
USER_FILE = r"C:\Users\ASUS\Downloads\WhatsApp Audio 2026-02-23 at 6.09.14 PM.wav"
def _classify_embedding(model, classes, emb):
    """Run the classifier on one HeAR embedding and apply the reliability guard.

    Args:
        model: Loaded Keras classifier whose ``predict`` returns class
            probabilities of shape (1, n_classes).
        classes: Array of class labels aligned with the model's output
            (expected to contain "healthy" / "sick").
        emb: 1-D embedding vector produced by the HeAR extractor.

    Returns:
        Tuple ``(final_label, confidence, is_inconclusive)`` where
        ``confidence`` is the raw top-class probability and
        ``is_inconclusive`` flags a borderline "sick" call that was
        downgraded to "healthy".
    """
    X = emb[np.newaxis, ...]  # add batch dimension for model.predict
    preds = model.predict(X, verbose=0)
    pred_idx = int(np.argmax(preds[0]))
    raw_label = classes[pred_idx]
    confidence = preds[0][pred_idx]

    # Reliability guard: a "sick" verdict below 70% confidence is treated as
    # a false-positive risk and reported as healthy-but-borderline instead.
    THRESHOLD = 0.70
    if raw_label == "sick" and confidence < THRESHOLD:
        print(f"DEBUG: Borderline result ({confidence:.2f}). Applying reliability guard.")
        return "healthy", confidence, True
    return raw_label, confidence, False


def _report_result(final_label, confidence, is_inconclusive):
    """Print the final verdict banner and a one-line recommendation."""
    print("\n" + "=" * 50)
    if is_inconclusive:
        print(f"RESULT: HEALTHY (Normal Pattern)")
        print(f"NOTE: Prediction was borderline ({confidence*100:.1f}%).")
        print("Reliability guard applied: No strong abnormal indicators found.")
    else:
        print(f"RESULT: {final_label.upper()}")
        print(f"CONFIDENCE: {confidence*100:.2f}%")
    print("=" * 50)

    # Simple interpretation for the user.
    if final_label == "sick":
        print("Recommendation: Potential respiratory symptoms detected. Consider medical consultation.")
    elif is_inconclusive:
        print("Recommendation: Recording had minor artifacts but appears normal. Re-record in a quiet room for better accuracy.")
    else:
        print("Recommendation: Acoustic pattern appears healthy. Continue monitoring if symptoms persist.")


def predict_single_file(file_path):
    """Run the full cough-analysis pipeline on a single audio file.

    Pipeline: file check -> HeAR extractor init -> classifier load ->
    audio load + cough validation (gatekeeper) -> preprocessing ->
    embedding extraction -> inference with reliability guard -> report.

    Args:
        file_path: Path to a WAV recording of a cough.

    Returns:
        None. All results and errors are reported via ``print``; the
        function returns early on any failure.
    """
    print(f"\n--- Analyzing Audio: {os.path.basename(file_path)} ---")
    if not os.path.exists(file_path):
        print(f"Error: File not found at {file_path}")
        return

    # 1. Initialize Extractor
    print("Initializing HeAR Extractor...")
    try:
        extractor = HeARExtractor()
    except Exception as e:
        print(f"Failed to load HeAR model: {e}")
        return

    # 2. Load Evaluation Model
    print(f"Loading Model from {MODEL_PATH}...")
    try:
        model = load_model(MODEL_PATH, compile=False)
        classes = np.load(CLASSES_PATH)
        print(f"Classes: {classes}")
    except Exception as e:
        print(f"Error loading model: {e}")
        return

    # 3. Process & Predict
    try:
        print("Loading and preprocessing audio...")
        # 16 kHz mono, first 5 s — presumably what HeAR expects; verify
        # against the extractor's documentation.
        y, sr = librosa.load(file_path, sr=16000, duration=5.0)

        # Gatekeeper: reject recordings that do not contain a clear cough
        # before spending time on embedding extraction and inference.
        # Imported lazily so the rest of the pipeline works even if the
        # validator module is absent at import time of this script.
        from utils.audio_validator import validate_audio_is_cough
        is_valid_cough, reason, val_conf = validate_audio_is_cough(y, sr)
        if not is_valid_cough:
            print("\n" + "=" * 50)
            print(f"REJECTED: Audio Validation Failed!")
            print(f"REASON: {reason}")
            print(f"RECOMMENDATION: Please record a clear, loud cough in a quiet room.")
            print("=" * 50)
            return

        # Apply Advanced Preprocessing (Critical for correct result!)
        y_clean = advanced_preprocess(y, sr)

        # Extract Embedding
        print("Extracting features...")
        emb = extractor.extract(y_clean)
        if emb is None:
            print("Error: Could not extract features from audio.")
            return

        # 4. Predict
        print("Step 4: Running Inference...")
        try:
            final_label, confidence, is_inconclusive = _classify_embedding(model, classes, emb)
        except Exception as e:
            print(f"Error during inference: {e}")
            return

        _report_result(final_label, confidence, is_inconclusive)
    except Exception as e:
        # Best-effort top-level guard: report any unexpected pipeline
        # failure instead of crashing the CLI.
        print(f"Detailed Error: {e}")
if __name__ == "__main__":
    # Script entry point: analyze the hard-coded sample recording.
    predict_single_file(USER_FILE)