import numpy as np
import os
import shutil
import json
import tensorflow as tf

from src.preprocess import audio_to_spectrograms
from src.model import build_autoencoder
from src.storage import upload_file, download_file

# --- CRITICAL FIX FOR CLOUD DEPLOYMENT ---
# We use /tmp because it is the ONLY folder guaranteed to be writable in Docker.
TEMP_DIR = "/tmp/temp_models"

# exist_ok=True avoids the check-then-create race of the
# os.path.exists() + os.makedirs() pair (another process/thread could
# create the directory between the two calls).
os.makedirs(TEMP_DIR, exist_ok=True)


def train_mode_cloud(audio_path, mode_name, boat_id):
    """Train a per-mode anomaly-detection autoencoder and upload it to the cloud.

    Args:
        audio_path: Path to a local audio file (e.g. "/tmp/input.wav"
            passed from app.py).
        mode_name: Operating-mode label; used to name the model artifacts.
        boat_id: Identifier used as the cloud-storage prefix for this boat.

    Returns:
        A human-readable status string describing success or failure.
    """
    # 1. Preprocess: convert the recording into spectrogram frames.
    X_train = audio_to_spectrograms(audio_path)
    if X_train is None:
        return "❌ Audio too short (min 1 sec)."

    # 2. Train an autoencoder to reconstruct this mode's "normal" sound.
    autoencoder = build_autoencoder(X_train.shape[1:])
    autoencoder.fit(X_train, X_train, epochs=40, batch_size=4, verbose=0)

    # 3. Calculate threshold (dynamic safety margin): per-frame reconstruction
    # MSE, then mean + 2 standard deviations (strict-detection margin).
    reconstructions = autoencoder.predict(X_train)
    mse = np.mean(np.power(X_train - reconstructions, 2), axis=(1, 2, 3))
    threshold = float(np.mean(mse) + (2 * np.std(mse)))

    # 4. Save locally to the writable /tmp area before uploading.
    model_filename = f"{mode_name}_model.h5"
    meta_filename = f"{mode_name}_meta.json"
    local_model_path = os.path.join(TEMP_DIR, model_filename)
    local_meta_path = os.path.join(TEMP_DIR, meta_filename)

    # Optimizer state is not needed for inference, so it is excluded to
    # keep the uploaded artifact small.
    autoencoder.save(local_model_path, save_format='h5', include_optimizer=False)
    with open(local_meta_path, 'w') as f:
        json.dump({"threshold": threshold}, f)

    # 5. Upload both artifacts (model + threshold metadata) to cloud storage.
    u1 = upload_file(local_model_path, boat_id, model_filename)
    u2 = upload_file(local_meta_path, boat_id, meta_filename)

    if u1 and u2:
        return f"✅ Calibrated {mode_name.upper()} | Threshold: {threshold:.5f}"
    return "⚠️ Trained locally, but Cloud Upload Failed."
def predict_health_cloud(audio_path, mode_name, boat_id):
    """Score an audio clip against the calibrated model for one mode.

    Downloads the model and threshold metadata for (boat_id, mode_name)
    from cloud storage into /tmp, computes per-frame reconstruction error
    on the clip, and reports a health score plus telemetry details.

    Returns:
        A human-readable status/telemetry string.
    """
    model_filename = f"{mode_name}_model.h5"
    meta_filename = f"{mode_name}_meta.json"
    local_model_path = os.path.join(TEMP_DIR, model_filename)
    local_meta_path = os.path.join(TEMP_DIR, meta_filename)

    # 1. Pull both artifacts from cloud storage into the writable /tmp area.
    got_model = download_file(boat_id, model_filename, local_model_path)
    got_meta = download_file(boat_id, meta_filename, local_meta_path)
    if not (got_model and got_meta):
        return f"⚠️ No trained model found in cloud for Boat: {boat_id} (Mode: {mode_name})"

    # 2. Load the calibrated threshold and the trained autoencoder.
    # compile=False: inference only, no optimizer/loss needed.
    with open(local_meta_path, 'r') as f:
        threshold = json.load(f)["threshold"]
    model = tf.keras.models.load_model(local_model_path, compile=False)

    # 3. Reconstruct the clip and measure per-frame error.
    X_test = audio_to_spectrograms(audio_path)
    if X_test is None:
        return "Error: Audio too short."
    reconstructions = model.predict(X_test)
    frame_errors = np.mean(np.power(X_test - reconstructions, 2), axis=(1, 2, 3))

    # 4. Analysis: fraction of frames whose error exceeds the threshold.
    anomaly_count = np.sum(frame_errors > threshold)
    health_score = 100 * (1 - (anomaly_count / len(frame_errors)))

    # 5. Telemetry summary for the operator.
    avg_error = np.mean(frame_errors)
    max_error = np.max(frame_errors)
    if health_score > 85:
        status = "🟢 HEALTHY"
    else:
        status = "🔴 ANOMALY DETECTED"

    return f"""
{status}
Confidence Score: {health_score:.1f}%
--- TECHNICAL TELEMETRY ---
Threshold Limit : {threshold:.5f}
Your Avg Error : {avg_error:.5f}
Your Max Error : {max_error:.5f}
Anomalous Secs : {anomaly_count} / {len(frame_errors)}
"""