Spaces:
Sleeping
Sleeping
| import gradio as gr | |
| import tensorflow as tf | |
| import numpy as np | |
| from PIL import Image | |
| import json | |
| import os | |
print("=== DIAGNOSTIC MODE ===")

# --- Class labels -----------------------------------------------------------
# Try to read the label list from disk; fall back to generic placeholders.
class_labels = ["Class 0", "Class 1"]  # Default
try:
    with open("class_labels.json", "r") as f:
        class_labels = json.load(f)
    print(f"β Loaded class labels: {class_labels}")
except Exception as e:
    print(f"β Error loading class labels: {e}")

# --- Model ------------------------------------------------------------------
# Pick up the first *.keras file in the working directory, if any.
model = None
keras_files = [entry for entry in os.listdir(".") if entry.endswith(".keras")]
if keras_files:
    chosen = keras_files[0]
    try:
        model = tf.keras.models.load_model(chosen)
        print(f"β Model loaded: {chosen}")
        print(f"Model output shape: {model.output_shape}")
    except Exception as e:
        print(f"β Model loading error: {e}")
def diagnose_prediction(image):
    """
    Run one prediction with verbose diagnostics printed to the logs.

    Args:
        image: PIL image uploaded through Gradio. It is forced to RGB so
            grayscale or RGBA uploads do not break the 3-channel model input.

    Returns:
        dict mapping class label -> probability, sorted descending (the
        format gr.Label expects), or {"Error": message} on failure.
    """
    if model is None:
        return {"Error": "Model not loaded"}
    try:
        print("\n" + "="*50)
        print("DIAGNOSTIC PREDICTION")
        print("="*50)
        # FIX: np.array() on a grayscale PIL image is 2-D and on an RGBA image
        # has 4 channels; both break a model expecting (H, W, 3). Convert first.
        img_array = np.array(image.convert("RGB"))
        img_resized = tf.image.resize(img_array, [256, 256])
        img_normalized = tf.cast(img_resized, tf.float32) / 255.0
        img_batch = tf.expand_dims(img_normalized, 0)
        print(f"Input shape to model: {img_batch.shape}")
        # Get raw predictions
        raw_predictions = model.predict(img_batch, verbose=0)
        print(f"Raw model output: {raw_predictions}")
        print(f"Raw output shape: {raw_predictions.shape}")
        print(f"Raw output type: {type(raw_predictions)}")
        # FIX: some models return a 1-D array; normalize to (1, n) so the
        # shape[1] checks below cannot raise IndexError.
        raw_predictions = np.atleast_2d(raw_predictions)
        if raw_predictions.shape[1] == 1:
            # Single output with sigmoid activation (binary classification)
            print("Detected binary classification with sigmoid output")
            sigmoid_output = float(raw_predictions[0][0])
            print(f"Sigmoid output: {sigmoid_output}")
            # Convert to probabilities for both classes
            prob_class_1 = sigmoid_output
            prob_class_0 = 1.0 - sigmoid_output
            probabilities = np.array([prob_class_0, prob_class_1])
            print(f"Calculated probabilities: [Class 0: {prob_class_0:.4f}, Class 1: {prob_class_1:.4f}]")
        elif raw_predictions.shape[1] == 2:
            # Two outputs: assume logits and apply softmax.
            # NOTE(review): if the model's last layer is already softmax this
            # re-normalizes but does not change the argmax — confirm the head.
            print("Detected two-output classification")
            logits = raw_predictions[0]
            probabilities = tf.nn.softmax(logits).numpy()
            print(f"Softmax probabilities: {probabilities}")
        else:
            # Multi-class classification
            print("Detected multi-class classification")
            logits = raw_predictions[0]
            probabilities = tf.nn.softmax(logits).numpy()
            print(f"Multi-class probabilities: {probabilities}")
        print(f"Final probabilities shape: {probabilities.shape}")
        print(f"Probabilities sum: {np.sum(probabilities)}")
        # Get the predicted class
        predicted_class_index = np.argmax(probabilities)
        print(f"Predicted class index: {predicted_class_index}")
        print(f"Predicted class name: {class_labels[predicted_class_index] if predicted_class_index < len(class_labels) else 'Unknown'}")
        # Build the label -> probability dictionary for gr.Label.
        results = {}
        print(f"Number of probabilities: {len(probabilities)}")
        print(f"Number of class labels: {len(class_labels)}")
        for i, prob in enumerate(probabilities):
            if i < len(class_labels):
                class_name = class_labels[i]
                prob_value = float(prob)
                results[class_name] = prob_value
                print(f" {class_name} (index {i}): {prob_value:.4f}")
            else:
                print(f" WARNING: More probabilities than class labels at index {i}")
        print(f"Final results dictionary: {results}")
        # Sanity checks for common failure modes.
        if len(set(results.values())) == 1:
            print("π¨ BUG DETECTED: All probabilities are identical!")
        if len(results) != len(class_labels):
            print("π¨ BUG DETECTED: Results count doesn't match class labels!")
        else:
            print("β Results count matches class labels!")
        # Sort results by probability (Gradio expects this)
        sorted_results = dict(sorted(results.items(), key=lambda x: x[1], reverse=True))
        print(f"Sorted results: {sorted_results}")
        return sorted_results
    except Exception as e:
        error_msg = f"Prediction failed: {str(e)}"
        print(f"β {error_msg}")
        import traceback
        traceback.print_exc()
        return {"Error": error_msg}
# Probe the model at startup with simple synthetic inputs.
def test_model_directly():
    """Feed the model three known-different images (black, white, noise)
    and report whether its predictions actually differ between them."""
    if model is None:
        return
    print("\n" + "="*50)
    print("TESTING MODEL WITH SYNTHETIC INPUTS")
    print("="*50)
    # Three deliberately different 256x256 RGB batches.
    synthetic = (
        np.zeros((1, 256, 256, 3), dtype=np.float32),                 # all black
        np.ones((1, 256, 256, 3), dtype=np.float32),                  # all white
        np.random.random((1, 256, 256, 3)).astype(np.float32),        # noise
    )
    raw = [model.predict(img, verbose=0) for img in synthetic]
    if raw[0].shape[1] == 1:
        # Binary classification with sigmoid: derive both class probabilities.
        probs = [np.array([1 - p[0][0], p[0][0]]) for p in raw]
    else:
        # Multi-class: apply softmax to the logits.
        probs = [tf.nn.softmax(p[0]).numpy() for p in raw]
    black_probs, white_probs, noise_probs = probs
    print(f"Black image prediction: {black_probs}")
    print(f"White image prediction: {white_probs}")
    print(f"Noise image prediction: {noise_probs}")
    # Identical outputs on wildly different inputs suggest a broken model.
    if np.allclose(black_probs, white_probs, atol=1e-6) and np.allclose(white_probs, noise_probs, atol=1e-6):
        print("π¨ MODEL ISSUE: Model gives identical predictions for different inputs!")
        print(" This suggests the model is broken or not properly trained")
    else:
        print("β Model gives different predictions for different inputs")
# Run the synthetic-input diagnostic once at startup, if a model loaded.
if model is not None:
    test_model_directly()

# Build the Gradio UI: image in, label distribution out.
interface = gr.Interface(
    fn=diagnose_prediction,
    inputs=gr.Image(type="pil", label="Upload an image"),
    outputs=gr.Label(label="Diagnostic Results - Check Logs!"),
    title="DIAGNOSTIC MODE - Check Application Logs",
    description="This version shows detailed diagnostic info in the logs. Upload an image and check the logs tab.",
)

if __name__ == "__main__":
    interface.launch()