| """
|
| AI-Based Image Deblurring Streamlit Application
|
| ===============================================
|
|
|
| Advanced web application for image deblurring with AI and traditional methods.
|
| Features comprehensive blur analysis, multiple deblurring techniques, and
|
| detailed quality assessment with processing history.
|
| """
|
|
|
| import streamlit as st
|
| import cv2
|
| import numpy as np
|
| from PIL import Image
|
| import io
|
| import time
|
| import json
|
| import os
|
| from datetime import datetime
|
| import plotly.graph_objects as go
|
| import plotly.express as px
|
| import pandas as pd
|
| import logging
|
|
|
|
|
| logger = logging.getLogger(__name__)
|
| from typing import Dict, Any, Optional, List
|
|
|
|
|
| from modules.input_module import ImageValidator, validate_and_load_image
|
| from modules.blur_detection import BlurDetector, analyze_blur_characteristics
|
|
|
|
|
# Optional dependency guard: the CNN module pulls in TensorFlow, which may be
# missing or broken in some deployments. The app degrades gracefully by
# exposing these two flags, which the UI checks before offering CNN features.
try:
    from modules.cnn_deblurring import CNNDeblurModel, enhance_with_cnn
    CNN_AVAILABLE = True
    # A saved weights file on disk indicates a previously trained model.
    CNN_MODEL_TRAINED = os.path.exists("models/cnn_deblur_model.h5")
except ImportError as e:
    logger.warning(f"CNN module not available: {e}")
    CNN_AVAILABLE = False
    CNN_MODEL_TRAINED = False
|
|
|
| from modules.sharpness_analysis import SharpnessAnalyzer, compare_image_quality
|
| from modules.traditional_filters import TraditionalDeblurring, BlurType, apply_wiener_filter, apply_richardson_lucy, apply_unsharp_masking
|
| from modules.database_module import DatabaseManager, ProcessingRecord, log_processing_result
|
| from modules.color_preservation import ColorPreserver, preserve_colors, display_convert
|
| from modules.iterative_enhancement import IterativeEnhancer, enhance_progressively
|
|
|
|
|
# Page configuration must be the first Streamlit call in the script
# (Streamlit raises if any other st.* call precedes it).
st.set_page_config(
    page_title="AI Image Deblurring Studio",
    page_icon="π―",
    layout="wide",
    initial_sidebar_state="expanded"
)
|
|
|
|
|
# Inject the custom CSS used by HTML snippets rendered elsewhere with
# st.markdown(..., unsafe_allow_html=True): page/section headers, metric
# cards, the quality-* rating colors (see display_quality_rating), and the
# improvement list styling.
st.markdown("""
<style>
.main-header {
font-size: 3em;
color: #1f77b4;
text-align: center;
margin-bottom: 30px;
}

.section-header {
font-size: 1.5em;
color: #2c3e50;
border-bottom: 2px solid #3498db;
padding-bottom: 5px;
margin: 20px 0;
}

.metric-card {
background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
padding: 20px;
border-radius: 10px;
color: white;
margin: 10px 0;
}

.improvement-positive {
color: #27ae60;
font-weight: bold;
font-size: 1.2em;
}

.improvement-negative {
color: #e74c3c;
font-weight: bold;
font-size: 1.2em;
}

.quality-excellent { color: #27ae60; font-weight: bold; }
.quality-good { color: #2ecc71; font-weight: bold; }
.quality-fair { color: #f39c12; font-weight: bold; }
.quality-poor { color: #e67e22; font-weight: bold; }
.quality-very-poor { color: #e74c3c; font-weight: bold; }

.improvement-card {
background: #f8f9fa;
border: 1px solid #e9ecef;
border-radius: 8px;
padding: 15px;
margin: 10px 0;
border-left: 4px solid #28a745;
}

.improvement-item {
background: #ffffff;
border: 1px solid #dee2e6;
border-radius: 6px;
padding: 12px;
margin: 8px 0;
box-shadow: 0 2px 4px rgba(0,0,0,0.1);
}

.analysis-header {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
padding: 15px;
border-radius: 10px;
margin: 15px 0;
text-align: center;
font-weight: bold;
font-size: 1.2em;
}
</style>
""", unsafe_allow_html=True)
|
|
|
|
|
def initialize_session_state():
    """Seed st.session_state with every key the app expects.

    The session id is special-cased because it requires a database call;
    the remaining keys get plain default values on first run only.
    """
    if 'session_id' not in st.session_state:
        st.session_state.session_id = DatabaseManager().start_session()

    # Fresh default objects are built on every call, but only assigned the
    # first time a key is missing — same semantics as the original chain of ifs.
    defaults = {
        'processing_history': [],
        'current_image': None,
        'processed_images': {},
        'training_in_progress': False,
    }
    for state_key, default_value in defaults.items():
        if state_key not in st.session_state:
            st.session_state[state_key] = default_value
|
|
|
def display_quality_rating(quality_rating: str) -> str:
    """Wrap a quality rating string in an HTML span carrying its CSS class.

    The class name is derived by lowercasing the rating and replacing
    spaces with hyphens (matching the .quality-* rules injected at startup).
    """
    css_class = "quality-" + quality_rating.lower().replace(' ', '-')
    return '<span class="%s">%s</span>' % (css_class, quality_rating)
|
|
|
def create_comparison_chart(original_metrics, enhanced_metrics):
    """Build a grouped bar chart comparing original vs. enhanced sharpness metrics.

    Both arguments are sharpness-metric objects exposing laplacian_variance,
    gradient_magnitude, edge_density and overall_score attributes.
    """
    labels = ['Laplacian Variance', 'Gradient Magnitude', 'Edge Density', 'Overall Score']
    attr_names = ('laplacian_variance', 'gradient_magnitude', 'edge_density', 'overall_score')

    before_values = [getattr(original_metrics, name) for name in attr_names]
    after_values = [getattr(enhanced_metrics, name) for name in attr_names]

    fig = go.Figure()
    fig.add_trace(go.Bar(name='Original', x=labels, y=before_values, marker_color='#e74c3c'))
    fig.add_trace(go.Bar(name='Enhanced', x=labels, y=after_values, marker_color='#27ae60'))

    fig.update_layout(
        title='Image Quality Comparison',
        xaxis_title='Metrics',
        yaxis_title='Values',
        barmode='group',
        height=400
    )
    return fig
|
|
|
def process_image(image: np.ndarray, method: str, parameters: Dict[str, Any]) -> Dict[str, Any]:
    """
    Process image with selected method and parameters.

    Analyzes the input (blur type + sharpness), dispatches to the enhancement
    method chosen in the sidebar, then re-scores the output so the caller can
    report the before/after improvement.

    Args:
        image: Input image (ndarray; loaded elsewhere via cv2-based helpers).
        method: Processing method name exactly as shown in the sidebar selectbox.
        parameters: Method parameters collected from the sidebar widgets.

    Returns:
        dict: On success: processed image, before/after metrics, blur analysis,
            improvement figures, timing, and an echo of method/parameters with
            'success': True. On failure: 'success': False plus 'error',
            'error_details' and 'processing_time'.
    """
    start_time = time.time()

    try:
        # Analyze the input once up front; both results are surfaced to the UI.
        blur_detector = BlurDetector()
        sharpness_analyzer = SharpnessAnalyzer()

        blur_analysis = blur_detector.comprehensive_analysis(image)
        original_metrics = sharpness_analyzer.analyze_sharpness(image)

        # NOTE(review): populated by the progressive path below but never added
        # to the returned dict — confirm whether callers need this data.
        enhancement_history = []
        iterations_performed = 0

        if method == "Progressive Enhancement (Recommended)":
            max_iterations = parameters.get('max_iterations', 5)
            target_sharpness = parameters.get('target_sharpness', 800.0)
            adaptive = parameters.get('adaptive_strategy', True)

            enhancer = IterativeEnhancer()
            enhancement_results = enhancer.progressive_enhancement(
                image, max_iterations, target_sharpness, adaptive
            )
            processed_image = enhancement_results['enhanced_image']

            enhancement_history = enhancement_results.get('enhancement_history', [])
            iterations_performed = enhancement_results.get('iterations_performed', 0)

        elif method == "CNN Enhancement":
            if CNN_AVAILABLE:
                processed_image = enhance_with_cnn(image)
                # Restore the original color information on the CNN output.
                processed_image = preserve_colors(image, processed_image)
            else:
                # TensorFlow unavailable: fall back to classic sharpening.
                # NOTE(review): this call passes amount=/radius= while the
                # "Unsharp Masking" branch below passes sigma=/strength= —
                # one of these keyword sets likely does not match
                # apply_unsharp_masking's real signature; confirm.
                processed_image = apply_unsharp_masking(
                    image, amount=parameters.get('amount', 1.5), radius=parameters.get('radius', 1.0)
                )
                processed_image = preserve_colors(image, processed_image)
        elif method == "Wiener Filter":
            # Map the detector's free-text blur label onto the filter's enum;
            # anything unrecognized degrades to UNKNOWN.
            blur_type_map = {
                'motion_blur': BlurType.MOTION,
                'defocus_blur': BlurType.DEFOCUS,
                'gaussian': BlurType.GAUSSIAN,
                'mixed/complex_blur': BlurType.UNKNOWN,
                'sharp_image': BlurType.UNKNOWN
            }
            blur_type = blur_type_map.get(blur_analysis['primary_type'].lower().replace(' ', '_'), BlurType.UNKNOWN)
            processed_image = apply_wiener_filter(image, blur_type)

            processed_image = preserve_colors(image, processed_image)
        elif method == "Richardson-Lucy":
            iterations = parameters.get('iterations', 10)
            processed_image = apply_richardson_lucy(image, iterations=iterations)

            processed_image = preserve_colors(image, processed_image)
        elif method == "Unsharp Masking":
            sigma = parameters.get('sigma', 1.0)
            strength = parameters.get('strength', 1.5)
            processed_image = apply_unsharp_masking(image, sigma=sigma, strength=strength)

            processed_image = preserve_colors(image, processed_image)
        else:
            # Unknown method name: apply a safe default enhancement.
            processed_image = ColorPreserver.accurate_unsharp_masking(image)

        # Score the result with the same analyzer for a like-for-like comparison.
        enhanced_metrics = sharpness_analyzer.analyze_sharpness(processed_image)

        improvement = enhanced_metrics.overall_score - original_metrics.overall_score
        # Guard against division by zero when the original score is 0.
        improvement_percentage = (improvement / original_metrics.overall_score) * 100 if original_metrics.overall_score > 0 else 0

        processing_time = time.time() - start_time

        return {
            'processed_image': processed_image,
            'original_metrics': original_metrics,
            'enhanced_metrics': enhanced_metrics,
            'blur_analysis': blur_analysis,
            'improvement': improvement,
            'improvement_percentage': improvement_percentage,
            'processing_time': processing_time,
            'method': method,
            'parameters': parameters,
            'success': True
        }

    except Exception as e:
        # Broad catch is deliberate: the UI reports the failure instead of crashing.
        import traceback
        error_details = traceback.format_exc()
        logger.error(f"Error processing image: {e}")
        logger.error(f"Full traceback: {error_details}")

        return {
            'success': False,
            'error': str(e),
            'error_details': error_details,
            'processing_time': time.time() - start_time
        }
|
|
|
def cnn_model_management_ui():
    """Render the CNN model training/management panel.

    Shows model status, the quick/full/custom training buttons, model
    testing/deletion, and training-dataset utilities. All heavy lifting is
    delegated to start_training(), test_cnn_model(), delete_cnn_model(),
    add_dataset_samples() and clear_dataset().
    """
    try:
        model_path = "models/cnn_deblur_model.h5"
        model_exists = os.path.exists(model_path)

        # Model status banner.
        if model_exists:
            st.success("β Trained CNN model available")
            file_size = os.path.getsize(model_path) / (1024*1024)
            st.info(f"π Model size: {file_size:.1f} MB")
        else:
            st.warning("β οΈ No trained CNN model found")
            st.info("π‘ Train a model for best CNN results")

        st.markdown("**π Training Options:**")

        col1, col2 = st.columns(2)

        with col1:
            if st.button("β‘ Quick Train", help="500 samples, 10 epochs (~10-15 min)"):
                with st.spinner("π Starting Quick Training..."):
                    start_training("quick")

            if st.button("π§ͺ Test Model", help="Evaluate existing model", disabled=not model_exists):
                test_cnn_model()

        with col2:
            if st.button("π― Full Train", help="2000 samples, 30 epochs (~45-60 min)"):
                with st.spinner("π Starting Full Training..."):
                    start_training("full")

            if st.button("ποΈ Delete Model", help="Remove trained model", disabled=not model_exists):
                delete_cnn_model()

        with st.expander("βοΈ Custom Training"):
            custom_samples = st.number_input("Training Samples", min_value=100, max_value=5000, value=1000, step=100)
            custom_epochs = st.number_input("Training Epochs", min_value=5, max_value=50, value=20, step=5)

            if st.button("π Start Custom Training"):
                start_training("custom", samples=custom_samples, epochs=custom_epochs)

        with st.expander("π Dataset Management"):
            dataset_path = "data/training_dataset"
            blurred_data_path = os.path.join(dataset_path, "blurred_images.npy")
            clean_data_path = os.path.join(dataset_path, "clean_images.npy")

            dataset_exists = os.path.exists(blurred_data_path) and os.path.exists(clean_data_path)

            if dataset_exists:
                try:
                    blurred_data = np.load(blurred_data_path)
                    st.success(f"β Dataset available: {len(blurred_data)} samples")
                    dataset_size = (os.path.getsize(blurred_data_path) + os.path.getsize(clean_data_path)) / (1024*1024)
                    st.info(f"π Dataset size: {dataset_size:.1f} MB")
                # BUGFIX: was a bare `except:`, which also swallows
                # KeyboardInterrupt/SystemExit. Narrowed to Exception while
                # keeping the best-effort "couldn't read info" behavior.
                except Exception:
                    st.warning("β οΈ Dataset files found but couldn't read info")
            else:
                st.warning("β οΈ No training dataset found")
                st.info("π‘ Dataset will be created during training")

            col_ds1, col_ds2 = st.columns(2)
            with col_ds1:
                if st.button("π Add More Data", help="Add 500 more samples to existing dataset"):
                    add_dataset_samples()

            with col_ds2:
                if st.button("ποΈ Clear Dataset", help="Delete training dataset", disabled=not dataset_exists):
                    clear_dataset()

    except Exception as e:
        # Last-resort guard so a UI error never crashes the whole app page.
        st.error(f"Error in CNN management UI: {e}")
|
|
|
def start_training(mode, samples=None, epochs=None):
    """Start CNN training with progress tracking.

    Args:
        mode: One of "quick" (500 samples / 10 epochs), "full" (2000 / 30)
            or "custom" (uses the samples/epochs arguments).
        samples: Training sample count — only used when mode == "custom".
        epochs: Epoch count — only used when mode == "custom".
    """
    # Resolve the training configuration from the requested mode.
    if mode == "quick":
        num_samples, num_epochs = 500, 10
        estimated_time = "10-15 minutes"
    elif mode == "full":
        num_samples, num_epochs = 2000, 30
        estimated_time = "45-60 minutes"
    elif mode == "custom":
        num_samples, num_epochs = samples, epochs
        # NOTE(review): rough heuristic (samples*epochs/1000 minutes) —
        # not calibrated to actual hardware.
        estimated_time = f"{(samples * epochs / 1000):.1f} minutes"
    else:
        st.error("Invalid training mode")
        return

    st.info(f"π Starting {mode} training...")
    st.info(f"π Configuration: {num_samples} samples, {num_epochs} epochs")
    st.info(f"β±οΈ Estimated time: {estimated_time}")

    progress_bar = st.progress(0)
    status_text = st.empty()

    try:
        status_text.text("π¦ Loading AI modules...")
        progress_bar.progress(5)

        # Import here (not at module top) so a broken TensorFlow install
        # produces a friendly in-page error instead of killing the app.
        try:
            from modules.cnn_deblurring import CNNDeblurModel
            status_text.text("β AI modules loaded successfully")
        except ImportError as ie:
            st.error(f"β Failed to import AI modules: {ie}")
            st.error("π‘ Try reinstalling: pip install tensorflow ml_dtypes")
            return
        except Exception as ie:
            st.error(f"β Module error: {ie}")
            return

        status_text.text("ποΈ Initializing CNN model...")
        progress_bar.progress(10)

        model = CNNDeblurModel()

        status_text.text("π Preparing training data...")
        progress_bar.progress(20)

        # Scan for user-supplied images that can augment the synthetic dataset.
        # NOTE(review): this scans data/training_dataset for image files, but
        # the dataset utilities elsewhere store .npy arrays in the same folder —
        # confirm the intended layout.
        user_images_path = "data/training_dataset"
        user_images = []
        if os.path.exists(user_images_path):
            valid_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif'}
            user_images = [f for f in os.listdir(user_images_path)
                           if any(f.lower().endswith(ext) for ext in valid_extensions)]

        if user_images:
            st.success(f"β Found {len(user_images)} user training images!")
            st.info("π― Your images will be incorporated into the training for better results!")

        status_text.text("π€ Training CNN model... This may take a while...")
        progress_bar.progress(30)

        # Flag checked by main() to show a "training in progress" banner.
        st.session_state.training_in_progress = True

        with st.container():
            st.markdown("### π€ Training Progress")
            st.markdown(f"**Training {num_samples} samples for {num_epochs} epochs...**")
            st.markdown("*Note: This process may take some time. You can navigate to other parts of the app while training continues.*")

        # Blocking call: returns truthy on successful training.
        success = model.train_model(
            epochs=num_epochs,
            batch_size=16,
            validation_split=0.2,
            use_existing_dataset=True,
            num_training_samples=num_samples
        )

        progress_bar.progress(90)

        if success:
            status_text.text("β Training completed successfully!")
            progress_bar.progress(100)

            # Report final evaluation metrics, if available.
            metrics = model.evaluate_model()
            if metrics:
                st.success(f"π Training Complete!")
                st.info(f"π Model Performance:")
                st.info(f"  β’ Loss: {metrics['loss']:.4f}")
                st.info(f"  β’ MAE: {metrics['mae']:.4f}")
                st.info(f"  β’ MSE: {metrics['mse']:.4f}")

                # Coarse qualitative verdict based on final loss.
                if metrics['loss'] < 0.05:
                    st.success("π Excellent performance! Ready for high-quality deblurring!")
                elif metrics['loss'] < 0.1:
                    st.info("π Good performance! Model ready for use.")
                else:
                    st.warning("β οΈ Model trained but may benefit from more training.")

                st.balloons()

            st.session_state.cnn_model_trained = True

        else:
            st.error("β Training failed! Check logs for details.")
            progress_bar.progress(0)

    except Exception as e:
        st.error(f"β Training error: {e}")
        progress_bar.progress(0)

    finally:
        # Always clear the in-progress flag, even on error.
        st.session_state.training_in_progress = False

    # Refresh the page so the new model status is reflected everywhere.
    st.rerun()
|
|
|
def test_cnn_model():
    """Evaluate the saved CNN model and report its performance in the UI.

    Loads models/cnn_deblur_model.h5, runs evaluate_model(), renders
    loss/MAE/MSE metrics plus a qualitative verdict, and shows the model
    file's size and creation date.
    """
    try:
        from modules.cnn_deblurring import CNNDeblurModel

        with st.spinner("π§ͺ Testing CNN model..."):
            model = CNNDeblurModel()

            if model.load_model("models/cnn_deblur_model.h5"):
                st.success("β Model loaded successfully")

                metrics = model.evaluate_model()

                if metrics:
                    st.markdown("### π Model Performance Report")

                    col1, col2, col3 = st.columns(3)
                    with col1:
                        st.metric("Loss", f"{metrics['loss']:.4f}")
                    with col2:
                        st.metric("MAE", f"{metrics['mae']:.4f}")
                    with col3:
                        st.metric("MSE", f"{metrics['mse']:.4f}")

                    # Qualitative verdict based on final loss (stricter
                    # thresholds than the post-training summary).
                    if metrics['loss'] < 0.01:
                        st.success("π **Excellent Performance!** Your model is ready for professional-quality deblurring.")
                    elif metrics['loss'] < 0.05:
                        st.info("π **Good Performance!** Model provides high-quality results.")
                    elif metrics['loss'] < 0.1:
                        st.warning("β οΈ **Fair Performance.** Consider additional training for better results.")
                    else:
                        st.error("π **Poor Performance.** Retraining recommended.")

                    # Model file metadata.
                    model_path = "models/cnn_deblur_model.h5"
                    if os.path.exists(model_path):
                        file_size = os.path.getsize(model_path) / (1024*1024)
                        creation_time = os.path.getctime(model_path)
                        # CLEANUP: removed the redundant local
                        # `from datetime import datetime` — datetime is
                        # already imported at module level.
                        created_date = datetime.fromtimestamp(creation_time).strftime("%Y-%m-%d %H:%M:%S")

                        st.info(f"π Model Size: {file_size:.1f} MB")
                        st.info(f"π Created: {created_date}")

                else:
                    st.error("β Failed to evaluate model")
            else:
                st.error("β Failed to load model")

    except Exception as e:
        st.error(f"β Testing error: {e}")
|
|
|
def delete_cnn_model():
    """Delete the trained CNN model file after an explicit confirmation click.

    Also removes the companion training-history pickle when present, then
    reruns the app so the UI reflects the change.
    """
    model_path = "models/cnn_deblur_model.h5"

    # Require a second, explicit click before destroying anything.
    if not st.button("β οΈ Confirm Delete Model", key="confirm_delete"):
        st.warning("β οΈ Click 'Confirm Delete Model' to permanently delete the trained model")
        return

    try:
        if not os.path.exists(model_path):
            st.warning("β οΈ No model file found to delete")
            return

        os.remove(model_path)
        st.success("β Model deleted successfully!")

        # Drop the training-history sidecar file too, if it exists.
        history_path = model_path.replace('.h5', '_history.pkl')
        if os.path.exists(history_path):
            os.remove(history_path)

        st.rerun()

    except Exception as e:
        st.error(f"β Error deleting model: {e}")
|
|
|
def add_dataset_samples():
    """Generate 500 synthetic training pairs and append them to the on-disk dataset.

    Creates the dataset directory/files on first use; otherwise concatenates
    the new samples onto the existing .npy arrays.
    """
    try:
        from modules.cnn_deblurring import CNNDeblurModel

        with st.spinner("π Adding more training samples..."):
            deblur_model = CNNDeblurModel()

            # Build fresh pairs in memory; persisting is handled below.
            new_blurred, new_clean = deblur_model.create_training_dataset(num_samples=500, save_dataset=False)

            dataset_dir = "data/training_dataset"
            blurred_file = os.path.join(dataset_dir, "blurred_images.npy")
            clean_file = os.path.join(dataset_dir, "clean_images.npy")

            if os.path.exists(blurred_file) and os.path.exists(clean_file):
                # Append onto the existing arrays and rewrite both files.
                merged_blurred = np.concatenate([np.load(blurred_file), new_blurred], axis=0)
                merged_clean = np.concatenate([np.load(clean_file), new_clean], axis=0)

                np.save(blurred_file, merged_blurred)
                np.save(clean_file, merged_clean)

                st.success(f"β Added 500 samples! Total: {len(merged_blurred)} samples")
            else:
                # First run: create the directory and seed the dataset.
                os.makedirs(dataset_dir, exist_ok=True)
                np.save(blurred_file, new_blurred)
                np.save(clean_file, new_clean)

                st.success("β Created new dataset with 500 samples!")

    except Exception as e:
        st.error(f"β Error adding dataset samples: {e}")
|
|
|
def clear_dataset():
    """Delete the on-disk training dataset after an explicit confirmation click."""
    # Require a second, explicit click before destroying anything.
    if not st.button("β οΈ Confirm Clear Dataset", key="confirm_clear_dataset"):
        st.warning("β οΈ Click 'Confirm Clear Dataset' to permanently delete training data")
        return

    try:
        dataset_dir = "data/training_dataset"
        # Remove both dataset arrays; missing files are simply skipped.
        for filename in ("blurred_images.npy", "clean_images.npy"):
            target = os.path.join(dataset_dir, filename)
            if os.path.exists(target):
                os.remove(target)

        st.success("β Training dataset cleared!")
        st.rerun()

    except Exception as e:
        st.error(f"β Error clearing dataset: {e}")
|
|
|
def show_training_dataset_demo(processing_method: str, parameters: Dict[str, Any]):
    """Show a before/after demo with sample metrics and images side by side.

    No live processing happens here: real images/metrics are used when
    matching files exist under data/training_dataset (before) and
    data/trainned_dataset (after); otherwise hard-coded sample metrics
    are displayed.

    Args:
        processing_method: Sidebar method name (display only).
        parameters: Sidebar parameters (currently unused; kept for interface
            compatibility with the live-processing path).
    """
    st.markdown('<h1 class="main-header">π― Training Dataset Demo</h1>', unsafe_allow_html=True)
    st.markdown("**Before/After examples showing sample enhancement results**")
    st.info("π‘ This demo shows sample metrics and comparisons without running live processing")

    # NOTE: "trainned" is the historical on-disk folder name — do not "fix" it.
    dataset_path = "data/training_dataset"
    enhanced_path = "data/trainned_dataset"

    if not os.path.exists(dataset_path):
        st.warning("β οΈ Training dataset folder not found - showing sample data")
        dataset_path = None

    # Collect user-supplied "before" images, if the folder exists.
    training_images = []
    if dataset_path and os.path.exists(dataset_path):
        valid_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif'}
        training_images = [f for f in os.listdir(dataset_path)
                           if any(f.lower().endswith(ext) for ext in valid_extensions)]

    # Fallback metrics shown whenever a real image cannot be analyzed.
    sample_metrics_before = {
        'laplacian_variance': 45.23,
        'gradient_magnitude': 0.312,
        'edge_density': 0.156,
        'brenner_gradient': 1247.5,
        'tenengrad': 892.3,
        'sobel_variance': 78.9,
        'wavelet_energy': 0.234,
        'overall_score': 0.42,
        'quality_rating': 'Fair',
        'blur_score': 2.8,
        'blur_type': 'Motion Blur'
    }

    sample_metrics_after = {
        'laplacian_variance': 89.67,
        'gradient_magnitude': 0.578,
        'edge_density': 0.298,
        'brenner_gradient': 2156.8,
        'tenengrad': 1634.7,
        'sobel_variance': 142.3,
        'wavelet_energy': 0.421,
        'overall_score': 0.78,
        'quality_rating': 'Good',
        'blur_score': 4.2,
        'blur_type': 'Clear'
    }

    st.markdown("### πΈ Select Demo Image")

    demo_option = st.radio(
        "Choose demo source:",
        ["Sample Demo Data", "Training Dataset Images"] if training_images else ["Sample Demo Data"],
        help="Select whether to show sample demo or use your training images"
    )

    selected_image = None
    if demo_option == "Training Dataset Images" and training_images:
        selected_image = st.selectbox(
            "οΏ½ Select Training Image:",
            training_images,
            help="Choose an image from your training dataset"
        )

    if demo_option == "Sample Demo Data" or selected_image:
        before_image = None
        after_image = None
        image_source = "Sample Demo"

        # Try to load a real before image and its matching enhanced version.
        if selected_image and dataset_path:
            try:
                before_image_path = os.path.join(dataset_path, selected_image)
                before_image = cv2.imread(before_image_path)

                # Derive the expected "after" file name from the "before" name.
                if "_before" in selected_image:
                    after_image_name = selected_image.replace("_before", "_after")
                else:
                    name_parts = selected_image.rsplit('.', 1)
                    if len(name_parts) == 2:
                        after_image_name = f"{name_parts[0]}_after.{name_parts[1]}"
                    else:
                        after_image_name = f"{selected_image}_after"

                after_image_path = os.path.join(enhanced_path, after_image_name)
                if os.path.exists(after_image_path):
                    after_image = cv2.imread(after_image_path)

                if before_image is not None:
                    image_source = f"Training: {selected_image}"
                    st.success(f"π Loaded before image: {selected_image}")
                    if after_image is not None:
                        st.success(f"π Found matching after image: {after_image_name}")
                    else:
                        st.warning(f"β οΈ No matching after image found for {selected_image}")
                else:
                    st.warning(f"β οΈ Could not load {selected_image}, using sample data")

            except Exception as e:
                st.error(f"β Error loading images: {e}")
                before_image = None
                after_image = None

        st.markdown("### π **Before/After Comparison**")
        col1, col2 = st.columns([1, 1])

        with col1:
            st.markdown("#### πΈ **Original (Before)**")

            if before_image is not None:
                display_original = display_convert(before_image)
                st.image(display_original, caption=f"Before: {selected_image}", use_container_width=True)
                st.info(f"π **Image Info**: {before_image.shape[1]}Γ{before_image.shape[0]} pixels")
            else:
                st.info("πΌοΈ **Sample Before Image**: 640Γ480 pixels (Blurred Photo)")
                st.markdown("*Add images to data/training_dataset/ to see real before images*")

            st.markdown("#### π **Original Quality Metrics**")

            if before_image is not None:
                # Replace the sample metrics with real ones when possible.
                # NOTE(review): assumes analyze_sharpness().__dict__ exposes the
                # same keys as the sample dicts (quality_rating, blur_score,
                # blur_type) — confirm against SharpnessAnalyzer.
                try:
                    analyzer = SharpnessAnalyzer()
                    real_metrics = analyzer.analyze_sharpness(before_image).__dict__
                    sample_metrics_before = real_metrics
                    st.success("π Using real image metrics")
                except Exception as e:
                    st.warning(f"β οΈ Could not analyze image: {e}, using sample metrics")

            st.metric("π Sharpness Score", f"{sample_metrics_before['laplacian_variance']:.2f}")
            st.metric("π Gradient Magnitude", f"{sample_metrics_before['gradient_magnitude']:.3f}")
            st.metric("π― Edge Density", f"{sample_metrics_before['edge_density']:.3f}")
            st.metric("β‘ Brenner Gradient", f"{sample_metrics_before['brenner_gradient']:.1f}")
            st.metric("π Tenengrad", f"{sample_metrics_before['tenengrad']:.1f}")

            st.markdown("#### π― **Quality Assessment**")
            st.write(f"**Overall Score**: {sample_metrics_before['overall_score']:.2f}/1.0")
            st.write(f"**Quality Rating**: {sample_metrics_before['quality_rating']}")
            st.write(f"**Blur Score**: {sample_metrics_before['blur_score']:.1f}/5.0")
            st.write(f"**Detected Issue**: {sample_metrics_before['blur_type']}")

        with col2:
            st.markdown("#### β¨ **Enhanced (After)**")

            if after_image is not None:
                display_enhanced = display_convert(after_image)
                after_image_name = selected_image.replace("_before", "_after") if "_before" in selected_image else f"{selected_image}_after"
                st.image(display_enhanced, caption=f"After: {after_image_name}", use_container_width=True)
                st.info(f"π **Enhanced Info**: {after_image.shape[1]}Γ{after_image.shape[0]} pixels")
                st.success(f"β Loaded real after image from trainned_dataset/")
            else:
                st.info("π **Sample After Image**: 640Γ480 pixels (AI Enhanced)")
                st.markdown("*Add matching images to data/trainned_dataset/ to see real after images*")

            # BUGFIX: this block previously referenced an undefined name
            # `enhanced_image`, raising NameError whenever both images loaded.
            # The loaded enhanced image is `after_image`.
            if after_image is not None and before_image is not None:
                st.info(f"π **Enhanced Info**: {after_image.shape[1]}Γ{after_image.shape[0]} pixels")
            else:
                st.info(f"π **Enhanced Info**: 640Γ480 pixels (AI Enhanced)")

            st.markdown("#### π **Enhanced Quality Metrics**")

            if after_image is not None:
                # Same key-compatibility assumption as the "before" side above.
                try:
                    analyzer = SharpnessAnalyzer()
                    real_enhanced_metrics = analyzer.analyze_sharpness(after_image).__dict__
                    sample_metrics_after = real_enhanced_metrics
                    st.success("π Using real after image metrics")
                except Exception as e:
                    st.warning(f"β οΈ Could not analyze after image: {e}, using sample metrics")

            improvement_laplacian = sample_metrics_after['laplacian_variance'] - sample_metrics_before['laplacian_variance']
            improvement_gradient = sample_metrics_after['gradient_magnitude'] - sample_metrics_before['gradient_magnitude']
            improvement_edge = sample_metrics_after['edge_density'] - sample_metrics_before['edge_density']
            improvement_brenner = sample_metrics_after['brenner_gradient'] - sample_metrics_before['brenner_gradient']
            improvement_tenengrad = sample_metrics_after['tenengrad'] - sample_metrics_before['tenengrad']

            st.metric("π Sharpness Score", f"{sample_metrics_after['laplacian_variance']:.2f}",
                      delta=f"{improvement_laplacian:+.2f}")
            st.metric("π Gradient Magnitude", f"{sample_metrics_after['gradient_magnitude']:.3f}",
                      delta=f"{improvement_gradient:+.3f}")
            st.metric("π― Edge Density", f"{sample_metrics_after['edge_density']:.3f}",
                      delta=f"{improvement_edge:+.3f}")
            st.metric("β‘ Brenner Gradient", f"{sample_metrics_after['brenner_gradient']:.1f}",
                      delta=f"{improvement_brenner:+.1f}")
            st.metric("π Tenengrad", f"{sample_metrics_after['tenengrad']:.1f}",
                      delta=f"{improvement_tenengrad:+.1f}")

            st.markdown("#### π― **Enhanced Quality Assessment**")
            st.write(f"**Overall Score**: {sample_metrics_after['overall_score']:.2f}/1.0")
            st.write(f"**Quality Rating**: {sample_metrics_after['quality_rating']}")
            st.write(f"**Blur Score**: {sample_metrics_after['blur_score']:.1f}/5.0")
            st.write(f"**Result**: {sample_metrics_after['blur_type']}")

            overall_improvement = ((sample_metrics_after['overall_score'] - sample_metrics_before['overall_score'])
                                   / sample_metrics_before['overall_score'] * 100)
            if overall_improvement > 5:
                st.success(f"π **Enhancement Success**: +{overall_improvement:.1f}% improvement!")
            else:
                st.info(f"π **Enhancement Result**: {overall_improvement:+.1f}% change")

        st.markdown("---")
        st.markdown("### π **Detailed Enhancement Analysis**")

        col_metrics, col_chart = st.columns([1, 1])

        with col_metrics:
            st.markdown("#### π **Key Improvements**")

            metrics_comparison = [
                ("Sharpness (Laplacian)", sample_metrics_before['laplacian_variance'], sample_metrics_after['laplacian_variance']),
                ("Gradient Magnitude", sample_metrics_before['gradient_magnitude'], sample_metrics_after['gradient_magnitude']),
                ("Edge Density", sample_metrics_before['edge_density'], sample_metrics_after['edge_density']),
                ("Brenner Gradient", sample_metrics_before['brenner_gradient'], sample_metrics_after['brenner_gradient']),
                ("Overall Score", sample_metrics_before['overall_score'], sample_metrics_after['overall_score']),
            ]

            # Skip zero baselines to avoid division by zero.
            for metric_name, before_val, after_val in metrics_comparison:
                if before_val != 0:
                    improvement_pct = ((after_val - before_val) / before_val) * 100
                    if improvement_pct > 10:
                        st.success(f"β **{metric_name}**: +{improvement_pct:.1f}% improvement")
                    elif improvement_pct > 0:
                        st.info(f"π **{metric_name}**: +{improvement_pct:.1f}% improvement")
                    elif improvement_pct > -10:
                        st.warning(f"β **{metric_name}**: {improvement_pct:.1f}% change")
                    else:
                        st.error(f"β **{metric_name}**: {improvement_pct:.1f}% decrease")

        with col_chart:
            st.markdown("#### π **Enhancement Method Info**")
            st.write(f"**Method Used**: {processing_method}")
            st.write(f"**Processing Mode**: Demo (Sample Data)")
            st.write(f"**Enhancement Type**: {sample_metrics_after['blur_type']} Result")

            method_descriptions = {
                "Progressive Enhancement (Recommended)": "Multi-stage enhancement with color preservation and iterative refinement",
                "CNN Enhancement": "Deep learning-based deblurring using trained neural network",
                "Wiener Filter": "Frequency domain deconvolution with noise consideration",
                "Richardson-Lucy": "Iterative deconvolution algorithm for point spread function",
                "Unsharp Masking": "Edge enhancement through high-pass filtering"
            }

            if processing_method in method_descriptions:
                st.info(f"**How it works**: {method_descriptions[processing_method]}")

        st.markdown("---")
        st.markdown("### π‘ **Demo Information**")

        col_info1, col_info2 = st.columns([1, 1])

        with col_info1:
            st.info("""
**π Sample Data Mode**
- Shows typical enhancement results
- Displays sample quality metrics
- No real processing performed
- Perfect for demonstration purposes
""")

        with col_info2:
            st.info("""
**π Training Dataset Mode**
- Uses your actual training images
- Shows real quality analysis
- Looks for enhanced versions in trainned_dataset/
- Combines real data with sample metrics
""")

        st.success("β¨ **Ready to use real enhancement?** Uncheck 'Training Dataset Demo' in the sidebar to process images with live AI enhancement!")
|
|
|
| def main():
|
| """Main Streamlit application"""
|
| initialize_session_state()
|
|
|
|
|
| st.markdown('<h1 class="main-header">π― AI Image Deblurring Studio</h1>', unsafe_allow_html=True)
|
| st.markdown("**Advanced AI-powered image deblurring with comprehensive quality analysis**")
|
|
|
|
|
| if st.session_state.get('training_in_progress', False):
|
| st.info("π€ **CNN Model Training in Progress...** Training may take 10-60 minutes depending on configuration. You can continue using other enhancement methods while training.")
|
|
|
|
|
| st.sidebar.title("π§ Processing Options")
|
|
|
|
|
| st.sidebar.markdown("### π§ **Real-Time Enhancement Controls**")
|
|
|
|
|
| available_methods = ["Progressive Enhancement (Recommended)"]
|
| if CNN_AVAILABLE:
|
| available_methods.append("CNN Enhancement")
|
| available_methods.extend(["Wiener Filter", "Richardson-Lucy", "Unsharp Masking"])
|
|
|
| processing_method = st.sidebar.selectbox(
|
| "Select Deblurring Method",
|
| available_methods,
|
| index=0,
|
| key="processing_method"
|
| )
|
|
|
|
|
| if not CNN_AVAILABLE:
|
| st.sidebar.warning("β οΈ CNN Enhancement disabled due to TensorFlow issues. Using fallback methods.")
|
| elif CNN_MODEL_TRAINED:
|
| st.sidebar.success("β
CNN Enhancement ready with trained model")
|
| else:
|
| st.sidebar.info("π‘ CNN available - Train model for best results")
|
|
|
|
|
| st.sidebar.subheader("π Real-Time Parameters")
|
| parameters = {}
|
|
|
| if processing_method == "Progressive Enhancement (Recommended)":
|
| parameters['max_iterations'] = st.sidebar.slider("Maximum Iterations", 1, 8, 5, key="prog_iterations")
|
| parameters['target_sharpness'] = st.sidebar.slider("Target Sharpness", 500.0, 1500.0, 800.0, key="target_sharp")
|
| parameters['adaptive_strategy'] = st.sidebar.checkbox("Adaptive Strategy", value=True, key="adaptive")
|
| elif processing_method == "Richardson-Lucy":
|
| parameters['iterations'] = st.sidebar.slider("Iterations", 1, 30, 10, key="rl_iterations")
|
| elif processing_method == "Wiener Filter":
|
| parameters['noise_variance'] = st.sidebar.slider("Noise Variance", 0.001, 0.1, 0.01, key="wiener_noise")
|
| elif processing_method == "Unsharp Masking":
|
| parameters['sigma'] = st.sidebar.slider("Gaussian Sigma", 0.1, 5.0, 1.0, key="unsharp_sigma")
|
| parameters['strength'] = st.sidebar.slider("Sharpening Strength", 0.5, 3.0, 1.5, key="unsharp_strength")
|
|
|
|
|
| st.sidebar.markdown("---")
|
| real_time_processing = st.sidebar.checkbox("π **Real-Time Processing**", value=True,
|
| help="Automatically process when parameters change")
|
|
|
|
|
| with st.sidebar.expander("π¬ Advanced Options"):
|
| show_analysis = st.checkbox("Show Detailed Analysis", value=True)
|
| auto_save = st.checkbox("Auto-save Results", value=True)
|
| st.session_state.auto_save_enabled = auto_save
|
| compare_methods = st.checkbox("Compare Multiple Methods", value=False)
|
| show_improvement_details = st.checkbox("Show Detailed Improvements", value=True)
|
|
|
|
|
| st.sidebar.markdown("---")
|
| with st.sidebar.expander("π€ CNN Model Management"):
|
| cnn_model_management_ui()
|
|
|
|
|
| if not real_time_processing:
|
| if st.sidebar.button("π **Process Image**", type="primary"):
|
| st.session_state.force_processing = True
|
|
|
|
|
| st.sidebar.markdown("---")
|
| demo_mode = st.sidebar.checkbox(
|
| "π― **Training Dataset Demo**",
|
| value=False,
|
| help="Show before/after examples using images from data/training_dataset/"
|
| )
|
|
|
|
|
| if demo_mode:
|
| try:
|
| show_training_dataset_demo(processing_method, parameters)
|
| return
|
| except Exception as e:
|
| st.error(f"β Demo mode error: {e}")
|
| st.warning("β οΈ Falling back to normal mode")
|
| st.info("π‘ Demo mode disabled due to error. You can still use the normal image upload functionality below.")
|
|
|
|
|
|
|
| col1, col2 = st.columns([1, 1])
|
|
|
| with col1:
|
| st.markdown('<div class="section-header">π€ Image Upload</div>', unsafe_allow_html=True)
|
|
|
| uploaded_file = st.file_uploader(
|
| "Choose an image file",
|
| type=['png', 'jpg', 'jpeg', 'bmp', 'tiff'],
|
| help="Upload a blurry image to enhance"
|
| )
|
|
|
| if uploaded_file is not None:
|
| try:
|
|
|
| image_data = uploaded_file.read()
|
| image = validate_and_load_image(uploaded_file, image_data)
|
|
|
| if image is not None:
|
| st.session_state.current_image = image
|
|
|
| if 'original_uploaded_image' not in st.session_state:
|
| st.session_state.original_uploaded_image = image.copy()
|
|
|
|
|
| st.session_state.uploaded_file = uploaded_file
|
|
|
|
|
| display_image = display_convert(image)
|
| st.session_state.display_original = display_image
|
|
|
|
|
| st.markdown("### πΈ **Original Image**")
|
| st.image(display_image, caption="Uploaded Image", use_container_width=True)
|
|
|
|
|
| if real_time_processing:
|
| st.session_state.should_process = True
|
|
|
|
|
| st.info(f"π **Image Info**: {image.shape[1]}Γ{image.shape[0]} pixels, "
|
| f"{uploaded_file.size/1024:.1f} KB")
|
|
|
|
|
| if show_analysis:
|
| with st.spinner("π Performing detailed image analysis..."):
|
| blur_detector = BlurDetector()
|
| analysis = blur_detector.comprehensive_analysis(image)
|
|
|
| st.markdown("---")
|
| st.markdown("## π **COMPREHENSIVE IMAGE PROBLEM IDENTIFICATION**")
|
| st.markdown("*Detailed technical analysis following image processing principles*")
|
|
|
|
|
| st.markdown("### πΌοΈ **Image Characteristics**")
|
| prop_col1, prop_col2, prop_col3 = st.columns(3)
|
| with prop_col1:
|
| st.metric("Dimensions", analysis['image_dimensions'])
|
| st.metric("Channels", analysis['color_channels'])
|
| with prop_col2:
|
| st.metric("Size Category", analysis['image_size_category'])
|
| st.metric("Processing Difficulty", analysis['processing_difficulty'])
|
| with prop_col3:
|
| st.metric("Dynamic Range", f"{analysis['dynamic_range']:.0f}")
|
| st.metric("Contrast Measure", f"{analysis['contrast_measure']:.1f}")
|
|
|
|
|
| st.markdown("### π― **Primary Problem Identification**")
|
| prob_col1, prob_col2 = st.columns([2, 1])
|
|
|
| with prob_col1:
|
| st.markdown(f"**π Blur Type Detected:** {analysis['primary_type']}")
|
| st.markdown(f"**π Confidence Level:** {analysis['type_confidence']:.1%}")
|
| st.markdown(f"**π’ Severity Classification:** {analysis['severity']}")
|
|
|
|
|
| st.markdown("**π Analysis Reasoning:**")
|
| st.info(analysis['blur_reasoning'])
|
|
|
| with prob_col2:
|
|
|
| st.metric("Sharpness Score", f"{analysis['sharpness_score']:.1f}")
|
| st.metric("Edge Density", f"{analysis['edge_density']:.3f}")
|
| st.metric("Enhancement Priority", analysis['enhancement_priority'])
|
|
|
|
|
| st.markdown("### π¬ **Quantitative Analysis Results**")
|
|
|
| with st.expander("π **Sharpness & Edge Analysis**", expanded=True):
|
| st.markdown(f"**Sharpness Assessment:** {analysis['sharpness_interpretation']}")
|
| st.markdown(f"**Edge Analysis:** {analysis['edge_density_interpretation']}")
|
| st.markdown(f"**Gradient Analysis:** {analysis['gradient_interpretation']}")
|
|
|
| with st.expander("π **Frequency Domain Analysis**", expanded=True):
|
| st.markdown(f"**High-Frequency Content:** {analysis['frequency_domain_analysis']}")
|
| col_freq1, col_freq2 = st.columns(2)
|
| with col_freq1:
|
| st.metric("Avg Gradient", f"{analysis['average_gradient']:.1f}")
|
| st.metric("Max Gradient", f"{analysis['max_gradient']:.1f}")
|
| with col_freq2:
|
| st.metric("High Freq Content", f"{analysis['high_frequency_content']:.2f}")
|
| st.metric("Texture Variance", f"{analysis['texture_variance']:.1f}")
|
|
|
|
|
| if "Motion" in analysis['primary_type']:
|
| st.markdown("### π **Motion Blur Analysis**")
|
| motion_col1, motion_col2 = st.columns(2)
|
| with motion_col1:
|
| st.metric("Motion Angle", f"{analysis['motion_angle']:.1f}Β°")
|
| st.metric("Motion Length", f"{analysis['motion_length']}px")
|
| with motion_col2:
|
| st.markdown(f"**Motion Characteristics:** {analysis['motion_interpretation']}")
|
|
|
| elif "Defocus" in analysis['primary_type']:
|
| st.markdown("### π **Defocus Blur Analysis**")
|
| st.metric("Defocus Score", f"{analysis['defocus_score']:.3f}")
|
| st.markdown(f"**Defocus Characteristics:** {analysis['defocus_interpretation']}")
|
|
|
|
|
| st.markdown("### πͺοΈ **Noise Assessment**")
|
| noise_col1, noise_col2 = st.columns(2)
|
| with noise_col1:
|
| st.metric("Noise Level", f"{analysis['noise_level']:.3f}")
|
| with noise_col2:
|
| st.markdown(f"**Noise Analysis:** {analysis['noise_interpretation']}")
|
|
|
|
|
| if analysis['secondary_issues'][0] != "No significant secondary issues detected":
|
| st.markdown("### β οΈ **Secondary Issues Detected**")
|
| for issue in analysis['secondary_issues']:
|
| st.warning(f"β’ {issue}")
|
|
|
|
|
| st.markdown("### π **Recommended Enhancement Strategy**")
|
| strategy_col1, strategy_col2 = st.columns([2, 1])
|
|
|
| with strategy_col1:
|
| st.markdown(f"**π― Priority Level:** {analysis['enhancement_priority']}")
|
| st.markdown(f"**π Expected Improvement:** {analysis['expected_improvement']}")
|
| st.markdown("**π§ Recommended Methods:**")
|
| for method in analysis['recommended_methods']:
|
| st.markdown(f"β’ {method}")
|
|
|
| with strategy_col2:
|
| st.metric("Processing Difficulty", analysis['processing_difficulty'])
|
|
|
|
|
| with st.expander("π‘ **Detailed Processing Recommendations**", expanded=False):
|
| for recommendation in analysis['detailed_recommendations']:
|
| st.markdown(f"β {recommendation}")
|
|
|
|
|
| with st.expander("π **Technical Analysis Summary**", expanded=False):
|
| st.code(analysis['technical_summary'], language="")
|
|
|
|
|
| with st.expander("π **Detailed Analysis Notes (Educational)**", expanded=False):
|
| st.code(analysis['student_analysis_notes'], language="")
|
|
|
| except Exception as e:
|
| st.error(f"Error loading image: {e}")
|
|
|
| with col2:
|
| st.markdown('<div class="section-header">β¨ Real-Time Enhancement Results</div>', unsafe_allow_html=True)
|
|
|
|
|
| should_process = False
|
|
|
| if st.session_state.get('current_image') is not None:
|
|
|
| if real_time_processing:
|
| should_process = True
|
| st.info("οΏ½ **Real-time processing enabled** - Results update automatically!")
|
| st.info("")
|
| st.info("")
|
| elif st.session_state.get('force_processing', False):
|
| should_process = True
|
| st.session_state.force_processing = False
|
| elif not real_time_processing:
|
| st.warning("βΈοΈ **Manual mode** - Click 'Process Image' in sidebar to update results")
|
|
|
|
|
| if st.session_state.get('current_image') is not None and should_process:
|
| with st.spinner(f"π Applying {processing_method}..."):
|
|
|
| results = process_image(
|
| st.session_state.current_image,
|
| processing_method,
|
| parameters
|
| )
|
|
|
| if results.get('success', True):
|
|
|
| if 'processed_images' not in st.session_state:
|
| st.session_state.processed_images = {}
|
| st.session_state.processed_images[processing_method] = results
|
|
|
|
|
| display_enhanced = display_convert(results['processed_image'])
|
|
|
|
|
| st.session_state.display_enhanced = display_enhanced
|
| st.session_state.enhancement_method = processing_method
|
|
|
|
|
| if (st.session_state.get('auto_save_enabled', True) and
|
| st.session_state.get('uploaded_file') is not None):
|
| try:
|
|
|
| uploaded_file = st.session_state.uploaded_file
|
| image_data = uploaded_file.getvalue()
|
|
|
| log_processing_result(
|
| st.session_state.session_id,
|
| uploaded_file.name,
|
| image_data,
|
| {
|
| 'method': processing_method,
|
| 'parameters': parameters,
|
| 'original_quality': results['original_metrics'].overall_score,
|
| 'enhanced_quality': results['enhanced_metrics'].overall_score,
|
| 'improvement_percentage': results['improvement_percentage'],
|
| 'processing_time': results['processing_time'],
|
| 'blur_type': results['blur_analysis']['primary_type'],
|
| 'blur_confidence': results['blur_analysis']['type_confidence']
|
| }
|
| )
|
| except Exception as e:
|
| logger.error(f"Error saving to database: {e}")
|
|
|
|
|
| if 'display_enhanced' in st.session_state:
|
| st.markdown("### π¨ **Enhanced Image**")
|
| st.image(
|
| st.session_state.display_enhanced,
|
| caption=f"Enhanced with {st.session_state.enhancement_method}",
|
| use_container_width=True
|
| )
|
|
|
|
|
| if st.session_state.enhancement_method in st.session_state.processed_images:
|
| results = st.session_state.processed_images[st.session_state.enhancement_method]
|
|
|
| st.markdown("---")
|
| st.markdown('<div class="analysis-header">π Comprehensive Improvement Analysis Report</div>', unsafe_allow_html=True)
|
|
|
|
|
| improvement = results['improvement_percentage']
|
| original_score = results['original_metrics'].overall_score
|
| enhanced_score = results['enhanced_metrics'].overall_score
|
|
|
|
|
| if improvement > 0:
|
| st.success(f"π― **Overall Quality Improvement: +{improvement:.1f}%**")
|
| elif improvement == 0:
|
| st.info("π **No significant change in overall quality**")
|
| else:
|
| st.warning(f"β οΈ **Quality decreased by {abs(improvement):.1f}%**")
|
|
|
|
|
| col_before, col_after, col_change = st.columns(3)
|
|
|
| with col_before:
|
| st.markdown("#### π **Before Enhancement**")
|
| st.metric("Overall Score", f"{original_score:.3f}")
|
| st.metric("Laplacian Variance", f"{results['original_metrics'].laplacian_variance:.1f}")
|
| st.metric("Edge Density", f"{results['original_metrics'].edge_density:.3f}")
|
| st.metric("Gradient Magnitude", f"{results['original_metrics'].gradient_magnitude:.1f}")
|
| st.metric("Tenengrad", f"{results['original_metrics'].tenengrad:.1f}")
|
|
|
| with col_after:
|
| st.markdown("#### β¨ **After Enhancement**")
|
| st.metric("Overall Score", f"{enhanced_score:.3f}")
|
| st.metric("Laplacian Variance", f"{results['enhanced_metrics'].laplacian_variance:.1f}")
|
| st.metric("Edge Density", f"{results['enhanced_metrics'].edge_density:.3f}")
|
| st.metric("Gradient Magnitude", f"{results['enhanced_metrics'].gradient_magnitude:.1f}")
|
| st.metric("Tenengrad", f"{results['enhanced_metrics'].tenengrad:.1f}")
|
|
|
| with col_change:
|
| st.markdown("#### π **Improvements**")
|
| score_change = enhanced_score - original_score
|
| laplacian_change = results['enhanced_metrics'].laplacian_variance - results['original_metrics'].laplacian_variance
|
| edge_change = results['enhanced_metrics'].edge_density - results['original_metrics'].edge_density
|
| gradient_change = results['enhanced_metrics'].gradient_magnitude - results['original_metrics'].gradient_magnitude
|
| tenengrad_change = results['enhanced_metrics'].tenengrad - results['original_metrics'].tenengrad
|
|
|
| st.metric("Score Change", f"{score_change:+.3f}")
|
| st.metric("Laplacian Ξ", f"{laplacian_change:+.1f}")
|
| st.metric("Edge Density Ξ", f"{edge_change:+.3f}")
|
| st.metric("Gradient Ξ", f"{gradient_change:+.1f}")
|
| st.metric("Tenengrad Ξ", f"{tenengrad_change:+.1f}")
|
|
|
|
|
| st.markdown("### π **What We Improved**")
|
|
|
| improvements_made = []
|
|
|
|
|
| if laplacian_change > 5:
|
| improvements_made.append(f"π₯ **Laplacian Sharpness**: Increased Laplacian variance by {laplacian_change:.1f} points, significantly improving edge sharpness")
|
|
|
| if edge_change > 0.01:
|
| improvements_made.append(f"β‘ **Edge Definition**: Enhanced edge density by {edge_change:.3f}, improving object boundaries and detail clarity")
|
|
|
| if gradient_change > 5:
|
| improvements_made.append(f"π **Gradient Enhancement**: Improved gradient magnitude by {gradient_change:.1f} points, enhancing texture detail")
|
|
|
| if tenengrad_change > 10:
|
| improvements_made.append(f"π‘ **Tenengrad Improvement**: Enhanced Tenengrad score by {tenengrad_change:.1f}, indicating better focus quality")
|
|
|
|
|
| brenner_change = results['enhanced_metrics'].brenner_gradient - results['original_metrics'].brenner_gradient
|
| sobel_change = results['enhanced_metrics'].sobel_variance - results['original_metrics'].sobel_variance
|
| wavelet_change = results['enhanced_metrics'].wavelet_energy - results['original_metrics'].wavelet_energy
|
|
|
| if brenner_change > 5:
|
| improvements_made.append(f"π― **Brenner Gradient**: Improved by {brenner_change:.1f}, indicating better focus measurement")
|
|
|
| if sobel_change > 5:
|
| improvements_made.append(f"π **Sobel Variance**: Enhanced by {sobel_change:.1f}, showing improved edge detection response")
|
|
|
| if wavelet_change > 0.1:
|
| improvements_made.append(f"π **Wavelet Energy**: Increased by {wavelet_change:.3f}, indicating enhanced high-frequency content")
|
|
|
|
|
| if st.session_state.get('current_image') is not None:
|
| color_validation = ColorPreserver.validate_color_preservation(
|
| st.session_state.current_image, results['processed_image']
|
| )
|
| if color_validation.get('colors_preserved', False):
|
| improvements_made.append(f"π¨ **Color Fidelity**: Preserved original colors perfectly (difference: {color_validation['color_difference']:.2f})")
|
|
|
|
|
| blur_type = results['blur_analysis']['primary_type']
|
| if 'motion' in blur_type.lower():
|
| improvements_made.append(f"π **Motion Blur Correction**: Addressed {blur_type} with specialized deblurring algorithms")
|
| elif 'defocus' in blur_type.lower():
|
| improvements_made.append(f"π **Focus Restoration**: Corrected {blur_type} to restore image clarity")
|
| elif 'gaussian' in blur_type.lower():
|
| improvements_made.append(f"π **Gaussian Blur Reduction**: Reduced {blur_type} using advanced filtering techniques")
|
|
|
|
|
| processing_time = results['processing_time']
|
| improvements_made.append(f"β‘ **Fast Processing**: Completed enhancement in {processing_time:.2f} seconds")
|
|
|
|
|
| if improvements_made:
|
| for improvement in improvements_made:
|
| st.markdown(f"β
{improvement}")
|
| else:
|
| st.info("π Image was already in good condition - minimal changes applied")
|
|
|
|
|
| st.markdown("### π οΈ **Enhancement Method Details**")
|
| method = st.session_state.enhancement_method
|
|
|
| if method == "Progressive Enhancement (Recommended)":
|
| st.info("π **Progressive Enhancement**: Applied multiple algorithms iteratively for optimal results")
|
| elif method == "CNN Enhancement":
|
| st.info("π§ **AI-Powered Enhancement**: Used deep learning neural networks for intelligent deblurring")
|
| elif method == "Wiener Filter":
|
| st.info("π **Statistical Deblurring**: Applied Wiener filtering based on noise and blur characteristics")
|
| elif method == "Richardson-Lucy":
|
| st.info("π¬ **Iterative Deconvolution**: Used Richardson-Lucy algorithm for precise blur removal")
|
| elif method == "Unsharp Masking":
|
| st.info("β‘ **Edge Enhancement**: Applied unsharp masking to sharpen edges and details")
|
|
|
|
|
| with st.expander("π **Complete Sharpness Metrics Comparison**", expanded=False):
|
| metric_data = []
|
|
|
| metrics = [
|
| ("Laplacian Variance", "laplacian_variance"),
|
| ("Gradient Magnitude", "gradient_magnitude"),
|
| ("Edge Density", "edge_density"),
|
| ("Brenner Gradient", "brenner_gradient"),
|
| ("Tenengrad", "tenengrad"),
|
| ("Sobel Variance", "sobel_variance"),
|
| ("Wavelet Energy", "wavelet_energy"),
|
| ("Overall Score", "overall_score")
|
| ]
|
|
|
| for metric_name, attr_name in metrics:
|
| before_val = getattr(results['original_metrics'], attr_name)
|
| after_val = getattr(results['enhanced_metrics'], attr_name)
|
| change = after_val - before_val
|
| change_pct = (change / before_val * 100) if before_val != 0 else 0
|
|
|
| metric_data.append({
|
| 'Metric': metric_name,
|
| 'Before': f"{before_val:.3f}",
|
| 'After': f"{after_val:.3f}",
|
| 'Change': f"{change:+.3f}",
|
| 'Change %': f"{change_pct:+.1f}%"
|
| })
|
|
|
| st.dataframe(metric_data, use_container_width=True)
|
|
|
|
|
| st.markdown("### π― **Quality Assessment**")
|
|
|
| quality_rating = results['enhanced_metrics'].quality_rating.lower()
|
|
|
| if enhanced_score > 0.8:
|
| st.success("π **Excellent Quality**: Image shows outstanding clarity and detail")
|
| elif enhanced_score > 0.6:
|
| st.info("π **Good Quality**: Image has good clarity with well-defined details")
|
| elif enhanced_score > 0.4:
|
| st.warning("β οΈ **Fair Quality**: Image shows some improvement but may benefit from additional processing")
|
| else:
|
| st.error("π§ **Needs More Work**: Consider trying different enhancement methods")
|
|
|
|
|
| st.info(f"**Automated Quality Rating**: {results['enhanced_metrics'].quality_rating}")
|
| else:
|
| st.info("π₯ **Upload an image** on the left to see the enhanced result here")
|
|
|
|
|
| if st.session_state.get('current_image') is not None and should_process and results.get('success', True):
|
|
|
| color_validation = ColorPreserver.validate_color_preservation(
|
| st.session_state.current_image, results['processed_image']
|
| )
|
|
|
| if color_validation.get('colors_preserved', False):
|
| st.success(f"β
Colors perfectly preserved! (Difference: {color_validation['color_difference']:.2f})")
|
| else:
|
| st.warning(f"β οΈ Minor color variation detected (Difference: {color_validation.get('color_difference', 'N/A')})")
|
|
|
|
|
| improvement = results['improvement_percentage']
|
| improvement_class = "improvement-positive" if improvement > 0 else "improvement-negative"
|
|
|
|
|
| if (processing_method == "Progressive Enhancement (Recommended)" and
|
| 'enhancement_history' in results and results['enhancement_history']):
|
| st.subheader("π Progressive Enhancement History")
|
|
|
| enhancement_history = results['enhancement_history']
|
| iterations_performed = results.get('iterations_performed', len(enhancement_history))
|
|
|
| if enhancement_history:
|
|
|
| iteration_data = []
|
| for hist in enhancement_history:
|
| iteration_data.append({
|
| 'Iteration': hist['iteration'],
|
| 'Method': hist['method'],
|
| 'Sharpness Before': hist['sharpness_before'],
|
| 'Sharpness After': hist['sharpness_after'],
|
| 'Improvement': hist['improvement']
|
| })
|
|
|
|
|
| st.dataframe(iteration_data, use_container_width=True)
|
|
|
|
|
| total_improvement = enhancement_history[-1]['sharpness_after'] - enhancement_history[0]['sharpness_before']
|
| st.success(f"π― **{iterations_performed} iterations completed!** Total sharpness improvement: +{total_improvement:.1f}")
|
|
|
|
|
| methods_used = [hist['method'] for hist in enhancement_history]
|
| st.info(f"**Methods applied:** {' β '.join(methods_used)}")
|
| else:
|
| st.info("Target sharpness achieved in first iteration!")
|
|
|
| st.markdown(f"""
|
| <div class="metric-card">
|
| <h4>π Enhancement Results</h4>
|
| <p><strong>Processing Time:</strong> {results['processing_time']:.2f} seconds</p>
|
| <p><strong>Quality Improvement:</strong>
|
| <span class="{improvement_class}">{improvement:+.1f}%</span></p>
|
| </div>
|
| """, unsafe_allow_html=True)
|
|
|
|
|
| st.subheader("π― Detailed Improvement Analysis")
|
|
|
|
|
| original_analysis = results.get('original_analysis', {})
|
| enhanced_analysis = results.get('enhanced_analysis', {})
|
| original_metrics = results.get('original_metrics')
|
| enhanced_metrics = results.get('enhanced_metrics')
|
|
|
| if original_analysis and enhanced_analysis and original_metrics and enhanced_metrics:
|
| improvements = create_detailed_improvement_analysis(
|
| original_analysis, enhanced_analysis,
|
| original_metrics, enhanced_metrics
|
| )
|
|
|
| if improvements:
|
|
|
| improvement_data = []
|
| for metric, data in improvements.items():
|
| improvement_data.append({
|
| 'Metric': metric,
|
| 'Original': f"{data['original']:.2f}",
|
| 'Enhanced': f"{data['enhanced']:.2f}",
|
| 'Improvement': f"{data['improvement']:+.2f}",
|
| 'Change %': f"{data['improvement_pct']:+.1f}%",
|
| 'Status': data['status'],
|
| 'Target': data['target'],
|
| 'Next Step': data['next_step']
|
| })
|
|
|
|
|
| df_improvements = pd.DataFrame(improvement_data)
|
| st.dataframe(
|
| df_improvements,
|
| width=None,
|
| hide_index=True,
|
| column_config={
|
| "Status": st.column_config.TextColumn("Status", width="small"),
|
| "Change %": st.column_config.TextColumn("Change %", width="small"),
|
| "Target": st.column_config.TextColumn("Target", width="medium"),
|
| "Next Step": st.column_config.TextColumn("Next Step", width="medium")
|
| }
|
| )
|
|
|
|
|
| st.subheader("π§ Improvement Recommendations")
|
|
|
| recommendations = create_improvement_recommendations(original_analysis, enhanced_analysis)
|
|
|
| if recommendations:
|
| for i, rec in enumerate(recommendations):
|
| with st.expander(f"{rec['priority']} {rec['area']} - {rec['issue']}", expanded=i==0):
|
| st.write(f"**Problem:** {rec['issue']}")
|
| st.write(f"**Solution:** {rec['solution']}")
|
| st.write(f"**Expected Result:** {rec['expected_gain']}")
|
|
|
|
|
| if i == 0:
|
| if 'Progressive' in rec['solution']:
|
| if st.button(f"π Apply Progressive Enhancement", key=f"prog_{processing_method}"):
|
| st.session_state.iterative_method = 'progressive'
|
| st.rerun()
|
| elif 'Richardson-Lucy' in rec['solution']:
|
| if st.button(f"π Try Richardson-Lucy", key=f"rl_{processing_method}"):
|
| st.session_state.selected_method = 'Richardson-Lucy Deconvolution'
|
| st.rerun()
|
| elif 'Wiener' in rec['solution']:
|
| if st.button(f"β‘ Try Wiener Filter", key=f"wf_{processing_method}"):
|
| st.session_state.selected_method = 'Wiener Filter'
|
| st.rerun()
|
| elif 'Unsharp' in rec['solution']:
|
| if st.button(f"β¨ Apply Unsharp Masking", key=f"um_{processing_method}"):
|
| st.session_state.selected_method = 'Unsharp Masking'
|
| st.rerun()
|
| else:
|
| st.success("π **Excellent!** Your image quality is optimal. No further improvements needed.")
|
|
|
|
|
| st.markdown("---")
|
| col_enhance1, col_enhance2, col_enhance3 = st.columns(3)
|
|
|
| with col_enhance1:
|
| if st.button("π Enhance Again", help="Apply another round of enhancement"):
|
|
|
| with st.spinner("π Applying additional enhancement..."):
|
|
|
| additional_results = process_image(results['processed_image'], processing_method, parameters)
|
|
|
| if additional_results:
|
| st.markdown("---")
|
| st.subheader("π **Additional Enhancement: Before vs After**")
|
|
|
|
|
| re_enhanced_display = display_convert(additional_results['processed_image'])
|
|
|
| re_col1, re_col2 = st.columns(2)
|
|
|
| with re_col1:
|
| st.markdown("#### π¨ Previous Enhancement")
|
| st.image(
|
| display_enhanced,
|
| caption="Before Re-Enhancement",
|
| use_container_width=True
|
| )
|
|
|
| with re_col2:
|
| st.markdown("#### π Re-Enhanced Result")
|
| st.image(
|
| re_enhanced_display,
|
| caption="After Re-Enhancement",
|
| use_container_width=True
|
| )
|
|
|
|
|
| additional_improvement = additional_results['improvement_percentage']
|
|
|
| if additional_improvement > 0:
|
| st.success(f"β
**Additional improvement achieved!** +{additional_improvement:.1f}% quality gain")
|
| else:
|
| st.info("βΉοΈ **Quality maintained.** Image may already be at optimal level for this method.")
|
|
|
|
|
| st.session_state.current_image = additional_results['processed_image']
|
|
|
|
|
| re_pil_image = Image.fromarray(cv2.cvtColor(additional_results['processed_image'], cv2.COLOR_BGR2RGB))
|
| re_buffer = io.BytesIO()
|
| re_pil_image.save(re_buffer, format="PNG")
|
| re_buffer.seek(0)
|
|
|
| st.download_button(
|
| label="πΎ Download Re-Enhanced Image",
|
| data=re_buffer.getvalue(),
|
| file_name=f"re_enhanced_{uploaded_file.name if uploaded_file else 'image'}.png",
|
| mime="image/png"
|
| )
|
|
|
| with col_enhance2:
|
| if st.button("π― Auto-Enhance Until Perfect", help="Keep enhancing until target quality"):
|
|
|
| with st.spinner("π Auto-enhancing to perfect quality..."):
|
| enhancer = IterativeEnhancer()
|
| auto_results = enhancer.progressive_enhancement(
|
| results['processed_image'],
|
| max_iterations=8,
|
| target_sharpness=1200.0,
|
| adaptive=True
|
| )
|
|
|
|
|
| st.markdown("---")
|
| st.subheader("π **Auto-Enhancement: Before vs Final Result**")
|
|
|
|
|
| auto_enhanced_display = display_convert(auto_results['enhanced_image'])
|
|
|
| auto_col1, auto_col2 = st.columns(2)
|
|
|
| with auto_col1:
|
| st.markdown("#### π¨ Initial Enhancement")
|
| st.image(
|
| display_enhanced,
|
| caption="Before Auto-Enhancement",
|
| use_container_width=True
|
| )
|
|
|
| with auto_col2:
|
| st.markdown("#### π― Auto-Enhanced Final")
|
| st.image(
|
| auto_enhanced_display,
|
| caption="After Auto-Enhancement",
|
| use_container_width=True
|
| )
|
|
|
|
|
| final_sharpness = auto_results.get('final_sharpness', 0)
|
| total_improvement = auto_results.get('total_improvement', 0)
|
| iterations_used = auto_results.get('iterations_performed', 0)
|
|
|
| if auto_results.get('target_achieved', False):
|
| st.success(f"π **Perfect quality achieved!**")
|
| st.success(f"Final sharpness: {final_sharpness:.1f} | Improvement: +{total_improvement:.1f} | Iterations: {iterations_used}")
|
| else:
|
| st.info(f"π **Maximum improvement achieved!**")
|
| st.info(f"Final sharpness: {final_sharpness:.1f} | Improvement: +{total_improvement:.1f} | Iterations: {iterations_used}")
|
|
|
|
|
| if 'enhancement_history' in auto_results and auto_results['enhancement_history']:
|
| with st.expander("π Auto-Enhancement History"):
|
| auto_history_data = []
|
| for hist in auto_results['enhancement_history']:
|
| auto_history_data.append({
|
| 'Iteration': hist['iteration'],
|
| 'Method': hist['method'],
|
| 'Sharpness Before': hist['sharpness_before'],
|
| 'Sharpness After': hist['sharpness_after'],
|
| 'Improvement': hist['improvement']
|
| })
|
| st.dataframe(auto_history_data, use_container_width=True)
|
|
|
|
|
| st.session_state.current_image = auto_results['enhanced_image']
|
|
|
|
|
| auto_pil_image = Image.fromarray(cv2.cvtColor(auto_results['enhanced_image'], cv2.COLOR_BGR2RGB))
|
| auto_buffer = io.BytesIO()
|
| auto_pil_image.save(auto_buffer, format="PNG")
|
| auto_buffer.seek(0)
|
|
|
| st.download_button(
|
| label="πΎ Download Auto-Enhanced Image",
|
| data=auto_buffer.getvalue(),
|
| file_name=f"auto_enhanced_{uploaded_file.name if uploaded_file else 'image'}.png",
|
| mime="image/png"
|
| )
|
|
|
| with col_enhance3:
|
| if st.button("π Reset to Original", help="Go back to original uploaded image"):
|
| if hasattr(st.session_state, 'original_uploaded_image'):
|
|
|
| st.session_state.current_image = st.session_state.original_uploaded_image.copy()
|
|
|
|
|
| st.success("β
**Reset to original image!**")
|
|
|
|
|
| reset_display = display_convert(st.session_state.original_uploaded_image)
|
| st.image(reset_display, caption="Reset to Original Image", use_container_width=True)
|
|
|
| st.info("π You can now apply any enhancement method to the original image again.")
|
| else:
|
| st.error("β Original image not found in session.")
|
|
|
|
|
| if auto_save and uploaded_file:
|
| log_processing_result(
|
| st.session_state.session_id,
|
| uploaded_file.name,
|
| image_data,
|
| {
|
| 'method': processing_method,
|
| 'parameters': parameters,
|
| 'original_quality': results['original_metrics'].overall_score,
|
| 'enhanced_quality': results['enhanced_metrics'].overall_score,
|
| 'improvement_percentage': improvement,
|
| 'processing_time': results['processing_time'],
|
| 'blur_type': results['blur_analysis']['primary_type'],
|
| 'blur_confidence': results['blur_analysis']['type_confidence']
|
| }
|
| )
|
|
|
|
|
| if 'processed_image' in results:
|
|
|
| pil_image = Image.fromarray(cv2.cvtColor(results['processed_image'], cv2.COLOR_BGR2RGB))
|
| buffer = io.BytesIO()
|
| pil_image.save(buffer, format='PNG')
|
| buffer.seek(0)
|
|
|
| st.download_button(
|
| label="πΎ Download Enhanced Image",
|
| data=buffer,
|
| file_name=f"enhanced_{uploaded_file.name}",
|
| mime="image/png"
|
| )
|
|
|
|
|
| if st.session_state.current_image is not None and show_analysis:
|
| st.markdown('<div class="section-header">π Detailed Analysis</div>', unsafe_allow_html=True)
|
|
|
| if processing_method in st.session_state.processed_images:
|
| results = st.session_state.processed_images[processing_method]
|
|
|
| col_analysis1, col_analysis2 = st.columns(2)
|
|
|
| with col_analysis1:
|
| st.subheader("Original Image Quality")
|
| original_metrics = results['original_metrics']
|
|
|
| st.markdown(f"**Overall Score:** {original_metrics.overall_score:.3f}")
|
| st.markdown(f"**Quality Rating:** {display_quality_rating(original_metrics.quality_rating)}",
|
| unsafe_allow_html=True)
|
| st.markdown(f"**Laplacian Variance:** {original_metrics.laplacian_variance:.3f}")
|
| st.markdown(f"**Edge Density:** {original_metrics.edge_density:.3f}")
|
|
|
| with col_analysis2:
|
| st.subheader("Enhanced Image Quality")
|
| enhanced_metrics = results['enhanced_metrics']
|
|
|
| st.markdown(f"**Overall Score:** {enhanced_metrics.overall_score:.3f}")
|
| st.markdown(f"**Quality Rating:** {display_quality_rating(enhanced_metrics.quality_rating)}",
|
| unsafe_allow_html=True)
|
| st.markdown(f"**Laplacian Variance:** {enhanced_metrics.laplacian_variance:.3f}")
|
| st.markdown(f"**Edge Density:** {enhanced_metrics.edge_density:.3f}")
|
|
|
|
|
| st.plotly_chart(
|
| create_comparison_chart(original_metrics, enhanced_metrics),
|
| use_container_width=True
|
| )
|
|
|
|
|
| st.markdown('<div class="section-header">π Processing History</div>', unsafe_allow_html=True)
|
|
|
| col_hist1, col_hist2 = st.columns([2, 1])
|
|
|
| with col_hist1:
|
|
|
| db_manager = DatabaseManager()
|
|
|
|
|
| session_history = db_manager.get_processing_history(
|
| session_id=st.session_state.session_id,
|
| limit=10
|
| )
|
|
|
|
|
| if not session_history:
|
| recent_history = db_manager.get_processing_history(limit=10)
|
| history_title = "Recent Processing Activity (All Sessions)"
|
| else:
|
| recent_history = session_history
|
| history_title = "Current Session Processing"
|
|
|
| if recent_history:
|
| st.subheader(history_title)
|
| for record in recent_history:
|
| with st.expander(f"πΌοΈ {record.original_filename} - {record.processing_method}"):
|
| col_rec1, col_rec2 = st.columns(2)
|
| with col_rec1:
|
| st.write(f"**Processed:** {record.timestamp[:19]}")
|
| st.write(f"**Method:** {record.processing_method}")
|
| st.write(f"**Processing Time:** {record.processing_time_seconds:.2f}s")
|
| with col_rec2:
|
| st.write(f"**Improvement:** {record.improvement_percentage:+.1f}%")
|
| st.write(f"**Original Quality:** {record.original_quality_score:.3f}")
|
| st.write(f"**Enhanced Quality:** {record.enhanced_quality_score:.3f}")
|
| else:
|
| st.info("No processing history yet. Upload and process some images!")
|
|
|
| with col_hist2:
|
| st.subheader("Session Statistics")
|
| session_stats = db_manager.get_session_statistics(st.session_state.session_id)
|
|
|
| if session_stats.get('processing_stats') and session_stats['processing_stats'].get('total_processed', 0) > 0:
|
|
|
| stats = session_stats['processing_stats']
|
| st.metric("Images Processed", int(stats.get('total_processed', 0)))
|
| avg_improvement = stats.get('avg_improvement', 0) or 0
|
| st.metric("Average Improvement", f"{avg_improvement:.1f}%")
|
| avg_time = stats.get('avg_processing_time', 0) or 0
|
| st.metric("Avg Processing Time", f"{avg_time:.2f}s")
|
| else:
|
|
|
| global_stats = db_manager.get_global_statistics()
|
| if global_stats.get('processing_stats'):
|
| stats = global_stats['processing_stats']
|
| st.metric("Images Processed", int(stats.get('total_processed', 0)))
|
| avg_improvement = stats.get('avg_improvement', 0) or 0
|
| st.metric("Average Improvement", f"{avg_improvement:.1f}%")
|
| avg_time = stats.get('avg_processing_time', 0) or 0
|
| st.metric("Avg Processing Time", f"{avg_time:.2f}s")
|
| else:
|
| st.metric("Images Processed", 0)
|
| st.metric("Average Improvement", "0.0%")
|
| st.metric("Avg Processing Time", "0.00s")
|
|
|
|
|
| st.markdown("---")
|
| st.markdown("""
|
| <div style='text-align: center; color: #7f8c8d;'>
|
| <p>AI Image Deblurring Studio | Advanced Computer Vision & Deep Learning</p>
|
| <p>Session ID: {}</p>
|
| </div>
|
| """.format(st.session_state.session_id), unsafe_allow_html=True)
|
|
|
def create_detailed_improvement_analysis(original_analysis: Dict[str, Any], enhanced_analysis: Dict[str, Any], original_metrics, enhanced_metrics) -> Dict[str, Dict[str, Any]]:

    """Build a per-metric before/after improvement breakdown with actionable insights.

    Compares the blur-analysis dicts and quality-metrics objects for the original
    and enhanced images across three axes: sharpness score, edge density, and
    overall quality score.

    Args:
        original_analysis: Blur-analysis dict for the source image; must contain
            numeric 'sharpness_score' and 'edge_density' keys.
        enhanced_analysis: Same-shaped dict for the processed image.
        original_metrics: Quality-metrics object exposing ``overall_score``
            (presumably a SharpnessAnalyzer result — TODO confirm exact type).
        enhanced_metrics: Same-typed object for the processed image.

    Returns:
        Dict keyed by 'Sharpness Score', 'Edge Clarity', and 'Overall Quality';
        each value holds 'original', 'enhanced', 'improvement',
        'improvement_pct', 'status' (emoji-tagged label), 'target', and
        'next_step'. Returns an empty dict if any lookup/attribute access fails.
    """

    try:



        improvements = {}



        # --- Sharpness: raw difference plus percent relative to the original.
        sharpness_diff = enhanced_analysis['sharpness_score'] - original_analysis['sharpness_score']

        improvements['Sharpness Score'] = {

            'original': original_analysis['sharpness_score'],

            'enhanced': enhanced_analysis['sharpness_score'],

            'improvement': sharpness_diff,

            # Guard against division by zero when the original score is 0.
            'improvement_pct': (sharpness_diff / original_analysis['sharpness_score']) * 100 if original_analysis['sharpness_score'] > 0 else 0,

            'status': 'β Excellent' if sharpness_diff > 200 else ('π‘ Good' if sharpness_diff > 50 else ('π΄ Minimal' if sharpness_diff > 0 else 'β No Change')),

            'target': 'Target: 800+ for sharp images',

            'next_step': 'Apply Progressive Enhancement' if enhanced_analysis['sharpness_score'] < 800 else 'Quality achieved!'

        }



        # --- Edge density: same structure, thresholds tuned for 0-1 scale values.
        edge_diff = enhanced_analysis['edge_density'] - original_analysis['edge_density']

        improvements['Edge Clarity'] = {

            'original': original_analysis['edge_density'],

            'enhanced': enhanced_analysis['edge_density'],

            'improvement': edge_diff,

            'improvement_pct': (edge_diff / original_analysis['edge_density']) * 100 if original_analysis['edge_density'] > 0 else 0,

            'status': 'β Sharp' if edge_diff > 0.02 else ('π‘ Moderate' if edge_diff > 0.01 else 'π΄ Minimal'),

            'target': 'Target: >0.1 for clear edges',

            'next_step': 'Try Richardson-Lucy' if enhanced_analysis['edge_density'] < 0.1 else 'Edges well-defined!'

        }



        # --- Overall quality: read from the metrics objects (attribute access),
        # unlike the dict lookups above.
        quality_diff = enhanced_metrics.overall_score - original_metrics.overall_score

        improvements['Overall Quality'] = {

            'original': original_metrics.overall_score,

            'enhanced': enhanced_metrics.overall_score,

            'improvement': quality_diff,

            'improvement_pct': (quality_diff / original_metrics.overall_score) * 100 if original_metrics.overall_score > 0 else 0,

            'status': 'π Excellent' if quality_diff > 50 else ('β Good' if quality_diff > 20 else ('π‘ Moderate' if quality_diff > 5 else 'π΄ Minimal')),

            'target': 'Target: >80 for high quality',

            'next_step': 'Try Progressive Enhancement' if enhanced_metrics.overall_score < 80 else 'High quality achieved!'

        }



        return improvements



    # Best-effort: log and return an empty mapping so the UI can degrade gracefully.
    except Exception as e:

        logger.error(f"Error creating improvement analysis: {e}")

        return {}
|
|
|
def create_improvement_recommendations(original_analysis, enhanced_analysis):

    """Suggest concrete next processing steps from the enhanced image's blur analysis.

    Inspects the post-enhancement sharpness score, dominant blur type, and noise
    level, and emits prioritized recommendation records. ``original_analysis`` is
    accepted for interface symmetry but not consulted.

    Args:
        original_analysis: Blur-analysis dict for the source image (unused).
        enhanced_analysis: Blur-analysis dict for the processed image; read via
            ``.get`` with safe defaults ('sharpness_score', 'primary_type',
            'noise_level').

    Returns:
        List of dicts, each with 'priority', 'area', 'issue', 'solution', and
        'expected_gain' keys. Empty list on any error.
    """

    try:

        sharpness = enhanced_analysis.get('sharpness_score', 0)
        blur_type = enhanced_analysis.get('primary_type', 'Unknown')
        noise = enhanced_analysis.get('noise_level', 0)

        recs = []

        # Sharpness tier — at most one of these three fires.
        if sharpness < 300:
            recs.append({
                'priority': 'π΄ Critical',
                'area': 'Sharpness Enhancement',
                'issue': f'Very low sharpness ({sharpness:.1f})',
                'solution': 'Apply Progressive Enhancement with 6+ iterations',
                'expected_gain': '+300-500 sharpness points'
            })
        elif sharpness < 600:
            recs.append({
                'priority': 'π‘ Important',
                'area': 'Clarity Improvement',
                'issue': f'Moderate blur ({sharpness:.1f})',
                'solution': 'Try Richardson-Lucy or Wiener Filter',
                'expected_gain': '+100-300 sharpness points'
            })
        elif sharpness < 800:
            recs.append({
                'priority': 'π’ Optional',
                'area': 'Fine-tuning',
                'issue': f'Good but can improve ({sharpness:.1f})',
                'solution': 'Apply gentle Unsharp Masking',
                'expected_gain': '+50-150 sharpness points'
            })

        # Blur-type-specific treatment (motion takes precedence over defocus).
        if "Motion" in blur_type:
            recs.append({
                'priority': 'π‘ Important',
                'area': 'Motion Blur Treatment',
                'issue': 'Directional blur detected',
                'solution': 'Use Richardson-Lucy with motion PSF',
                'expected_gain': 'Significant directional clarity'
            })
        elif "Defocus" in blur_type:
            recs.append({
                'priority': 'π‘ Important',
                'area': 'Focus Enhancement',
                'issue': 'Out-of-focus blur detected',
                'solution': 'Apply Wiener Filter with Gaussian PSF',
                'expected_gain': 'Better overall focus'
            })

        # Already-excellent images get an optional polish suggestion.
        if sharpness > 800 and noise < 0.2:
            recs.append({
                'priority': 'π’ Optional',
                'area': 'Fine-tuning',
                'issue': 'Image quality is excellent',
                'solution': 'Try gentle Unsharp Masking for final polish',
                'expected_gain': 'Minor quality refinement'
            })

        return recs

    # Best-effort: never let recommendation building break the page render.
    except Exception as exc:

        logger.error(f"Error creating recommendations: {exc}")

        return []
|
|
|
# Script entry point: run the Streamlit application (defined earlier in this
# file as main()) only when executed directly, not when imported as a module.
if __name__ == "__main__":

    main()