| """
|
| Blur Detection Module - Motion vs Defocus Detection
|
| ==================================================
|
|
|
| Comprehensive blur analysis using Variance of Laplacian and advanced techniques
|
| to detect motion blur, defocus blur, and estimate blur parameters.
|
| """
|
|
|
| import cv2
|
| import numpy as np
|
| from scipy import ndimage
|
| from scipy.signal import find_peaks
|
| from scipy.fft import fft2, fftshift
|
| import logging
|
| from typing import Dict, Tuple, Optional
|
|
|
|
|
| logging.basicConfig(level=logging.INFO)
|
| logger = logging.getLogger(__name__)
|
|
|
class BlurDetector:
    """Advanced blur detection and analysis."""

    def __init__(self):
        # Laplacian-variance cutoffs consumed by classify_blur_severity():
        # scores above 'sharp' read as sharp; scores at or below
        # 'heavily_blurred' read as extremely blurred.
        self.sharpness_threshold = {
            'sharp': 1000,
            'slightly_blurred': 500,
            'moderately_blurred': 200,
            'heavily_blurred': 50
        }
|
|
|
| def variance_of_laplacian(self, image: np.ndarray) -> float:
|
| """
|
| Compute the Laplacian variance (sharpness metric)
|
|
|
| Args:
|
| image: Input image (BGR or grayscale)
|
|
|
| Returns:
|
| float: Variance of Laplacian (higher = sharper)
|
| """
|
| try:
|
|
|
| if len(image.shape) == 3:
|
| gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
|
| else:
|
| gray = image.copy()
|
|
|
|
|
| laplacian = cv2.Laplacian(gray, cv2.CV_64F)
|
| variance = laplacian.var()
|
|
|
| return variance
|
|
|
| except Exception as e:
|
| logger.error(f"Error computing Laplacian variance: {e}")
|
| return 0.0
|
|
|
| def estimate_motion_blur_params(self, image: np.ndarray) -> Tuple[float, int]:
|
| """
|
| Estimate motion blur parameters: angle and length
|
|
|
| Args:
|
| image: Input image
|
|
|
| Returns:
|
| tuple: (angle in degrees, length in pixels)
|
| """
|
| try:
|
|
|
| if len(image.shape) == 3:
|
| gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
|
| else:
|
| gray = image.copy()
|
|
|
|
|
| f_transform = np.fft.fft2(gray)
|
| f_shift = np.fft.fftshift(f_transform)
|
| magnitude_spectrum = np.log(np.abs(f_shift) + 1)
|
|
|
|
|
| rows, cols = magnitude_spectrum.shape
|
| center_row, center_col = rows // 2, cols // 2
|
|
|
|
|
| angles = np.linspace(0, 180, 180)
|
| max_intensity = 0
|
| best_angle = 0
|
|
|
| for angle in angles:
|
|
|
| length = min(rows, cols) // 4
|
| x = center_col + length * np.cos(np.radians(angle))
|
| y = center_row + length * np.sin(np.radians(angle))
|
|
|
|
|
| if 0 <= x < cols and 0 <= y < rows:
|
| intensity = magnitude_spectrum[int(y), int(x)]
|
| if intensity > max_intensity:
|
| max_intensity = intensity
|
| best_angle = angle
|
|
|
|
|
|
|
| blur_length = max(5, min(50, int(max_intensity / 10)))
|
|
|
| return best_angle, blur_length
|
|
|
| except Exception as e:
|
| logger.error(f"Error estimating motion blur: {e}")
|
| return 0.0, 5
|
|
|
| def detect_defocus_blur(self, image: np.ndarray) -> float:
|
| """
|
| Detect defocus blur using edge analysis
|
|
|
| Args:
|
| image: Input image
|
|
|
| Returns:
|
| float: Defocus blur score (0-1, higher = more defocus blur)
|
| """
|
| try:
|
|
|
| if len(image.shape) == 3:
|
| gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
|
| else:
|
| gray = image.copy()
|
|
|
|
|
| grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
|
| grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
|
|
|
|
|
| gradient_magnitude = np.sqrt(grad_x**2 + grad_y**2)
|
|
|
|
|
| edges = cv2.Canny(gray, 50, 150)
|
| edge_density = np.sum(edges > 0) / edges.size
|
|
|
|
|
| mean_gradient = np.mean(gradient_magnitude)
|
| std_gradient = np.std(gradient_magnitude)
|
|
|
|
|
| defocus_score = max(0, min(1, 1 - (std_gradient / (mean_gradient + 1e-10))))
|
|
|
| return defocus_score
|
|
|
| except Exception as e:
|
| logger.error(f"Error detecting defocus blur: {e}")
|
| return 0.0
|
|
|
| def analyze_noise_level(self, image: np.ndarray) -> float:
|
| """
|
| Estimate noise level in the image
|
|
|
| Args:
|
| image: Input image
|
|
|
| Returns:
|
| float: Estimated noise level (0-1)
|
| """
|
| try:
|
|
|
| if len(image.shape) == 3:
|
| gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
|
| else:
|
| gray = image.copy()
|
|
|
|
|
| laplacian = cv2.Laplacian(gray, cv2.CV_64F)
|
| noise_estimate = np.var(laplacian) / (np.mean(gray) + 1e-10)
|
|
|
|
|
| normalized_noise = min(noise_estimate / 1000, 1.0)
|
|
|
| return normalized_noise
|
|
|
| except Exception as e:
|
| logger.error(f"Error analyzing noise: {e}")
|
| return 0.0
|
|
|
| def classify_blur_severity(self, sharpness_score: float) -> Tuple[str, float]:
|
| """
|
| Classify blur severity based on sharpness score
|
|
|
| Args:
|
| sharpness_score: Laplacian variance value
|
|
|
| Returns:
|
| tuple: (severity_label, confidence)
|
| """
|
| try:
|
| if sharpness_score > self.sharpness_threshold['sharp']:
|
| return "Sharp", 0.9
|
| elif sharpness_score > self.sharpness_threshold['slightly_blurred']:
|
| return "Slightly Blurred", 0.8
|
| elif sharpness_score > self.sharpness_threshold['moderately_blurred']:
|
| return "Moderately Blurred", 0.9
|
| elif sharpness_score > self.sharpness_threshold['heavily_blurred']:
|
| return "Heavily Blurred", 0.95
|
| else:
|
| return "Extremely Blurred", 0.98
|
|
|
| except Exception as e:
|
| logger.error(f"Error classifying blur severity: {e}")
|
| return "Unknown", 0.0
|
|
|
| def comprehensive_analysis(self, image: np.ndarray) -> Dict:
|
| """
|
| Perform comprehensive blur analysis with detailed diagnostics
|
|
|
| Args:
|
| image: Input image
|
|
|
| Returns:
|
| dict: Complete analysis results with detailed explanations
|
| """
|
| try:
|
|
|
| height, width = image.shape[:2]
|
| channels = image.shape[2] if len(image.shape) == 3 else 1
|
|
|
|
|
| sharpness = self.variance_of_laplacian(image)
|
| severity, confidence = self.classify_blur_severity(sharpness)
|
|
|
|
|
| gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if len(image.shape) == 3 else image
|
| edges = cv2.Canny(gray, 50, 150)
|
| edge_density = np.sum(edges > 0) / edges.size
|
|
|
|
|
| grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
|
| grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
|
| gradient_magnitude = np.sqrt(grad_x**2 + grad_y**2)
|
| avg_gradient = np.mean(gradient_magnitude)
|
| max_gradient = np.max(gradient_magnitude)
|
|
|
|
|
| f_transform = fft2(gray)
|
| f_shift = fftshift(f_transform)
|
| magnitude_spectrum = np.log(np.abs(f_shift) + 1)
|
| high_freq_content = np.mean(magnitude_spectrum[height//4:3*height//4, width//4:3*width//4])
|
|
|
|
|
| motion_angle, motion_length = self.estimate_motion_blur_params(image)
|
|
|
|
|
| defocus_score = self.detect_defocus_blur(image)
|
|
|
|
|
| noise_level = self.analyze_noise_level(image)
|
|
|
|
|
| hist = cv2.calcHist([gray], [0], None, [256], [0, 256])
|
| contrast_measure = np.std(gray)
|
| dynamic_range = np.max(gray) - np.min(gray)
|
|
|
|
|
| texture_variance = np.var(cv2.Laplacian(gray, cv2.CV_64F))
|
|
|
|
|
| blur_analysis = self._detailed_blur_classification(
|
| sharpness, motion_length, defocus_score, edge_density,
|
| avg_gradient, high_freq_content
|
| )
|
|
|
|
|
| enhancement_strategy = self._recommend_enhancement_strategy(
|
| blur_analysis['primary_type'], severity, noise_level, motion_length
|
| )
|
|
|
| return {
|
|
|
| 'image_dimensions': f"{width}x{height}",
|
| 'color_channels': channels,
|
| 'image_size_category': self._categorize_image_size(width, height),
|
|
|
|
|
| 'sharpness_score': float(sharpness),
|
| 'sharpness_interpretation': self._interpret_sharpness_score(sharpness),
|
| 'severity': severity,
|
| 'severity_confidence': float(confidence),
|
| 'edge_density': float(edge_density),
|
| 'edge_density_interpretation': self._interpret_edge_density(edge_density),
|
|
|
|
|
| 'average_gradient': float(avg_gradient),
|
| 'max_gradient': float(max_gradient),
|
| 'gradient_interpretation': self._interpret_gradients(avg_gradient, max_gradient),
|
| 'high_frequency_content': float(high_freq_content),
|
| 'frequency_domain_analysis': self._interpret_frequency_content(high_freq_content),
|
|
|
|
|
| 'primary_type': blur_analysis['primary_type'],
|
| 'type_confidence': blur_analysis['confidence'],
|
| 'blur_reasoning': blur_analysis['reasoning'],
|
| 'secondary_issues': blur_analysis['secondary_issues'],
|
|
|
|
|
| 'motion_angle': float(motion_angle),
|
| 'motion_length': int(motion_length),
|
| 'motion_interpretation': self._interpret_motion_blur(motion_angle, motion_length),
|
|
|
|
|
| 'defocus_score': float(defocus_score),
|
| 'defocus_interpretation': self._interpret_defocus(defocus_score),
|
|
|
|
|
| 'noise_level': float(noise_level),
|
| 'noise_interpretation': self._interpret_noise_level(noise_level),
|
| 'contrast_measure': float(contrast_measure),
|
| 'dynamic_range': float(dynamic_range),
|
| 'texture_variance': float(texture_variance),
|
|
|
|
|
| 'enhancement_priority': enhancement_strategy['priority'],
|
| 'recommended_methods': enhancement_strategy['methods'],
|
| 'expected_improvement': enhancement_strategy['expected_improvement'],
|
| 'processing_difficulty': enhancement_strategy['difficulty'],
|
| 'detailed_recommendations': enhancement_strategy['detailed_recommendations'],
|
|
|
|
|
| 'technical_summary': self._generate_technical_summary(
|
| sharpness, blur_analysis['primary_type'], severity, noise_level
|
| ),
|
| 'student_analysis_notes': self._generate_student_notes(
|
| sharpness, motion_length, defocus_score, edge_density
|
| )
|
| }
|
|
|
| except Exception as e:
|
| logger.error(f"Error in comprehensive analysis: {e}")
|
| return {
|
| 'sharpness_score': 0.0,
|
| 'severity': 'Unknown',
|
| 'severity_confidence': 0.0,
|
| 'primary_type': 'Unknown',
|
| 'type_confidence': 0.0,
|
| 'motion_angle': 0.0,
|
| 'motion_length': 0,
|
| 'defocus_score': 0.0,
|
| 'noise_level': 0.0,
|
| 'enhancement_priority': 'High',
|
| 'technical_summary': 'Analysis failed due to processing error',
|
| 'student_analysis_notes': 'Unable to perform detailed analysis'
|
| }
|
|
|
| def _categorize_image_size(self, width: int, height: int) -> str:
|
| """Categorize image size for processing complexity assessment"""
|
| total_pixels = width * height
|
| if total_pixels < 100000:
|
| return "Small (Fast Processing)"
|
| elif total_pixels < 1000000:
|
| return "Medium (Standard Processing)"
|
| elif total_pixels < 5000000:
|
| return "Large (Slower Processing)"
|
| else:
|
| return "Very Large (Requires Optimization)"
|
|
|
| def _interpret_sharpness_score(self, sharpness: float) -> str:
|
| """Provide educational interpretation of sharpness score"""
|
| if sharpness > 1000:
|
| return f"Excellent sharpness ({sharpness:.1f}). Strong edge definition with high contrast transitions."
|
| elif sharpness > 600:
|
| return f"Good sharpness ({sharpness:.1f}). Adequate edge clarity for most applications."
|
| elif sharpness > 300:
|
| return f"Moderate blur ({sharpness:.1f}). Noticeable softness in edges and details."
|
| elif sharpness > 100:
|
| return f"Significant blur ({sharpness:.1f}). Substantial loss of fine details and edge clarity."
|
| else:
|
| return f"Severe blur ({sharpness:.1f}). Major degradation requiring advanced restoration techniques."
|
|
|
| def _interpret_edge_density(self, edge_density: float) -> str:
|
| """Interpret edge density measurements"""
|
| if edge_density > 0.1:
|
| return f"High edge density ({edge_density:.3f}) - Rich in structural details and textures"
|
| elif edge_density > 0.05:
|
| return f"Medium edge density ({edge_density:.3f}) - Moderate structural content"
|
| elif edge_density > 0.02:
|
| return f"Low edge density ({edge_density:.3f}) - Smooth regions dominate, limited fine details"
|
| else:
|
| return f"Very low edge density ({edge_density:.3f}) - Predominantly smooth surfaces or severe blur"
|
|
|
| def _interpret_gradients(self, avg_gradient: float, max_gradient: float) -> str:
|
| """Analyze gradient characteristics for sharpness assessment"""
|
| gradient_ratio = max_gradient / (avg_gradient + 1e-6)
|
| if gradient_ratio > 10 and avg_gradient > 20:
|
| return f"Strong gradients detected (avg: {avg_gradient:.1f}, max: {max_gradient:.1f}) - Good edge definition"
|
| elif gradient_ratio > 5:
|
| return f"Moderate gradients (avg: {avg_gradient:.1f}, max: {max_gradient:.1f}) - Some edge preservation"
|
| else:
|
| return f"Weak gradients (avg: {avg_gradient:.1f}, max: {max_gradient:.1f}) - Poor edge definition, likely blurred"
|
|
|
| def _interpret_frequency_content(self, high_freq: float) -> str:
|
| """Analyze frequency domain characteristics"""
|
| if high_freq > 5.0:
|
| return f"Rich high-frequency content ({high_freq:.2f}) - Preserves fine details and textures"
|
| elif high_freq > 3.0:
|
| return f"Moderate high-frequency content ({high_freq:.2f}) - Some detail preservation"
|
| elif high_freq > 2.0:
|
| return f"Limited high-frequency content ({high_freq:.2f}) - Loss of fine details"
|
| else:
|
| return f"Poor high-frequency content ({high_freq:.2f}) - Significant detail loss, heavy blur"
|
|
|
| def _detailed_blur_classification(self, sharpness: float, motion_length: int,
|
| defocus_score: float, edge_density: float,
|
| avg_gradient: float, high_freq: float) -> Dict:
|
| """Comprehensive blur type analysis with detailed reasoning"""
|
|
|
|
|
| motion_evidence = []
|
| defocus_evidence = []
|
| noise_evidence = []
|
| mixed_evidence = []
|
|
|
|
|
| if motion_length > 15:
|
| motion_evidence.append(f"Strong directional blur detected (length: {motion_length}px)")
|
| if avg_gradient < 15 and sharpness < 400:
|
| motion_evidence.append("Gradient analysis suggests directional degradation")
|
|
|
|
|
| if defocus_score > 0.4:
|
| defocus_evidence.append(f"High defocus characteristics (score: {defocus_score:.3f})")
|
| if edge_density < 0.03 and high_freq < 3.0:
|
| defocus_evidence.append("Uniform blur pattern across all frequencies")
|
|
|
|
|
| if motion_length > 10 and defocus_score > 0.3:
|
| mixed_evidence.append("Both motion and defocus characteristics present")
|
| if sharpness < 200:
|
| mixed_evidence.append("Severe degradation suggests multiple blur sources")
|
|
|
|
|
| if len(motion_evidence) >= 2 and motion_length > 12:
|
| primary_type = "Motion Blur"
|
| confidence = 0.85 + min(0.1, motion_length / 100)
|
| reasoning = f"Motion blur identified based on: {', '.join(motion_evidence)}"
|
| secondary_issues = defocus_evidence + mixed_evidence
|
|
|
| elif len(defocus_evidence) >= 2 and defocus_score > 0.35:
|
| primary_type = "Defocus Blur"
|
| confidence = 0.80 + min(0.15, defocus_score)
|
| reasoning = f"Defocus blur identified based on: {', '.join(defocus_evidence)}"
|
| secondary_issues = motion_evidence + mixed_evidence
|
|
|
| elif sharpness > 800:
|
| primary_type = "Sharp Image"
|
| confidence = 0.90
|
| reasoning = "High sharpness metrics indicate well-focused image"
|
| secondary_issues = []
|
|
|
| else:
|
| primary_type = "Mixed/Complex Blur"
|
| confidence = 0.65
|
| reasoning = f"Complex blur pattern detected. Evidence includes: {', '.join(motion_evidence + defocus_evidence)}"
|
| secondary_issues = ["Multiple degradation sources present", "Requires combined enhancement approach"]
|
|
|
| return {
|
| 'primary_type': primary_type,
|
| 'confidence': confidence,
|
| 'reasoning': reasoning,
|
| 'secondary_issues': secondary_issues if secondary_issues else ["No significant secondary issues detected"]
|
| }
|
|
|
| def _interpret_motion_blur(self, angle: float, length: int) -> str:
|
| """Detailed motion blur parameter interpretation"""
|
| if length < 5:
|
| return f"Minimal motion (Length: {length}px) - Not significant for restoration"
|
| elif length < 15:
|
| return f"Moderate linear motion (Angle: {angle:.1f}Β°, Length: {length}px) - Correctable with standard techniques"
|
| elif length < 30:
|
| return f"Significant motion blur (Angle: {angle:.1f}Β°, Length: {length}px) - Requires advanced deconvolution"
|
| else:
|
| return f"Severe motion blur (Angle: {angle:.1f}Β°, Length: {length}px) - Challenging restoration case"
|
|
|
| def _interpret_defocus(self, defocus_score: float) -> str:
|
| """Interpret defocus blur characteristics"""
|
| if defocus_score < 0.2:
|
| return f"Minimal defocus ({defocus_score:.3f}) - Sharp focus maintained"
|
| elif defocus_score < 0.4:
|
| return f"Moderate defocus ({defocus_score:.3f}) - Some focus softness present"
|
| elif defocus_score < 0.6:
|
| return f"Significant defocus ({defocus_score:.3f}) - Noticeable out-of-focus blur"
|
| else:
|
| return f"Severe defocus ({defocus_score:.3f}) - Major focus problems requiring restoration"
|
|
|
| def _interpret_noise_level(self, noise_level: float) -> str:
|
| """Analyze noise characteristics and impact"""
|
| if noise_level < 0.1:
|
| return f"Low noise ({noise_level:.3f}) - Clean image, minimal interference"
|
| elif noise_level < 0.3:
|
| return f"Moderate noise ({noise_level:.3f}) - Some grain present but manageable"
|
| elif noise_level < 0.5:
|
| return f"High noise ({noise_level:.3f}) - Significant grain affecting image quality"
|
| else:
|
| return f"Severe noise ({noise_level:.3f}) - Heavy noise requiring specialized filtering"
|
|
|
| def _recommend_enhancement_strategy(self, blur_type: str, severity: str,
|
| noise_level: float, motion_length: int) -> Dict:
|
| """Generate detailed enhancement recommendations"""
|
|
|
| if "Sharp" in blur_type:
|
| return {
|
| 'priority': 'Low',
|
| 'methods': ['Optional sharpening enhancement'],
|
| 'expected_improvement': '5-10%',
|
| 'difficulty': 'Easy',
|
| 'detailed_recommendations': [
|
| "Image is already well-focused",
|
| "Consider mild unsharp masking if enhancement desired",
|
| "Focus on noise reduction if noise_level > 0.2"
|
| ]
|
| }
|
|
|
| elif "Motion" in blur_type:
|
| methods = ['Wiener Filter', 'Richardson-Lucy Deconvolution']
|
| if motion_length > 20:
|
| methods.append('Advanced CNN Enhancement')
|
|
|
| difficulty = 'Medium' if motion_length < 20 else 'Hard'
|
| improvement = '30-60%' if motion_length < 25 else '20-45%'
|
|
|
| recommendations = [
|
| f"Apply motion deblurring with {motion_length}px kernel",
|
| "Use Richardson-Lucy for best results with known PSF",
|
| "Consider CNN enhancement for complex cases"
|
| ]
|
|
|
| if noise_level > 0.3:
|
| recommendations.append("Apply noise reduction before deblurring")
|
|
|
| elif "Defocus" in blur_type:
|
| methods = ['Gaussian Deconvolution', 'Wiener Filter', 'CNN Enhancement']
|
| difficulty = 'Medium'
|
| improvement = '25-50%'
|
|
|
| recommendations = [
|
| "Use Gaussian PSF estimation for deconvolution",
|
| "Apply iterative Richardson-Lucy algorithm",
|
| "CNN methods often work well for defocus blur"
|
| ]
|
|
|
| else:
|
| methods = ['Combined Approach', 'CNN Enhancement', 'Multi-stage Processing']
|
| difficulty = 'Hard'
|
| improvement = '20-40%'
|
|
|
| recommendations = [
|
| "Try multiple deblurring approaches sequentially",
|
| "CNN enhancement recommended for complex cases",
|
| "May require manual parameter tuning"
|
| ]
|
|
|
|
|
| if noise_level > 0.4:
|
| recommendations.insert(0, "Critical: Apply aggressive noise reduction first")
|
| improvement = improvement.replace('0%', '5%').replace('5%', '0%')
|
|
|
| return {
|
| 'priority': 'High' if 'Severe' in severity else 'Medium',
|
| 'methods': methods,
|
| 'expected_improvement': improvement,
|
| 'difficulty': difficulty,
|
| 'detailed_recommendations': recommendations
|
| }
|
|
|
| def _generate_technical_summary(self, sharpness: float, blur_type: str,
|
| severity: str, noise_level: float) -> str:
|
| """Generate comprehensive technical analysis summary"""
|
| return f"""
|
| TECHNICAL ANALYSIS SUMMARY:
|
| β’ Sharpness Assessment: {severity} blur detected (Laplacian variance: {sharpness:.1f})
|
| β’ Primary Issue: {blur_type} identified as dominant degradation
|
| β’ Noise Characteristics: {'Low' if noise_level < 0.2 else 'High'} noise environment
|
| β’ Processing Complexity: {'Standard' if sharpness > 300 else 'Advanced'} restoration required
|
| β’ Image Condition: {'Recoverable' if sharpness > 100 else 'Severely degraded'} with appropriate methods
|
| """.strip()
|
|
|
| def _generate_student_notes(self, sharpness: float, motion_length: int,
|
| defocus_score: float, edge_density: float) -> str:
|
| """Generate educational analysis notes"""
|
| return f"""
|
| DETAILED ANALYSIS NOTES:
|
| π Quantitative Measurements:
|
| - Variance of Laplacian (sharpness): {sharpness:.1f}
|
| - Motion blur estimation: {motion_length}px kernel length
|
| - Defocus blur score: {defocus_score:.3f} (0=sharp, 1=heavily defocused)
|
| - Edge density ratio: {edge_density:.3f} (proportion of edge pixels)
|
|
|
| π Image Processing Observations:
|
| - {"Strong" if sharpness > 600 else "Weak"} high-frequency content preservation
|
| - {"Directional" if motion_length > 10 else "Uniform"} blur pattern characteristics
|
| - {"Adequate" if edge_density > 0.05 else "Poor"} structural detail retention
|
| - Enhancement difficulty: {"Low" if sharpness > 400 else "High"} (based on degradation severity)
|
|
|
| π‘ Recommended Analysis Approach:
|
| 1. Frequency domain analysis confirms blur type identification
|
| 2. Gradient-based metrics support sharpness assessment
|
| 3. PSF estimation required for optimal deconvolution
|
| 4. Multi-metric validation ensures robust classification
|
| """.strip()
|
|
|
def detect_blur_type(image: np.ndarray) -> str:
    """Classify the dominant blur type of an image.

    Convenience wrapper around BlurDetector.comprehensive_analysis().

    Args:
        image: Input image (BGR or grayscale).

    Returns:
        str: The primary classification, lower-cased with spaces replaced
        by underscores — e.g. 'sharp_image', 'motion_blur',
        'defocus_blur', 'mixed/complex_blur', or 'unknown' on failure.
        (Fix: the previous docstring promised bare tokens
        'sharp'/'motion'/'defocus'/'mixed' that were never returned.)
    """
    detector = BlurDetector()
    analysis = detector.comprehensive_analysis(image)
    return analysis['primary_type'].lower().replace(' ', '_')
|
|
|
def get_sharpness_score(image: np.ndarray) -> float:
    """Return the Laplacian-variance sharpness score for an image.

    Args:
        image: Input image (BGR or grayscale).

    Returns:
        float: Sharpness score (variance of Laplacian; higher = sharper).
    """
    return BlurDetector().variance_of_laplacian(image)
|
|
|
|
|
if __name__ == "__main__":
    print("Blur Detection Module - Testing")
    print("===============================")

    # Synthetic test pair: random-noise frame (acts as "sharp") and its
    # Gaussian-blurred copy.
    sharp_image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
    blurred_image = cv2.GaussianBlur(sharp_image, (15, 15), 5)

    detector = BlurDetector()

    for title, sample in (("Sharp Image", sharp_image), ("Blurred Image", blurred_image)):
        print(f"\n--- {title} Analysis ---")
        for key, value in detector.comprehensive_analysis(sample).items():
            print(f"{key}: {value}")

    print("\nBlur detection module test completed!")
|
|
|
|
|
def analyze_blur_characteristics(image: np.ndarray) -> Dict:
    """Standalone blur-analysis entry point (kept for backward compatibility).

    Args:
        image: Input image array.

    Returns:
        dict: Comprehensive blur analysis results from BlurDetector.
    """
    return BlurDetector().comprehensive_analysis(image)
|
|
|
|
|
if __name__ == "__main__":
    # Fix: this duplicate guard previously called test_blur_detection(),
    # which is not defined anywhere in this module and raised NameError
    # when the file was run as a script. The runnable demo lives in the
    # earlier __main__ guard; this one is intentionally a no-op.
    pass