# NOTE: the three lines that were here ("Spaces:" / "Sleeping" / "Sleeping")
# were status-banner residue from a Hugging Face Spaces page scrape, not code;
# converted to this comment so the module parses.
| # image_processing.py | |
| import cv2 | |
| import numpy as np | |
| import pandas as pd | |
| import matplotlib.pyplot as plt | |
| from skimage.feature import graycomatrix, graycoprops, local_binary_pattern | |
| import joblib | |
| import warnings | |
| warnings.filterwarnings("ignore") | |
# Load model and preprocessing tools
# These artifacts come from the training pipeline and are loaded once at
# import time.  NOTE(review): paths are relative to the current working
# directory — the process must be started from the folder holding the .pkl
# files, or loading raises FileNotFoundError before anything else runs.
model = joblib.load("model.pkl")
label_encoder = joblib.load("label_encoder.pkl")
scaler = joblib.load("scaler.pkl")
# Preprocessing: Resize, apply CLAHE, sharpen
def preprocessing(single_image, count=1):
    """Resize to 256x256, equalize contrast per channel, and sharpen.

    Parameters
    ----------
    single_image : BGR image as produced by cv2 (converted to RGB here).
    count : accepted for caller compatibility; not used by this function.

    Returns
    -------
    (rgb_image, clahe_image, sharp_rgb) : three 256x256 RGB images —
    the plain resized image, the CLAHE-equalized one, and the sharpened one.
    """
    resized = cv2.resize(single_image, (256, 256))
    # Work in RGB so the returned images display correctly downstream.
    rgb_image = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    # Apply CLAHE to each channel independently, then reassemble.
    clahe = cv2.createCLAHE(clipLimit=0.4, tileGridSize=(8, 8))
    equalized = [clahe.apply(channel) for channel in cv2.split(rgb_image)]
    clahe_image = cv2.merge(equalized)
    # Unsharp masking: 1.5*image - 0.5*blurred boosts edges.
    clahe_bgr = cv2.cvtColor(clahe_image, cv2.COLOR_RGB2BGR)
    blurred = cv2.GaussianBlur(clahe_bgr, (5, 5), 1.5)
    sharp = cv2.addWeighted(clahe_bgr, 1.5, blurred, -0.5, 0)
    return rgb_image, clahe_image, cv2.cvtColor(sharp, cv2.COLOR_BGR2RGB)
# RGB histogram plotting
def plot_rgb_histogram(image):
    """Plot per-channel intensity histograms of an RGB image.

    The rest of this pipeline passes RGB images (preprocessing() converts
    BGR->RGB before returning), so channel 0 is red.  The previous color
    tuple ('b', 'g', 'r') drew the red channel's curve in blue and vice
    versa; the curve colors now match the channel being plotted.

    Returns the matplotlib Figure so the caller controls display/saving.
    """
    fig, ax = plt.subplots()
    for i, col in enumerate(('r', 'g', 'b')):  # channel order of RGB input
        hist = cv2.calcHist([image], [i], None, [256], [0, 256])
        ax.plot(hist, color=col)
    ax.set_title("RGB Histogram")
    ax.set_xlim([0, 256])
    return fig
# Extract features: GLCM, LBP, color, edge, etc.
def feature_extraction(image, return_df=True):
    """Compute the 10-element texture/color/edge feature vector for one image.

    Parameters
    ----------
    image : 3-channel image; channel 0 is treated as red below, i.e. RGB
        (preprocessing() returns RGB — confirm if called from elsewhere).
    return_df : if True, return a 1-row pandas DataFrame with named columns
        (the fitted scaler expects exactly these names and this order);
        otherwise return a (1, 10) numpy array.

    NOTE(review): every formula and the feature ordering below must stay
    exactly as they were when model.pkl/scaler.pkl were trained — do not
    "correct" them without retraining.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Gray-level co-occurrence texture statistics (distance 1, angle 0).
    glcm = graycomatrix(gray, distances=[1], angles=[0], levels=256, symmetric=True, normed=True)
    contrast = graycoprops(glcm, 'contrast')[0, 0]
    correlation = graycoprops(glcm, 'correlation')[0, 0]
    energy = graycoprops(glcm, 'energy')[0, 0]
    homogeneity = graycoprops(glcm, 'homogeneity')[0, 0]
    # Mean of the uniform local-binary-pattern image (8 neighbours, radius 1).
    lbp = local_binary_pattern(gray, P=8, R=1, method='uniform')
    lbp_mean = np.mean(lbp)
    # Per-channel means; channel 0 = red under the RGB assumption above.
    mean_r = np.mean(image[:, :, 0])
    mean_g = np.mean(image[:, :, 1])
    mean_b = np.mean(image[:, :, 2])
    # "Blackness" score.  NOTE(review): with mean_* in [0, 255] each term
    # (1 - (mean - 255)/255) lies in [1, 2]; this looks like it was meant to
    # be (1 - mean/255), but the trained model expects THIS formula.
    diff_black = ((1-(mean_r-255)/255) + (1-(mean_g-255)/255) + (1-(mean_b-255)/255))/3
    # Edge density: count of pixels with any non-zero Sobel gradient magnitude.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=5)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=5)
    edge_count = np.sum(cv2.magnitude(sobelx, sobely) > 0)
    features = [contrast, correlation, energy, homogeneity, lbp_mean,
    mean_r, mean_g, mean_b, edge_count, diff_black]
    if return_df:
        # Column names must match what the fitted scaler was trained with.
        df = pd.DataFrame([features], columns=[
        "Contrast", "Correlation", "Energy", "Homogeneity", "LBP_Mean",
        "Mean_R", "Mean_G", "Mean_B", "Edge_Count", "Black"
        ])
        return df
    else:
        return np.array([features])
# Predict class and confidence from image
def predict_image_class_with_features(image):
    """Classify one image end-to-end: preprocess, featurize, predict.

    Returns
    -------
    (features_df, predicted_class, confidence) : the 1-row feature frame,
    the decoded class label, and the classifier's top class probability.
    """
    # Only the sharpened image feeds the classifier.
    sharp = preprocessing(image, count=0)[2]
    features_df = feature_extraction(sharp)
    scaled = scaler.transform(features_df)
    class_idx = model.predict(scaled)
    label = label_encoder.inverse_transform(class_idx)[0]
    # Confidence = highest class probability for this sample.
    confidence = np.max(model.predict_proba(scaled))
    return features_df, label, confidence
# Segment image and classify each region
def segment_and_classify_regions(image, k_clusters=2):
    """K-means color segmentation, then classify each segment's region.

    Segmentation runs on the 256x256 preprocessed image, but the annotated
    output is a copy of the input at its ORIGINAL resolution.  Bug fixed:
    bounding boxes computed in 256x256 space were previously drawn directly
    onto the full-resolution copy, misplacing them for any input that is not
    already 256x256.  Boxes are now scaled back to input coordinates before
    being drawn and reported.

    Returns
    -------
    (output, region_predictions) : annotated BGR copy of the input, and a
    list of dicts with "class", "confidence", and "bbox" (x, y, w, h) in
    original-image coordinates.
    """
    _, _, sharp_img = preprocessing(image)
    # Cluster pixel colors of the sharpened 256x256 image.
    pixels = sharp_img.reshape((-1, 3)).astype(np.float32)
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.2)
    _, labels, _ = cv2.kmeans(pixels, k_clusters, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
    segmented = labels.flatten().reshape(sharp_img.shape[:2])
    output = image.copy()
    # Scale factors from the 256x256 working space back to the input size.
    sy = image.shape[0] / sharp_img.shape[0]
    sx = image.shape[1] / sharp_img.shape[1]
    region_predictions = []
    for i in range(k_clusters):
        mask = (segmented == i).astype(np.uint8) * 255
        # Masked image: pixels outside the cluster are black (affects features).
        region = cv2.bitwise_and(sharp_img, sharp_img, mask=mask)
        contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if not contours:
            continue
        # Bound only the largest connected component of this cluster.
        x, y, w, h = cv2.boundingRect(max(contours, key=cv2.contourArea))
        region_crop = region[y:y+h, x:x+w]
        if region_crop.size == 0:
            continue
        features_df = feature_extraction(region_crop)
        features_scaled = scaler.transform(features_df)
        prediction = model.predict(features_scaled)
        predicted_class = label_encoder.inverse_transform(prediction)[0]
        confidence = np.max(model.predict_proba(features_scaled))
        # Map the box into original-image coordinates for drawing/reporting.
        ox, oy = int(round(x * sx)), int(round(y * sy))
        ow, oh = int(round(w * sx)), int(round(h * sy))
        region_predictions.append({
            "class": predicted_class,
            "confidence": confidence,
            "bbox": (ox, oy, ow, oh)
        })
        cv2.rectangle(output, (ox, oy), (ox + ow, oy + oh), (0, 255, 0), 2)
        # Clamp the label y so text near the top edge stays inside the image.
        cv2.putText(output, f"{predicted_class} ({confidence*100:.1f}%)",
                    (ox, max(oy - 10, 0)),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 1)
    return output, region_predictions