| | import cv2 |
| | import numpy as np |
| | from skimage.feature.texture import graycomatrix, graycoprops |
| | from skimage.feature import local_binary_pattern |
| | from sklearn.decomposition import PCA |
| | from sklearn.svm import SVC |
| | from sklearn.model_selection import train_test_split |
| | from sklearn.metrics import accuracy_score |
| | from sklearn.preprocessing import StandardScaler |
| |
|
def rgb_histogram(image, bins=256):
    """Concatenated per-channel intensity histograms of an RGB image.

    Each of the three channels is binned over the value range [0, 256)
    with ``density=True`` normalization, and the three histograms are
    joined into one 1-D feature vector of length ``3 * bins``.
    """
    channel_hists = [
        np.histogram(image[:, :, ch], bins=bins, range=(0, 256), density=True)[0]
        for ch in range(3)
    ]
    return np.concatenate(channel_hists)
| |
|
def hu_moments(image):
    """Seven Hu invariant moments of the grayscale version of an RGB image.

    Converts to grayscale, computes the image moments, and returns the
    seven translation/scale/rotation-invariant Hu moments as a flat
    array of length 7.
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    return cv2.HuMoments(cv2.moments(grayscale)).flatten()
| |
|
def glcm_features(image, distances=(1,), angles=(0,), levels=256, symmetric=True, normed=True):
    """Haralick texture features from a gray-level co-occurrence matrix.

    The RGB image is converted to grayscale, a GLCM is built for the
    given pixel-pair ``distances`` and ``angles``, and six scalar
    properties (contrast, dissimilarity, homogeneity, energy,
    correlation, ASM) are extracted and concatenated — one value per
    (distance, angle) pair per property.

    Fix: the default arguments were mutable lists (``[1]``, ``[0]``);
    they are now tuples. Callers passing their own lists are unaffected.

    Parameters:
    - image: RGB image array (uint8).
    - distances / angles: pixel-pair offsets and angles for the GLCM.
    - levels: number of gray levels (256 for uint8 input).
    - symmetric / normed: passed through to ``graycomatrix``.

    Returns:
    - 1-D numpy array of the six properties, flattened and concatenated.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    glcm = graycomatrix(gray, distances=distances, angles=angles,
                        levels=levels, symmetric=symmetric, normed=normed)
    # One graycoprops call per property, in a fixed, documented order.
    props = ('contrast', 'dissimilarity', 'homogeneity', 'energy', 'correlation', 'ASM')
    return np.concatenate([graycoprops(glcm, p).flatten() for p in props])
| |
|
def local_binary_pattern_features(image, P=8, R=1):
    """Normalized histogram of uniform Local Binary Pattern codes.

    Converts the RGB image to grayscale, computes uniform LBP codes with
    ``P`` sampling points at radius ``R``, and returns their normalized
    histogram. The 'uniform' method yields ``P + 2`` distinct codes, so
    the result has length ``P + 2``.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    codes = local_binary_pattern(gray, P, R, method='uniform')
    bin_edges = np.arange(0, P + 3)
    hist, _ = np.histogram(codes.ravel(), bins=bin_edges, range=(0, P + 2), density=True)
    return hist
| |
|
| | |
def edge_detection(image):
    """Fraction of pixels marked as edges by the Canny detector.

    Fixes:
    - grayscale conversion now uses COLOR_RGB2GRAY for consistency with
      the other feature extractors in this file, which all treat the
      input image as RGB (they are all fed the same image by
      ``extract_features_from_image``);
    - edge density is now the fraction of edge pixels in [0, 1]; the
      previous ``np.sum(edges) / edges.size`` summed 0/255 pixel values,
      scaling the "density" by 255.

    Returns:
    - 1-element numpy array containing the edge-pixel fraction.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # Hysteresis thresholds 100/200 (the common 1:2 ratio heuristic).
    edges = cv2.Canny(gray, 100, 200)
    edge_density = np.count_nonzero(edges) / edges.size
    return np.array([edge_density])
| |
|
| | |
def color_moments(image):
    """First three statistical moments per HSV channel.

    For each of H, S, V the mean, variance, and skewness are computed,
    giving a length-9 vector ``[mean_H, var_H, skew_H, mean_S, ...]``.

    Fixes:
    - conversion now uses COLOR_RGB2HSV for consistency with the rest of
      the pipeline, which treats the input image as RGB;
    - skewness of a constant channel is defined as 0.0 instead of the
      previous 0/0 division producing nan.

    Returns:
    - 1-D numpy array of 9 color moments.
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)

    moments = []
    for ch in range(3):
        channel = hsv[:, :, ch].astype(np.float64)
        mean = channel.mean()
        var = channel.var()
        std = channel.std()
        # Third standardized moment; guard the zero-variance case.
        skew = 0.0 if std == 0 else np.mean((channel - mean) ** 3) / std ** 3
        moments.extend([mean, var, skew])

    return np.array(moments)
| |
|
| | |
def fourier_transform(image):
    """Summary statistics of the 2-D Fourier magnitude spectrum.

    The RGB image is converted to grayscale, transformed with a 2-D FFT,
    and the zero-frequency component is shifted to the center. The
    features are the mean, variance, and an entropy-style statistic of
    the magnitude spectrum.

    Fix: grayscale conversion now uses COLOR_RGB2GRAY for consistency
    with the other extractors (the pipeline treats inputs as RGB).

    NOTE(review): the "entropy" is computed over raw (unnormalized)
    magnitudes, matching the original feature definition — it is not a
    true Shannon entropy of a probability distribution.

    Returns:
    - numpy array [mean_freq, var_freq, entropy_freq].
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)

    # Centered magnitude spectrum.
    spectrum = np.abs(np.fft.fftshift(np.fft.fft2(gray)))

    mean_freq = np.mean(spectrum)
    var_freq = np.var(spectrum)
    # 1e-10 guards log(0) for zero-magnitude frequency bins.
    entropy_freq = -np.sum(spectrum * np.log(spectrum + 1e-10))

    return np.array([mean_freq, var_freq, entropy_freq])
| |
|
def extract_features_from_image(image):
    """Full feature vector for one image: every extractor, concatenated.

    Runs each feature extractor defined in this module on the image and
    concatenates their outputs (color histograms, Hu moments, GLCM
    texture, LBP texture, edge density, HSV color moments, and Fourier
    statistics) into a single 1-D feature vector.
    """
    extractors = (
        rgb_histogram,
        hu_moments,
        glcm_features,
        local_binary_pattern_features,
        edge_detection,
        color_moments,
        fourier_transform,
    )
    return np.concatenate([extract(image) for extract in extractors])
| |
|
def standardize_features(features):
    """Scale each feature column to zero mean and unit variance.

    A fresh StandardScaler is fitted on ``features`` and used to
    transform them; the fitted scaler itself is discarded, so the same
    scaling cannot be re-applied to new data later.
    """
    scaler = StandardScaler().fit(features)
    return scaler.transform(features)
| |
|
def perform_pca(data, num_components):
    """
    Project data onto its top principal components.

    The data is standardized (per-feature zero mean, unit variance),
    the covariance matrix is eigendecomposed, and the data is projected
    onto the eigenvectors with the largest eigenvalues.

    Parameters:
    - data (numpy.ndarray): Input data of shape (n_samples, n_features).
    - num_components (int): Number of principal components to retain.

    Returns:
    - data_reduced (numpy.ndarray): Shape (n_samples, num_components);
      the standardized data projected into the reduced PCA space.
      (Component signs are arbitrary, as in any PCA.)

    Fixes vs. the previous version:
    - the docstring promised three return values (reduced data,
      eigenvectors, eigenvalues) but the code returns only the reduced
      data; the docstring now matches the actual return;
    - zero-variance features no longer cause a divide-by-zero: their
      std is treated as 1, leaving the centered column at 0;
    - ``np.linalg.eigh`` replaces ``np.linalg.eig`` — it is the correct,
      stable routine for a symmetric covariance matrix and always yields
      real eigenvalues/eigenvectors, making the old ``np.real`` band-aid
      unnecessary.
    """
    # Standardize each feature; guard constant columns against 0/0.
    mean = np.mean(data, axis=0)
    std_dev = np.std(data, axis=0)
    std_dev = np.where(std_dev == 0, 1.0, std_dev)
    data_standardized = (data - mean) / std_dev

    # Covariance across features (rows are observations).
    covariance_matrix = np.cov(data_standardized, rowvar=False)

    # eigh returns eigenvalues in ascending order; sort to descending
    # and keep the top-k eigenvectors (as columns).
    eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
    order = np.argsort(eigenvalues)[::-1]
    top_k_eigenvectors = eigenvectors[:, order[:num_components]]

    # Project the standardized data onto the retained components.
    return data_standardized @ top_k_eigenvectors
| |
|
| |
|
def train_svm_model(features, labels, test_size=0.2):
    """
    Train an RBF-kernel SVM and report held-out accuracy.

    Parameters:
    - features: Feature matrix of shape (B, F)
    - labels: Label matrix of shape (B, C) if one-hot encoded, or (B,) for single labels
    - test_size: Proportion of the data to use for testing (default is 0.2)

    Returns:
    - svm_model: Trained SVM model
    """
    # Collapse one-hot labels down to integer class indices.
    if labels.ndim > 1 and labels.shape[1] > 1:
        labels = np.argmax(labels, axis=1)

    # Fixed random_state keeps the split reproducible across runs.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=test_size, random_state=42
    )

    svm_model = SVC(kernel='rbf', C=1.0)
    svm_model.fit(X_train, y_train)

    # Evaluate on the held-out split and report accuracy.
    accuracy = accuracy_score(y_test, svm_model.predict(X_test))
    print(f'Test Accuracy: {accuracy:.2f}')

    return svm_model