import cv2
import numpy as np
from skimage.feature.texture import graycomatrix, graycoprops
from skimage.feature import local_binary_pattern
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
def rgb_histogram(image, bins=256):
    """Return the concatenated per-channel color histogram of an image.

    Parameters:
    - image: array of shape (H, W, 3); channels are histogrammed independently.
    - bins: number of histogram bins per channel (range is fixed to [0, 256)).

    Returns:
    - 1-D numpy array of length 3 * bins with density-normalized histograms.
    """
    per_channel = [
        np.histogram(image[:, :, channel], bins=bins, range=(0, 256), density=True)[0]
        for channel in range(3)
    ]
    return np.concatenate(per_channel)
def hu_moments(image):
    """Compute the seven Hu invariant moments of an image.

    Parameters:
    - image: 3-channel array, assumed RGB (converted with COLOR_RGB2GRAY).

    Returns:
    - 1-D numpy array of length 7 (the flattened Hu moments).
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    raw_moments = cv2.moments(grayscale)
    return cv2.HuMoments(raw_moments).flatten()
def glcm_features(image, distances=(1,), angles=(0,), levels=256, symmetric=True, normed=True):
    """Compute gray-level co-occurrence matrix (GLCM) texture features.

    Fix: the defaults were mutable lists ([1], [0]); replaced with
    equivalent immutable tuples to avoid the shared-mutable-default pitfall.
    Values and behavior are unchanged.

    Parameters:
    - image: 3-channel array, assumed RGB (converted with COLOR_RGB2GRAY).
    - distances: pixel-pair distances passed to graycomatrix.
    - angles: pixel-pair angles in radians passed to graycomatrix.
    - levels: number of gray levels (grayscale uint8 input fits 256).
    - symmetric, normed: forwarded to graycomatrix.

    Returns:
    - 1-D numpy array: contrast, dissimilarity, homogeneity, energy,
      correlation and ASM, flattened and concatenated in that order
      (6 * len(distances) * len(angles) values).
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    glcm = graycomatrix(gray, distances=distances, angles=angles,
                        levels=levels, symmetric=symmetric, normed=normed)
    props = ('contrast', 'dissimilarity', 'homogeneity', 'energy', 'correlation', 'ASM')
    return np.concatenate([graycoprops(glcm, prop).flatten() for prop in props])
def local_binary_pattern_features(image, P=8, R=1):
    """Compute a density-normalized histogram of uniform LBP codes.

    Parameters:
    - image: 3-channel array, assumed RGB (converted with COLOR_RGB2GRAY).
    - P: number of circularly symmetric neighbor points.
    - R: radius of the circle.

    Returns:
    - 1-D numpy array of length P + 2 (uniform LBP yields P + 2 distinct codes).
    """
    gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    codes = local_binary_pattern(gray_img, P, R, method='uniform')
    bin_edges = np.arange(0, P + 3)
    density, _ = np.histogram(codes.ravel(), bins=bin_edges, range=(0, P + 2), density=True)
    return density
# Function to compute Edge Detection Features
def edge_detection(image):
    """Compute the edge density of an image via Canny edge detection.

    Parameters:
    - image: 3-channel array, assumed BGR (converted with COLOR_BGR2GRAY).

    Returns:
    - 1-element numpy array: fraction of pixels marked as edges, in [0, 1].
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 100, 200)
    # Bug fix: Canny marks edge pixels with value 255, so summing raw
    # intensities inflated the "proportion of edge pixels" by 255x.
    # Count non-zero pixels to get the true density.
    edge_density = np.count_nonzero(edges) / edges.size
    return np.array([edge_density])
# Function to compute Color Moments
def color_moments(image):
    """Compute the first three color moments per HSV channel.

    Parameters:
    - image: 3-channel array, assumed BGR (converted with COLOR_BGR2HSV).

    Returns:
    - 1-D numpy array of length 9: (mean, variance, skewness) for H, S, V.
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    moments = []
    for i in range(3):  # H, S, V channels
        channel = hsv[:, :, i]
        mean = np.mean(channel)
        var = np.var(channel)
        std = np.std(channel)
        # Robustness fix: a constant channel has zero std, and the original
        # skewness formula divided by std**3, producing nan. Define the
        # skewness of a constant channel as 0 instead.
        skew = np.mean((channel - mean) ** 3) / (std ** 3) if std > 0 else 0.0
        moments.extend([mean, var, skew])
    return np.array(moments)
# Function to compute Fourier Transform Features
def fourier_transform(image):
    """Summarize the frequency content of an image with three statistics.

    Parameters:
    - image: 3-channel array, assumed BGR (converted with COLOR_BGR2GRAY).

    Returns:
    - 1-D numpy array: [mean, variance, entropy] of the centered
      magnitude spectrum of the 2-D FFT.
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Centered magnitude spectrum: FFT, shift DC to the middle, take |.|.
    spectrum = np.abs(np.fft.fftshift(np.fft.fft2(grayscale)))
    mean_freq = np.mean(spectrum)
    var_freq = np.var(spectrum)
    # Shannon-style entropy; 1e-10 avoids log(0).
    entropy_freq = -np.sum(spectrum * np.log(spectrum + 1e-10))
    return np.array([mean_freq, var_freq, entropy_freq])
def extract_features_from_image(image):
    """Build the full feature vector for one image.

    Concatenates color histogram, Hu moments, GLCM texture, LBP texture,
    edge density, HSV color moments and Fourier statistics into a single
    1-D numpy array.

    Bug fix: edge_detection, color_moments and fourier_transform were
    computed but silently dropped from the final concatenation; they are
    now included.

    NOTE(review): the histogram/Hu/GLCM/LBP helpers convert with RGB2GRAY
    while the edge/color-moment/Fourier helpers use BGR conversion codes;
    confirm the caller's channel order is consistent.

    Parameters:
    - image: 3-channel array.

    Returns:
    - 1-D numpy array with all feature groups concatenated in order.
    """
    feature_groups = [
        rgb_histogram(image),
        hu_moments(image),
        glcm_features(image),
        local_binary_pattern_features(image),
        edge_detection(image),
        color_moments(image),
        fourier_transform(image),
    ]
    return np.concatenate(feature_groups)
def perform_pca(data, num_components):
    """
    Perform Principal Component Analysis (PCA) on the input data.

    Fixes versus the original:
    - The docstring promised three return values but only one was returned;
      documentation now matches the code.
    - Constant features (zero standard deviation) caused a division by
      zero during standardization; their std is replaced with 1 so they
      standardize to zeros.
    - np.linalg.eigh is used instead of np.linalg.eig: the covariance
      matrix is symmetric, so eigh is the appropriate solver and returns
      strictly real eigenvalues/eigenvectors (no np.real cleanup needed).

    Parameters:
    - data (numpy.ndarray): The input data with shape (n_samples, n_features).
    - num_components (int): The number of principal components to retain.

    Returns:
    - data_reduced (numpy.ndarray): The data transformed into the reduced
      PCA space, shape (n_samples, num_components).
    """
    # Step 1: Standardize the data (guard constant features against /0).
    mean = np.mean(data, axis=0)
    std_dev = np.std(data, axis=0)
    std_dev = np.where(std_dev == 0, 1.0, std_dev)
    data_standardized = (data - mean) / std_dev
    # Step 2: Covariance matrix of the standardized data.
    covariance_matrix = np.cov(data_standardized, rowvar=False)
    # Step 3: Eigen-decomposition (symmetric solver -> real results).
    eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
    # Step 4: Sort eigenpairs by descending eigenvalue.
    sorted_indices = np.argsort(eigenvalues)[::-1]
    sorted_eigenvectors = eigenvectors[:, sorted_indices]
    # Step 5: Keep the top-k eigenvectors.
    top_k_eigenvectors = sorted_eigenvectors[:, :num_components]
    # Step 6: Project the standardized data onto the retained components.
    data_reduced = np.dot(data_standardized, top_k_eigenvectors)
    return data_reduced
def train_svm_model(features, labels, test_size=0.2):
    """
    Trains an SVM model and returns the trained model.

    Parameters:
    - features: Feature matrix of shape (B, F)
    - labels: Label matrix of shape (B, C) if one-hot encoded, or (B,) for single labels
    - test_size: Proportion of the data to use for testing (default is 0.2)

    Returns:
    - svm_model: Trained SVM model (test accuracy is printed as a side effect)
    """
    targets = labels
    # One-hot matrices become per-sample class indices.
    if targets.ndim > 1 and targets.shape[1] > 1:
        targets = np.argmax(targets, axis=1)
    # Fixed random_state keeps the train/test split reproducible.
    X_train, X_test, y_train, y_test = train_test_split(
        features, targets, test_size=test_size, random_state=42
    )
    svm_model = SVC(kernel='rbf', C=1.0)
    svm_model.fit(X_train, y_train)
    # Report held-out accuracy.
    accuracy = accuracy_score(y_test, svm_model.predict(X_test))
    print(f'Test Accuracy: {accuracy:.2f}')
    return svm_model