# Author: Kalhar.Pandya
# Initial commit (2ae889c)
import os
import cv2
import numpy as np
from skimage.feature import local_binary_pattern, graycomatrix, graycoprops
# ---------------------------------------------------------------------
# 1. Color Features
# ---------------------------------------------------------------------
def get_average_color(image):
    """
    Return the per-channel mean pixel value (BGR order for OpenCV images).
    """
    # Averaging over both spatial axes (rows, cols) leaves one value
    # per channel, i.e. an array of shape (3,) for a color image.
    return image.mean(axis=(0, 1))
def get_small_color_hist(image, h_bins=8, s_bins=2, v_bins=2):
    """
    Build a compact 3-D color histogram in HSV space.

    Parameters
    ----------
    image : np.ndarray
        BGR color image.
    h_bins, s_bins, v_bins : int
        Bin counts for the Hue, Saturation and Value channels.

    Returns
    -------
    np.ndarray
        Flattened, normalized histogram of length h_bins * s_bins * v_bins.
    """
    hsv_img = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # OpenCV Hue spans [0, 180); Saturation and Value span [0, 256).
    bin_counts = [h_bins, s_bins, v_bins]
    channel_ranges = [0, 180, 0, 256, 0, 256]
    hist = cv2.calcHist([hsv_img], [0, 1, 2], None, bin_counts, channel_ranges)
    # Normalize in place so the features are comparable across image sizes.
    cv2.normalize(hist, hist)
    return hist.flatten()
# ---------------------------------------------------------------------
# 2. LBP (Local Binary Patterns)
# ---------------------------------------------------------------------
def get_lbp_histogram(image, num_points, radius):
    """
    Compute a normalized histogram of uniform Local Binary Patterns.

    Parameters
    ----------
    image : np.ndarray
        BGR color image (H, W, 3), or grayscale image (H, W) / (H, W, 1).
    num_points : int
        Number of circularly symmetric neighbor points for the LBP operator.
    radius : int
        Radius of the LBP sampling circle.

    Returns
    -------
    np.ndarray
        Normalized histogram of length num_points + 2 (the "uniform" LBP
        method yields num_points + 2 distinct pattern labels).
    """
    # Reduce to a strictly 2-D grayscale array: skimage's
    # local_binary_pattern expects 2-D input.
    if image.ndim == 3 and image.shape[2] != 1:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    elif image.ndim == 3:
        # (H, W, 1) -> (H, W); the original passed the 3-D array through.
        gray = image[:, :, 0]
    else:
        gray = image
    # local_binary_pattern is imported once at module level; the previous
    # function-scope re-import was redundant and has been removed.
    lbp = local_binary_pattern(gray, num_points, radius, method="uniform")
    # "uniform" LBP produces integer labels in [0, num_points + 1].
    n_bins = int(num_points + 2)
    hist, _ = np.histogram(lbp.ravel(), bins=n_bins, range=(0, n_bins))
    # Normalize to a probability distribution; epsilon guards an all-zero sum.
    hist = hist.astype("float")
    hist /= (hist.sum() + 1e-6)
    return hist
# ---------------------------------------------------------------------
# 3. GLCM (Gray Level Co-occurrence Matrix)
# ---------------------------------------------------------------------
def get_glcm_features(image,
                      distance=1,
                      angles=(0,),
                      properties=('contrast', 'homogeneity', 'energy', 'correlation')):
    """
    Compute a small set of GLCM (Gray Level Co-occurrence Matrix) features.

    Parameters
    ----------
    image : np.ndarray
        2-D grayscale image with integer values in [0, 255].
    distance : int
        Pixel-pair offset used when building the co-occurrence matrix.
    angles : sequence of float
        Orientations in radians (e.g. (0, np.pi / 2) for two orientations).
        NOTE: default changed from the mutable list [0] to the immutable
        tuple (0,) — same value, but safe as a default argument.
    properties : sequence of str
        GLCM property names accepted by skimage's graycoprops.

    Returns
    -------
    np.ndarray
        Flattened array of shape (len(properties) * len(angles),).
    """
    glcm = graycomatrix(
        image,
        distances=[distance],
        angles=list(angles),
        levels=256,
        symmetric=True,
        normed=True
    )
    feats = []
    for prop in properties:
        vals = graycoprops(glcm, prop)
        feats.append(vals.ravel())  # Flatten the (distances, angles) grid.
    return np.concatenate(feats)
# ---------------------------------------------------------------------
# 4. Combined Feature Extraction
# ---------------------------------------------------------------------
def extract_features_from_image(image):
    """
    Extract a dictionary of feature sets from a color or grayscale image.

    Keys of the returned dictionary:
      - 'average_color': 3 values (B, G, R) from the original image.
      - 'lbp_hist': Histogram from Local Binary Patterns (texture).
      - 'glcm_features': GLCM features (contrast, homogeneity, energy,
        correlation).
      - 'edge_density': Scalar fraction of edge pixels.
      - 'edge_orient_hist': Normalized histogram (8 bins) of edge
        orientations at edge pixels.
      - 'combined_features': Concatenation of all of the above.
    """
    # cv2 and numpy are imported at module level; the previous
    # function-scope re-imports were redundant and have been removed.

    # --- (A) Color Features ---
    # Ensure a 3-channel image so the color features are always 3 values.
    if len(image.shape) == 2 or image.shape[2] == 1:
        image_color = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
    else:
        image_color = image
    avg_color = get_average_color(image_color)  # shape: (3,)

    # --- (B) Texture Features: LBP and GLCM ---
    # Texture descriptors operate on a single-channel image.
    if len(image.shape) == 2:
        gray_image = image
    else:
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    lbp_hist = get_lbp_histogram(gray_image, num_points=8, radius=1)  # (10,)
    glcm_feats = get_glcm_features(
        gray_image,
        distance=1,
        angles=[0],  # Single orientation
        properties=('contrast', 'homogeneity', 'energy', 'correlation')
    )  # shape: (4,)

    # --- (C) Edge-Related Features ---
    # Blur first to suppress noise before edge detection.
    blurred = cv2.GaussianBlur(gray_image, (9, 9), 0)
    # Fixed Canny thresholds; these might be adapted based on context.
    threshold1, threshold2 = 0, 100
    edges = cv2.Canny(blurred, threshold1=threshold1, threshold2=threshold2)
    # Edge density: fraction of pixels marked as edges.
    edge_density = np.sum(edges > 0) / float(edges.size)
    # Gradient orientations via Sobel derivatives.
    grad_x = cv2.Sobel(blurred, cv2.CV_64F, 1, 0, ksize=3)
    grad_y = cv2.Sobel(blurred, cv2.CV_64F, 0, 1, ksize=3)
    magnitude, angle = cv2.cartToPolar(grad_x, grad_y, angleInDegrees=True)
    # Histogram orientations only where an edge was actually detected.
    angles = angle[edges > 0]
    hist_bins = 8
    if angles.size > 0:
        edge_orient_hist, _ = np.histogram(angles, bins=hist_bins, range=(0, 360))
        edge_orient_hist = edge_orient_hist.astype("float")
        edge_orient_hist /= (edge_orient_hist.sum() + 1e-6)  # Normalize.
    else:
        # No edges found: return an all-zero histogram of the same length.
        edge_orient_hist = np.zeros(hist_bins, dtype="float")

    # --- (D) Combine All Features ---
    combined_features = np.concatenate([
        avg_color,                 # 3 values
        lbp_hist,                  # 10 values
        glcm_feats,                # 4 values
        np.array([edge_density]),  # 1 value
        edge_orient_hist           # 8 values
    ])
    return {
        'average_color': avg_color,
        'lbp_hist': lbp_hist,
        'glcm_features': glcm_feats,
        'edge_density': edge_density,
        'edge_orient_hist': edge_orient_hist,
        'combined_features': combined_features
    }
# ---------------------------------------------------------------------
# 5. Example Usage
# ---------------------------------------------------------------------
if __name__ == "__main__":
    import sys

    # Accept the image path on the command line, falling back to a sample
    # patch, e.g.: python feature_extractor_min.py images/wood_example.jpg
    image_path = sys.argv[1] if len(sys.argv) > 1 else './wood_patches/patch_012.png'
    image = cv2.imread(image_path, cv2.IMREAD_COLOR)
    if image is None:
        print(f"Error: Unable to read image at {image_path}")
    else:
        feats = extract_features_from_image(image)
        print("Feature Shapes:")
        for k, v in feats.items():
            if isinstance(v, np.ndarray):
                print(f"  {k}: shape={v.shape}")
            else:
                print(f"  {k}: {v}")
        print("\nCombined Feature Vector:")
        # BUG FIX: the dictionary key is 'combined_features', not 'combined';
        # the original raised KeyError here.
        print(feats['combined_features'])
        print("Combined Feature Length:", len(feats['combined_features']))