| | """ |
| | Image based feature alignment |
| | Credits: https://www.learnopencv.com/image-alignment-feature-based-using-opencv-c-python/ |
| | """ |
| | import cv2 |
| | import numpy as np |
| |
|
| | from src.processors.interfaces.ImagePreprocessor import ImagePreprocessor |
| | from src.utils.image import ImageUtils |
| | from src.utils.interaction import InteractionUtils |
| |
|
| |
|
class FeatureBasedAlignment(ImagePreprocessor):
    """Align each input image onto a reference image using ORB feature matching.

    Options (read from ``self.options``):
        reference:        path to the reference image, relative to the template dir.
        maxFeatures:      number of ORB keypoints to detect (default 500).
        goodMatchPercent: fraction of best matches kept for the fit (default 0.15).
        2d:               if truthy, fit an affine (2D) transform instead of a
                          full perspective homography.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        options = self.options
        config = self.tuning_config

        # Load the reference image in grayscale and resize it to the common
        # processing dimensions so matched keypoint coordinates line up with
        # the (equally resized) input images.
        self.ref_path = self.relative_dir.joinpath(options["reference"])
        ref_img = cv2.imread(str(self.ref_path), cv2.IMREAD_GRAYSCALE)
        self.ref_img = ImageUtils.resize_util(
            ref_img,
            config.dimensions.processing_width,
            config.dimensions.processing_height,
        )

        self.max_features = int(options.get("maxFeatures", 500))
        self.good_match_percent = options.get("goodMatchPercent", 0.15)
        self.transform_2_d = options.get("2d", False)

        # Detect reference keypoints/descriptors once; reused for every input.
        self.orb = cv2.ORB_create(self.max_features)
        self.to_keypoints, self.to_descriptors = self.orb.detectAndCompute(
            self.ref_img, None
        )

    def __str__(self):
        return self.ref_path.name

    def exclude_files(self):
        # The reference image itself must not be treated as an OMR input.
        return [self.ref_path]

    def apply_filter(self, image, _file_path):
        """Warp ``image`` onto the reference image and return the aligned copy.

        Returns the input warped by either an affine transform (``2d`` option)
        or a RANSAC-fitted homography, sized to the reference image.
        """
        config = self.tuning_config

        # Stretch intensities to the full [0, 255] range for stable detection.
        # Fix: cv2.normalize's second positional parameter is `dst`, not
        # `alpha` — the original call passed (image, 0, 255) which set dst=0
        # and relied on NORM_MINMAX swapping alpha/beta. Same output, explicit.
        image = cv2.normalize(
            image, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX
        )

        # Detect ORB features and compute descriptors for the input image.
        from_keypoints, from_descriptors = self.orb.detectAndCompute(image, None)

        # Hamming distance is the correct metric for ORB's binary descriptors.
        matcher = cv2.DescriptorMatcher_create(
            cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING
        )
        # Note: no need to wrap in np.array — sorted() returns a list anyway.
        matches = matcher.match(from_descriptors, self.to_descriptors, None)

        # Keep only the best-scoring fraction of matches (smallest distances).
        matches = sorted(matches, key=lambda match: match.distance)
        num_good_matches = int(len(matches) * self.good_match_percent)
        matches = matches[:num_good_matches]

        # Optional debug visualization of the retained matches.
        if config.outputs.show_image_level > 2:
            im_matches = cv2.drawMatches(
                image, from_keypoints, self.ref_img, self.to_keypoints, matches, None
            )
            InteractionUtils.show("Aligning", im_matches, resize=True, config=config)

        # Collect matched point coordinates: points1 in the input image,
        # points2 in the reference image.
        points1 = np.zeros((len(matches), 2), dtype=np.float32)
        points2 = np.zeros((len(matches), 2), dtype=np.float32)
        for i, match in enumerate(matches):
            points1[i, :] = from_keypoints[match.queryIdx].pt
            points2[i, :] = self.to_keypoints[match.trainIdx].pt

        height, width = self.ref_img.shape
        if self.transform_2_d:
            # Affine fit: rotation, translation, scale and shear only.
            m, _inliers = cv2.estimateAffine2D(points1, points2)
            return cv2.warpAffine(image, m, (width, height))

        # Full perspective homography — robust to camera tilt.
        h, _mask = cv2.findHomography(points1, points2, cv2.RANSAC)
        return cv2.warpPerspective(image, h, (width, height))
| |
|