Omnibus committed on
Commit
a5e7e8b
·
1 Parent(s): 90a0d40

Upload 5 files

Browse files
image_stitching/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
'''
image_stitching is based around the ImageStitcher class which handles all
feature extraction and image warping
'''

from .stitcher import ImageStitcher
from .helpers import display, load_frames
image_stitching/combine.py ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+
3
+ import cv2
4
+ import numpy
5
+
6
+ __doc__ = '''helper functions for combining images, only to be used in the stitcher class'''
7
+
8
+
def compute_matches(features0, features1, matcher, knn=5, lowe=0.7):
    '''
    Apply Lowe ratio-test feature matching between features0 and features1.

    Args:
        features0: (keypoints, descriptors) pair for the first image.
        features1: (keypoints, descriptors) pair for the second image.
        matcher: a cv2 descriptor matcher exposing knnMatch (e.g. FLANN).
        knn: number of nearest neighbours fetched per descriptor (>= 2 needed
            for the ratio test to consider a descriptor).
        lowe: Lowe ratio threshold; smaller is stricter.

    Returns:
        (src_pts, dst_pts, num_matches) where the point arrays have shape
        (num_matches, 1, 2) and dtype float32, ready for cv2.findHomography.
    '''
    keypoints0, descriptors0 = features0
    keypoints1, descriptors1 = features1

    logging.debug('finding correspondence')

    matches = matcher.knnMatch(descriptors0, descriptors1, k=knn)

    logging.debug("filtering matches with lowe test")

    positive = []
    # knnMatch returns up to `knn` candidates per query descriptor; the
    # original `for match0, match1 in matches` unpacking crashed whenever
    # knn != 2 (the default is 5!) or a descriptor had fewer than two
    # neighbours, so only the best two candidates are compared here.
    for candidates in matches:
        if len(candidates) < 2:
            continue
        best, second = candidates[0], candidates[1]
        if best.distance < lowe * second.distance:
            positive.append(best)

    src_pts = numpy.array([match.queryIdx for match in positive], dtype=numpy.int32)
    src_pts = numpy.array([keypoints0[match.queryIdx].pt for match in positive],
                          dtype=numpy.float32)
    src_pts = src_pts.reshape((-1, 1, 2))
    dst_pts = numpy.array([keypoints1[match.trainIdx].pt for match in positive],
                          dtype=numpy.float32)
    dst_pts = dst_pts.reshape((-1, 1, 2))

    return src_pts, dst_pts, len(positive)
35
+
36
+
def combine_images(img0, img1, h_matrix):
    '''
    Warp img1 by h_matrix and paste img0 on top, returning the combined canvas.

    Args:
        img0: image left untouched; copied verbatim onto the result.
        img1: image warped into img0's coordinate frame via h_matrix.
        h_matrix: 3x3 homography mapping img1 coordinates into img0's frame.

    Returns:
        a single image whose bounding box contains both inputs.

    NOTE(review): the final slice assignment assumes img0 lands fully inside
    the computed canvas — confirm for homographies with strong perspective.
    The logic is convoluted here and needs to be simplified!
    '''
    logging.debug('combining images... ')

    # corner points of each image as (N, 1, 2) float32 arrays — the layout
    # cv2.perspectiveTransform expects
    points0 = numpy.array(
        [[0, 0], [0, img0.shape[0]], [img0.shape[1], img0.shape[0]], [img0.shape[1], 0]],
        dtype=numpy.float32)
    points0 = points0.reshape((-1, 1, 2))
    points1 = numpy.array(
        [[0, 0], [0, img1.shape[0]], [img1.shape[1], img1.shape[0]], [img1.shape[1], 0]],
        dtype=numpy.float32)
    points1 = points1.reshape((-1, 1, 2))

    # project img1's corners into img0's frame and take the union's bounding box
    points2 = cv2.perspectiveTransform(points1, h_matrix)
    points = numpy.concatenate((points0, points2), axis=0)

    # pad by half a pixel so integer truncation never clips the extreme corners
    [x_min, y_min] = (points.min(axis=0).ravel() - 0.5).astype(numpy.int32)
    [x_max, y_max] = (points.max(axis=0).ravel() + 0.5).astype(numpy.int32)

    # translation shifting the bounding box so its top-left corner lands at (0, 0)
    h_translation = numpy.array([[1, 0, -x_min], [0, 1, -y_min], [0, 0, 1]])

    logging.debug('warping previous image...')
    output_img = cv2.warpPerspective(img1, h_translation.dot(h_matrix),
                                     (x_max - x_min, y_max - y_min))
    # overwrite img0's region with the original (unwarped) pixels; x_min/y_min
    # are <= 0 here, so -x_min/-y_min are non-negative offsets into the canvas
    output_img[-y_min:img0.shape[0] - y_min, -x_min:img0.shape[1] - x_min] = img0
    return output_img
image_stitching/helpers.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import pathlib
3
+
4
+ from typing import List
5
+ from typing import Generator
6
+
7
+ import cv2
8
+ import numpy
9
+
10
+ __doc__ = '''helper functions for loading frames and displaying them'''
11
+
12
+
def display(title, img, max_size=500000):
    '''
    Show img in a window named title, shrinking it first so the displayed
    area never exceeds max_size pixels — large stitches stay on screen.
    '''
    assert isinstance(img, numpy.ndarray), 'img must be a numpy array'
    assert isinstance(title, str), 'title must be a string'

    num_pixels = img.shape[0] * img.shape[1]
    scale = numpy.sqrt(min(1.0, float(max_size) / num_pixels))
    new_size = (int(scale * img.shape[1]), int(scale * img.shape[0]))
    cv2.imshow(title, cv2.resize(img, new_size))
24
+
25
+
def read_video(video_path: pathlib.Path):
    '''
    Generator yielding each frame of the video at video_path, in order.

    The capture handle is always released — even when the consumer abandons
    the generator early — via the try/finally; the original leaked the
    cv2.VideoCapture resource by never calling release().
    '''
    cap = cv2.VideoCapture(str(video_path))
    try:
        while True:
            ret, frame = cap.read()
            if not ret or frame is None:
                break

            yield frame
    finally:
        cap.release()
36
+
37
+
def load_frames(paths: List[str]) -> Generator[numpy.ndarray, None, None]:
    '''
    Yield frames from a mixed list of image paths, video paths and directories.

    Directories are searched recursively; files with unrecognised extensions
    are skipped with a warning. Unreadable image files are also skipped:
    cv2.imread returns None instead of raising, and the original yielded that
    None straight into the pipeline.
    '''
    for path in paths:
        path = pathlib.Path(path)

        if path.is_dir():
            yield from load_frames(path.rglob('*'))
        elif path.suffix.lower() in ['.jpg', '.jpeg', '.png']:
            frame = cv2.imread(str(path))
            if frame is None:
                logging.warning(f'could not read image {path.name}, skipping...')
            else:
                yield frame
        elif path.suffix.lower() in ['.avi', '.mp4', '.mov']:
            yield from read_video(path)
        else:
            logging.warning(f'skipping {path.name}...')
image_stitching/stitcher.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+
3
+ import cv2
4
+ import numpy
5
+
6
+ from .combine import combine_images
7
+ from .combine import compute_matches
8
+
9
+ __doc__ = '''ImageStitcher class for combining all images together'''
10
+
11
+
class ImageStitcher:
    '''
    Accumulates BGR images into a single stitched mosaic.

    Feed images one at a time with add_image; fetch the current result with
    image(). Images that cannot be registered are skipped with a warning.
    '''

    def __init__(self, min_num: int = 10, lowe: float = 0.7, knn_clusters: int = 2):
        '''
        Initialise the SIFT feature extractor and the FLANN matcher.

        Args:
            min_num: minimum number of ratio-test survivors required before a
                new image is stitched in.
            lowe: Lowe ratio threshold forwarded to compute_matches.
            knn_clusters: k for knnMatch; must be >= 2 for the ratio test.
        '''
        self.min_num = min_num
        self.lowe = lowe
        self.knn_clusters = knn_clusters

        # algorithm 0 selects the KD-tree index; 5 trees, 50 search checks
        self.flann = cv2.FlannBasedMatcher({'algorithm': 0, 'trees': 5}, {'checks': 50})
        self.sift = cv2.SIFT_create()

        # accumulated BGR mosaic and its grayscale copy (None until first add)
        self.result_image = None
        self.result_image_gray = None

    def add_image(self, image: numpy.ndarray):
        '''
        Stitch one more image into the accumulated mosaic.

        Extracts SIFT features from the running result and the new image,
        matches them, estimates a homography and combines the two. Images with
        too few correspondences, or for which RANSAC cannot estimate a
        homography, are skipped with a warning and the mosaic is unchanged.
        '''
        assert image.ndim == 3, 'must be an image!'
        assert image.shape[-1] == 3, 'must be BGR!'
        assert image.dtype == numpy.uint8, 'must be a uint8'

        # inputs are asserted to be BGR above; the original used
        # cv2.COLOR_RGB2GRAY, which applies the channel weights to the wrong
        # channel order for BGR data
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        if self.result_image is None:
            # first image simply seeds the mosaic
            self.result_image = image
            self.result_image_gray = image_gray
            return

        # todo(will.brennan) - stop computing features on the results image each time!
        result_features = self.sift.detectAndCompute(self.result_image_gray, None)
        image_features = self.sift.detectAndCompute(image_gray, None)

        matches_src, matches_dst, n_matches = compute_matches(result_features,
                                                              image_features,
                                                              matcher=self.flann,
                                                              knn=self.knn_clusters,
                                                              lowe=self.lowe)

        if n_matches < self.min_num:
            logging.warning('too few correspondences to add image to stitched image')
            return

        logging.debug('computing homography between accumulated and new images')
        homography, _ = cv2.findHomography(matches_src, matches_dst, cv2.RANSAC, 5.0)
        if homography is None:
            # RANSAC can fail on degenerate correspondences; the original would
            # have passed None into combine_images and crashed
            logging.warning('homography estimation failed; image not added')
            return

        logging.debug('stitching images together')
        self.result_image = combine_images(image, self.result_image, homography)
        self.result_image_gray = cv2.cvtColor(self.result_image, cv2.COLOR_BGR2GRAY)

    def image(self):
        '''Return the stitched image accumulated so far (None before any add).'''
        return self.result_image
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ numpy
2
+ opencv-python>=4.5
3
+ opencv-contrib-python>=4.5
4
+ yapf
5
+ pylint