# image-stitching / app.py
# (Hugging Face Space page residue — "Omnibus's picture / Update app.py / a7bbd1b" —
#  commented out so this file is valid Python.)
import os
import gradio as gr
import deblur
import cv2
import numpy as np
import logging
import uuid
#import imutils
from image_stitching import ImageStitcher
from image_stitching import load_frames
from image_stitching import display
#level = logging.DEBUG if args.debug else logging.INFO
# Always log at DEBUG level; the arg-driven toggle above was left disabled.
logging.basicConfig(level=logging.DEBUG)
# Shared, module-level stitcher: it accumulates frames across calls to the
# functions below (state persists for the lifetime of the process).
stitcher = ImageStitcher()
# Base URL of this Space; used to build absolute file URLs for remote models.
main_url="https://omnibus-image-stitching.hf.space"
#esr = gr.Interface.load("spaces/Omnibus/Real-ESRGAN-mod")
#import stitching
#os.system("git clone https://github.com/WillBrennan/ImageStitching && cd ImageStitching")
# install deps
#os.system("pip install -r requirements.txt")
#out_box=[]
def fin(video_path):
    """Stitch every frame of *video_path* into the module-level panorama.

    Each intermediate composite is saved as ``tmp_im_<idx>.png``; the list
    of all saved filenames is returned once the video is exhausted.
    """
    saved_names = []
    for frame_idx, frame in enumerate(load_frames(video_path)):
        # Feed the frame to the shared stitcher and snapshot the composite.
        stitcher.add_image(frame)
        composite = stitcher.image()
        out_name = f'tmp_im_{frame_idx}.png'
        print(f'saving result image on {out_name}')
        cv2.imwrite(out_name, composite)
        saved_names.append(out_name)
    return saved_names
def stitch_frames(im1, im2):
    """Stitch two image files via SIFT matching and a homography warp.

    Parameters
    ----------
    im1, im2 : str
        Paths to the two input images; ``im1`` is warped into ``im2``'s frame.

    Returns
    -------
    numpy.ndarray
        BGR image: ``im2`` with the warped ``im1`` composited on top.

    Raises
    ------
    ValueError
        If either image cannot be read or the homography cannot be estimated.
    """
    # Load the images
    image1 = cv2.imread(im1)
    image2 = cv2.imread(im2)
    if image1 is None or image2 is None:
        raise ValueError(f'could not read input images: {im1}, {im2}')
    # Convert images to grayscale for feature detection
    gray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(image2, cv2.COLOR_BGR2GRAY)
    # Detect keypoints and compute SIFT descriptors for both images
    sift = cv2.SIFT_create()
    keypoints1, descriptors1 = sift.detectAndCompute(gray1, None)
    keypoints2, descriptors2 = sift.detectAndCompute(gray2, None)
    # Brute-force match the descriptors and keep the N best matches
    bf = cv2.BFMatcher()
    matches = bf.match(descriptors1, descriptors2)
    num_matches = 50
    matches = sorted(matches, key=lambda m: m.distance)[:num_matches]
    # Extract the matched keypoint coordinates
    src_points = np.float32([keypoints1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst_points = np.float32([keypoints2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    # Estimate the homography mapping image1 -> image2
    homography, _ = cv2.findHomography(src_points, dst_points, cv2.RANSAC, 5.0)
    if homography is None:
        raise ValueError('homography estimation failed (not enough good matches)')
    # Warp image1 into image2's coordinate frame
    result = cv2.warpPerspective(image1, homography, (image2.shape[1], image2.shape[0]))
    # BUG FIX: the original indexed s_img[:, :, 3] (an alpha channel that a
    # 3-channel BGR warp result does not have) and used undefined
    # y_offset/x_offset. The warp output is already in image2's frame, so the
    # offsets are 0 and we blend with a coverage mask: warped pixels that are
    # non-black overwrite the base image, everything else keeps image2.
    s_img = result
    l_img = image2.copy()
    y_offset, x_offset = 0, 0
    y1, y2 = y_offset, y_offset + s_img.shape[0]
    x1, x2 = x_offset, x_offset + s_img.shape[1]
    coverage = (s_img.sum(axis=2) > 0).astype(np.float32)
    for c in range(3):
        l_img[y1:y2, x1:x2, c] = (coverage * s_img[:, :, c] +
                                  (1.0 - coverage) * l_img[y1:y2, x1:x2, c])
    return l_img
def stitch_native(img1, img2):
    """Stitch two image files with OpenCV's built-in ``Stitcher``.

    Returns the stitched panorama array (or ``None`` on failure); the
    status code and output are printed for debugging.
    """
    images = [cv2.imread(img1), cv2.imread(img2)]
    native_stitcher = cv2.Stitcher.create()
    status, panorama = native_stitcher.stitch(images)
    print(status)
    print(panorama)
    return panorama
def stitch(video_path):
    """Progressively stitch video frames into a running composite.

    Generator used as a Gradio callback: after each frame it yields the
    current list of composite image paths so the gallery updates live.

    Parameters
    ----------
    video_path : str
        Path/URL of the input video, passed to ``load_frames``.

    Yields
    ------
    list[str]
        Paths of composite images produced so far (always 'comp.png').
    """
    out_box = []
    comp_name = 'comp.png'
    uid = uuid.uuid4()  # retained from the original; currently unused
    frames = load_frames([f'{video_path}'])
    for idx, frame in enumerate(frames):
        im_name = f'tmp_im_{idx}.png'
        cv2.imwrite(im_name, frame)
        if idx == 0:
            # The first frame seeds the composite — nothing to stitch yet.
            cv2.imwrite(comp_name, frame)
            out_box.append(comp_name)
            yield out_box
            continue
        print(f'saving result image on {im_name} :: {comp_name}')
        composite = cv2.imread(comp_name)
        stitchy = cv2.Stitcher.create()
        # BUG FIX: Stitcher.stitch() takes image arrays, not filename strings,
        # and returns (status, ndarray) — the original passed paths and then
        # called cv2.imread() on the ndarray, which always failed.
        status, output = stitchy.stitch([frame, composite])
        print(status)
        if status == cv2.Stitcher_OK and output is not None:
            cv2.imwrite(comp_name, output)
        # On failure the previous composite is kept (best-effort behavior).
        out_box.append(comp_name)
        yield out_box
def stitch_og(video_path):
    """Feed each video frame to the module-level stitcher, yielding the
    cumulative list of saved composite images after every frame.

    Generator variant suitable as a streaming Gradio callback.
    """
    results = []
    run_id = uuid.uuid4()  # kept for parity with the original (unused)
    # An earlier experiment upscaled each frame through a remote
    # Real-ESRGAN Space before stitching:
    #   cv2.imwrite(f'{run_id}-{idx}.png', frame)
    #   out_url = f'{main_url}/file={os.path.abspath(...)}'
    #   frame = esr(out_url, "realesr-general-x4v3", 0.5, "False", 1)
    for frame_idx, frame in enumerate(load_frames([f'{video_path}'])):
        stitcher.add_image(frame)
        out_name = f'tmp_im_{frame_idx}.png'
        composite = stitcher.image()
        cv2.imwrite(out_name, composite)
        print(f'saving result image on {out_name}')
        results.append(out_name)
        yield results
# Gradio UI: a textbox for the video path, a run button, and a gallery that
# fills in live as the `stitch` generator yields progressive composites.
with gr.Blocks() as app:
    inp = gr.Textbox()
    btn = gr.Button()
    outp = gr.Gallery(columns=6)
    btn.click(fn=stitch, inputs=inp, outputs=outp)
app.launch()