Spaces:
Runtime error
Runtime error
Upload 5 files
Browse files- app.py +240 -0
- image.jpg +0 -0
- markup.py +71 -0
- perspective_correction.py +106 -0
- requirements.txt +5 -0
app.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
from rembg import remove
|
| 3 |
+
from PIL import ImageOps, ImageEnhance, Image
|
| 4 |
+
from streamlit_option_menu import option_menu
|
| 5 |
+
from markup import real_estate_app, real_estate_app_hf, sliders_intro, perspective_intro, manual_bg_intro
|
| 6 |
+
from perspective_correction import perspective_correction, perspective_correction2
|
| 7 |
+
from streamlit_drawable_canvas import st_canvas
|
| 8 |
+
import tempfile
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import cv2
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def tab1():
    """Intro tab: hero image on the left, app description and profile badges on the right."""
    st.header("Image Background Remover")
    left, right = st.columns([1, 2])
    with left:
        st.image("image.jpg", use_column_width=True)
    with right:
        st.markdown(real_estate_app(), unsafe_allow_html=True)
        st.markdown(real_estate_app_hf(), unsafe_allow_html=True)

    # Badge links pointing at the author's GitHub / Hugging Face profiles.
    github_link = '[<img src="https://badgen.net/badge/icon/github?icon=github&label">](https://github.com/ethanrom)'
    huggingface_link = '[<img src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">](https://huggingface.co/ethanrom)'
    st.write(" ".join((github_link, huggingface_link)), unsafe_allow_html=True)
|
| 28 |
+
|
| 29 |
+
def tab2():
    """AI background-removal tab.

    Lets the user upload one or many images, preview RGB / curve / mask
    adjustments on the first image, then run rembg's `remove` on every
    upload with the same adjustment settings applied.
    """
    st.header("Image Background Remover")
    st.markdown(sliders_intro(), unsafe_allow_html=True)

    upload_option = st.radio("Upload Option", ("Single Image", "Multiple Images"))

    # Normalize both upload modes to a list of PIL images.
    if upload_option == "Single Image":
        uploaded_images = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"], accept_multiple_files=False)
        images = [Image.open(uploaded_images)] if uploaded_images else []
    else:
        uploaded_images = st.file_uploader("Upload multiple images", type=["png", "jpg", "jpeg"], accept_multiple_files=True)
        images = [Image.open(image) for image in uploaded_images] if uploaded_images else []

    if images:
        col1, col2 = st.columns([2, 1])

        with col1:
            # Only the first image drives the live preview; the chosen
            # settings are re-applied to every upload on "Remove Background".
            st.image(images[0], caption="Original Image", use_column_width=True)
            image = preprocess_image_1(images[0])

        with col2:
            st.subheader("RGB Adjustments")
            with st.expander("Expand"):
                # (min, max) range per channel; adjust_rgb remaps each channel
                # into this range and autocontrasts it.
                r_min, r_max = st.slider("Red", min_value=0, max_value=255, value=(0, 255), step=1)
                g_min, g_max = st.slider("Green", min_value=0, max_value=255, value=(0, 255), step=1)
                b_min, b_max = st.slider("Blue", min_value=0, max_value=255, value=(0, 255), step=1)

                adjusted_image = adjust_rgb(image, r_min, r_max, g_min, g_max, b_min, b_max)
                st.image(adjusted_image, caption="Adjusted Image", use_column_width=True)

            st.subheader("Curves Adjustment")
            with st.expander("Expand"):
                # Per-channel brightness factor in [0, 1].
                r_curve = st.slider("Red Curve", min_value=0.0, max_value=1.0, value=1.0, step=0.05)
                g_curve = st.slider("Green Curve", min_value=0.0, max_value=1.0, value=1.0, step=0.05)
                b_curve = st.slider("Blue Curve", min_value=0.0, max_value=1.0, value=1.0, step=0.05)

                adjusted_image = adjust_curves(adjusted_image, r_curve, g_curve, b_curve)
                st.image(adjusted_image, caption="Adjusted Image", use_column_width=True)

            st.subheader("Masking")
            with st.expander("Expand"):
                # Alpha values above the threshold become fully opaque.
                threshold = st.slider("Threshold", min_value=0, max_value=255, value=128, step=1)

                adjusted_image = apply_masking(adjusted_image, threshold)
                st.image(adjusted_image, caption="Adjusted Image", use_column_width=True)

        with col1:
            if st.button("Remove Background"):
                with st.spinner("Removing background..."):
                    # Re-run the full adjustment pipeline per image so that
                    # every upload gets the settings previewed above.
                    output_images = []
                    for image in images:
                        processed_image = preprocess_image_1(image)
                        adjusted_image = adjust_rgb(processed_image, r_min, r_max, g_min, g_max, b_min, b_max)
                        adjusted_image = adjust_curves(adjusted_image, r_curve, g_curve, b_curve)
                        adjusted_image = apply_masking(adjusted_image, threshold)
                        output_images.append(remove(adjusted_image))

                    with st.expander("Background Removed Images"):
                        for i in range(len(output_images)):
                            st.image(output_images[i], caption=f"Background Removed Image {i + 1}", use_column_width=True)
|
| 89 |
+
|
| 90 |
+
def preprocess_image_1(image):
    """Return *image* in RGBA mode, converting only when necessary."""
    return image if image.mode == "RGBA" else image.convert("RGBA")
|
| 94 |
+
|
| 95 |
+
def adjust_rgb(image, r_min, r_max, g_min, g_max, b_min, b_max):
    """Remap each RGB channel into its [min, max] range, then autocontrast it.

    The alpha channel is passed through unchanged. *image* must be RGBA.
    """

    def _remap(band, lo, hi):
        # Linear rescale of 0..255 into lo..hi (int-truncated, as before),
        # followed by autocontrast to re-spread the histogram.
        return ImageOps.autocontrast(band.point(lambda p: int(p * (hi - lo) / 255 + lo)))

    bands = image.split()
    alpha = bands[3]
    remapped = [
        _remap(band, lo, hi)
        for band, lo, hi in zip(bands[:3], (r_min, g_min, b_min), (r_max, g_max, b_max))
    ]
    return Image.merge("RGBA", (remapped[0], remapped[1], remapped[2], alpha))
|
| 101 |
+
|
| 102 |
+
def adjust_curves(image, r_curve, g_curve, b_curve):
    """Scale the brightness of each RGB channel by its curve factor.

    A factor of 1.0 leaves the channel unchanged; 0.0 blacks it out.
    The alpha channel is kept as-is. *image* must be RGBA.
    """
    r, g, b, a = image.split()
    brightened = tuple(
        ImageEnhance.Brightness(band).enhance(factor)
        for band, factor in zip((r, g, b), (r_curve, g_curve, b_curve))
    )
    return Image.merge("RGBA", brightened + (a,))
|
| 108 |
+
|
| 109 |
+
def apply_masking(image, threshold):
    """Binarize the alpha channel of an RGBA image.

    Pixels whose alpha exceeds *threshold* become fully opaque (255),
    all others fully transparent (0); color channels are untouched.
    """
    r, g, b, a = image.split()
    hard_alpha = Image.eval(a, lambda p: 255 if p > threshold else 0)
    return Image.merge("RGBA", (r, g, b, hard_alpha))
|
| 113 |
+
|
| 114 |
+
def remove_background(image, points):
    """Return *image* with an alpha channel that keeps only the polygon in *points*.

    Parameters:
        image: HxWxC uint8 array (RGB or RGBA, as produced by np.array(PIL image)).
        points: sequence of (x, y) canvas coordinates outlining the region to keep.

    Returns an HxWx4 array: the RGB planes of *image* plus a mask plane that is
    255 inside the polygon and 0 outside.
    """
    mask = np.zeros(image.shape[:2], dtype=np.uint8)
    polygon = np.array(points, dtype=np.int32).reshape((-1, 1, 2))
    # cv2.fillPoly raises on an empty point array; with no points the mask
    # simply stays all-transparent.
    if polygon.size:
        cv2.fillPoly(mask, [polygon], 255)
    # Bug fix: if the upload was already RGBA, dstack-ing the raw array with
    # the mask produced a 5-channel result that PIL cannot turn into an image.
    # Keep only the RGB planes before attaching the new alpha plane.
    if image.ndim == 3 and image.shape[2] == 4:
        image = image[:, :, :3]
    return np.dstack([image, mask])
|
| 122 |
+
|
| 123 |
+
def tab3():
    """Manual background-removal tab.

    The user clicks polygon corner points on a drawable canvas; the clicked
    region is kept and everything else is made transparent via
    `remove_background`. The result is also saved to background_removed.png.
    """
    st.header("Manual Background Removal")
    st.markdown(manual_bg_intro(), unsafe_allow_html=True)
    uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])

    if uploaded_file is not None:

        col1, col2 = st.columns([2, 1])
        with col1:
            image = Image.open(uploaded_file)
            # Downscale large uploads so the drawable canvas stays responsive;
            # thumbnail() preserves aspect ratio and resizes in place.
            max_image_size = 700
            if max(image.size) > max_image_size:
                image.thumbnail((max_image_size, max_image_size), Image.LANCZOS)
            st.image(image, caption="Original Image")
            image_width, image_height = image.size

        with col2:
            # "point" mode records each click as a circle object in json_data.
            drawing_mode = "point"
            stroke_width = st.slider("Stroke width: ", 1, 25, 3)
            realtime_update = st.checkbox("Update in realtime", True)

        with col1:
            st.subheader("Select Points on the Canvas")
            canvas_result = st_canvas(
                fill_color="rgba(255, 165, 0, 0.3)",
                stroke_width=stroke_width,
                background_image=image,
                update_streamlit=realtime_update,
                height=image_height,
                width=image_width,
                drawing_mode=drawing_mode,
                key="canvas",
            )

        if st.button("Remove Background"):
            if canvas_result.json_data is not None:
                # Collect the clicked points: each click in "point" mode is
                # stored as a "circle" object with left/top coordinates.
                points = []
                for obj in canvas_result.json_data["objects"]:
                    if "type" in obj and obj["type"] == "circle":
                        x = obj["left"]
                        y = obj["top"]
                        points.append((x, y))

                img_array = np.array(image)
                result = remove_background(img_array, points)

                result_image = Image.fromarray(result)

                transparent_bg_result = result_image.convert("RGBA")
                # Persist a copy next to the app so the result survives reruns.
                file_path = "background_removed.png"
                transparent_bg_result.save(file_path, format="PNG")
                st.image(transparent_bg_result, caption="Background Removed Image")
|
| 175 |
+
|
| 176 |
+
|
| 177 |
+
def tab4():
    """Perspective-correction tab.

    Accepts a (background-removed) PNG and warps it with one of two methods
    from perspective_correction.py, both driven by Hough-line detection.
    """
    st.header("Image Perspective Correction")
    st.write("Upload a transparent PNG image which you have removed the background using the previous tab.")
    st.markdown(perspective_intro(), unsafe_allow_html=True)

    uploaded_file = st.file_uploader("Choose a PNG image", type="png")

    if uploaded_file is not None:
        image = Image.open(uploaded_file)

        col1, col2 = st.columns([2, 1])
        with col1:
            st.image(image, caption="Original Image", use_column_width=True)
            image_np = np.array(image)

        with col2:
            correction_method = st.selectbox("Correction Method", ["Four-Point Perspective Correction", "Convex Hull Homography Perspective Correction"])

            # Both methods expose the same Hough-line tuning knobs; the
            # branches are duplicated so each method could diverge later.
            if correction_method == "Four-Point Perspective Correction":
                threshold_value = st.slider("Threshold Value", min_value=1, max_value=255, value=100)
                min_line_length = st.slider("Minimum Line Length", min_value=1, max_value=500, value=100)
                max_line_gap = st.slider("Maximum Line Gap", min_value=1, max_value=100, value=10)
            elif correction_method == "Convex Hull Homography Perspective Correction":
                threshold_value = st.slider("Threshold Value", min_value=1, max_value=255, value=100)
                min_line_length = st.slider("Minimum Line Length", min_value=1, max_value=500, value=100)
                max_line_gap = st.slider("Maximum Line Gap", min_value=1, max_value=100, value=10)
            else:
                # Defensive: selectbox restricts the choices, so this branch
                # should be unreachable in practice.
                st.write("Invalid correction method selected.")
                return

        with col1:
            if st.button("Correct Perspective"):
                with st.spinner("Correcting Perspective..."):
                    if uploaded_file is not None:
                        if correction_method == "Four-Point Perspective Correction":
                            corrected_image = perspective_correction(image_np, threshold_value, min_line_length, max_line_gap)
                        elif correction_method == "Convex Hull Homography Perspective Correction":
                            corrected_image = perspective_correction2(image_np, threshold_value, min_line_length, max_line_gap)
                        else:
                            st.write("Invalid correction method selected.")
                            return

                        st.image(corrected_image, caption="Corrected Image", use_column_width=True)
|
| 220 |
+
|
| 221 |
+
def main():
    """App entry point: configure the page and dispatch to the selected tab."""
    st.set_page_config(page_title="Background Removal Demo", page_icon=":memo:", layout="wide")

    # Tab label -> render function; dict order drives the sidebar order.
    pages = {
        "Intro": tab1,
        "AI Background Removal": tab2,
        "Manual Background Removal": tab3,
        "Perspective Correction": tab4,
    }

    with st.sidebar:
        # option_menu returns the label of the currently selected entry.
        selected = option_menu("Select a Tab", list(pages), menu_icon="cast")

    handler = pages.get(selected)
    if handler is not None:
        handler()
|
| 238 |
+
|
| 239 |
+
# Script entry point (run via `streamlit run app.py`).
if __name__ == "__main__":
    main()
|
image.jpg
ADDED
|
markup.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def real_estate_app():
    """Return the intro-tab description as an HTML fragment.

    Bug fix: the original fragment ended with a ``</div>`` that was never
    opened; the wrapper ``<div>`` is now opened explicitly (matching the
    sibling fragment in ``real_estate_app_hf``) so the markup is well-formed.
    """
    return """
    <div style='text-align: left;'>
    <h3 style='text-align: center;'>Introduction</h3>

    <p>This app allows you to upload an image and remove its background using a custom trained model.</p>

    <h4>Information:</h4>
    <ul>
    <li><b>Very low requirements:</b> This demo is running on a free single-core CPU. The app will work on any hardware. It may take a few minutes for the first run, but afterwards, it will perform faster.</li>
    <li><b>Backend:</b> Python backend with an optimized ONNX model. No third party APIs or paid services used. Everything used is opensource</li>
    <li><b>Frontend:</b> The demo's frontend is written in Python Streamlit. Unfortunately, I cannot write a JavaScript frontend as I am a backend engineer. However, I can refer you to a colleague if you prefer a better-looking UI.</li>
    <li><b>Perspective correction:</b> Perspective correction is done via OpenCV.</li>
    </ul>
    </div>
    """
|
| 16 |
+
|
| 17 |
+
def real_estate_app_hf():
    """Return the "about this demo" section of the intro tab as an HTML fragment.

    Bug fix: corrects the user-facing typo "Addional" -> "Additional".
    """
    return """
    <div style='text-align: left;'>
    <h3 style='text-align: center;'>About this background removal demo</h3>
    <p>In this demo, the two requested features have been split into two separate tabs for better individual evaluation. They can be combined into one in the final submission.</p>
    <br>
    <h4>How to use:</h4>
    <ul>
    <li><b>Remove Background:</b> Use this tab to remove the background of the images. Right-click and save the image once done.</li>
    <li><b>Correct Perspective:</b> Use this tab to check the perspective correction option. Please make sure to use an image with the background already removed.</li>
    </ul>
    <br>
    <p><b>Update 1</b> RGB adjustment sliders for image preprocessing</p>
    <p><b>Update 2</b> Additional options for perspective correction</p>
    <p><b>Update 3</b> Multiple image support</p>
    <p><b>Update 4</b> Manual background removal</p>
    </div>
    """
|
| 35 |
+
|
| 36 |
+
def sliders_intro():
    """Return the HTML help text shown above the adjustment sliders in tab2.

    Describes the RGB range sliders, the per-channel curve (brightness)
    sliders, and the alpha-threshold masking slider.
    """
    return """
    <p>Newly added sliders which will appear after an image is uploaded serve as interactive tools to adjust various aspects of the uploaded image. Each slider corresponds to a specific color channel (red, green, blue) or a curve adjustment.
    By using these sliders, users can fine-tune the color levels and apply curve modifications to achieve the desired visual effect.</p>
    <p>For the RGB Adjustments section, users can use the Red, Green, and Blue sliders to set the minimum and maximum values for each color channel.
    By adjusting these values, users can enhance or reduce the intensity of each color channel individually, allowing for precise color adjustments.</p>
    <p>In the Curves Adjustment section, users can utilize the Red Curve, Green Curve, and Blue Curve sliders to control the brightness of the respective color channels.
    By moving these sliders, users can create custom curves, influencing the overall tone and contrast of the image.</p>
    <p>The Masking section offers the Threshold slider, which determines the cutoff point for the transparency mask.
    Users can adjust the threshold value to define the boundary between foreground and background elements. This feature enables users to isolate objects by selectively applying transparency to specific areas of the image.</p>
    """
|
| 47 |
+
|
| 48 |
+
def perspective_intro():
    """Return the explanation text for the perspective-correction tab.

    Bug fix: corrects user-facing typos ("chose" -> "choose") and the
    lowercase sentence start in the first line.
    """
    return """
    There are two different perspective correction methods you can choose from; the difference is how they determine the transformation matrix used for warping the image.

    In the Four-Point Perspective Correction, the method uses a four-point perspective transform. It first detects lines in the image using the HoughLinesP function, and then calculates the endpoints of these lines.
    If enough endpoints are found (at least 4), a convex hull is created based on these endpoints. From the convex hull, a four-sided polygon is obtained, representing the region of interest.
    The width and height of this polygon are determined, and a destination set of points is defined to which the polygon will be mapped.
    Finally, a perspective transformation matrix is computed using getPerspectiveTransform function, and the image is warped accordingly.

    In the Convex Hull Homography Perspective Correction, a similar process is followed, but instead of using a four-point perspective transform, it uses a homography transform.
    After obtaining the endpoints, a convex hull is created, and a four-sided polygon is extracted from it. The width and height of this polygon are calculated, and a destination set of points is defined.
    But instead of using getPerspectiveTransform, the findHomography function is used to compute the homography matrix.
    This matrix defines the transformation between the source polygon and the destination polygon, and the image is warped using the warpPerspective function.

    The parameters threshold_value, min_line_length, and max_line_gap in both methods control the detection of lines in the image.
    These parameters affect the number and quality of lines detected, which in turn can impact the accuracy of the perspective correction.
    Adjusting these values allows fine-tuning the perspective correction process based on the specific characteristics of the input image.
    However, it is important to note that changing these values requires some experimentation to achieve the desired results, and it is recommended to find the optimal values through trial and error.
    """
|
| 67 |
+
|
| 68 |
+
def manual_bg_intro():
    """Return the instruction text shown at the top of the manual-removal tab."""
    return """
    Click on the canvas and select four or more points, starting from any one corner of the slab and proceeding in order in any direction. The background will be removed using the selected points.
    """
|
perspective_correction.py
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import numpy as np
|
| 3 |
+
import cv2
|
| 4 |
+
|
| 5 |
+
def preprocess_image(image):
    """Binarize *image* for Hough line detection.

    Converts to grayscale, applies inverted adaptive mean thresholding, then a
    3x3 morphological opening to drop speckle noise.

    Bug fix: the app feeds this function RGBA arrays (tab4 explicitly asks for
    transparent PNGs), but ``cv2.COLOR_BGR2GRAY`` requires exactly 3 channels
    and raises on 4-channel input. Accept 1-, 3-, or 4-channel images.
    """
    if image.ndim == 2:
        # Already single-channel.
        gray = image
    elif image.shape[2] == 4:
        gray = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY)
    else:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    threshold = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 15, 7)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    opening = cv2.morphologyEx(threshold, cv2.MORPH_OPEN, kernel, iterations=1)
    return opening
|
| 11 |
+
|
| 12 |
+
def four_point_transform(image, pts):
    """Warp *image* so that the extreme quadrilateral of *pts* becomes an
    axis-aligned rectangle.

    Corners are picked by coordinate arithmetic: smallest/largest x+y give
    top-left/bottom-right; smallest/largest y-x give top-right/bottom-left.
    """
    ordered = np.zeros((4, 2), dtype=np.float32)
    sums = pts.sum(axis=1)
    diffs = np.diff(pts, axis=1)
    ordered[0] = pts[np.argmin(sums)]   # top-left
    ordered[2] = pts[np.argmax(sums)]   # bottom-right
    ordered[1] = pts[np.argmin(diffs)]  # top-right
    ordered[3] = pts[np.argmax(diffs)]  # bottom-left
    (tl, tr, br, bl) = ordered

    def _edge_len(a, b):
        # Euclidean distance between two corner points.
        return np.sqrt(((a[0] - b[0]) ** 2) + ((a[1] - b[1]) ** 2))

    # Output size: the longer of each opposite edge pair (int-truncated).
    out_w = max(int(_edge_len(br, bl)), int(_edge_len(tr, tl)))
    out_h = max(int(_edge_len(tr, br)), int(_edge_len(tl, bl)))

    target = np.array(
        [[0, 0], [out_w - 1, 0], [out_w - 1, out_h - 1], [0, out_h - 1]],
        dtype=np.float32,
    )
    matrix = cv2.getPerspectiveTransform(ordered, target)
    return cv2.warpPerspective(image, matrix, (out_w, out_h))
|
| 32 |
+
|
| 33 |
+
def perspective_correction(image, threshold_value=100, min_line_length=100, max_line_gap=10):
    """Correct perspective via Hough line endpoints and a four-point transform.

    Parameters:
        image: input image array (as produced by np.array(PIL image)).
        threshold_value: HoughLinesP accumulator threshold.
        min_line_length: minimum detected line length in pixels.
        max_line_gap: maximum gap allowed within a single line.

    Returns the warped image, or *image* unchanged (with a message shown in
    the Streamlit UI) when not enough line endpoints are found.
    """
    processed_image = preprocess_image(image)
    lines = cv2.HoughLinesP(
        processed_image,
        rho=1,
        theta=np.pi / 180,
        threshold=threshold_value,
        minLineLength=min_line_length,
        maxLineGap=max_line_gap,
    )

    # Bug fix: HoughLinesP returns None (not an empty array) when no lines
    # are detected; the original `len(lines)` raised TypeError in that case.
    if lines is None or len(lines) == 0:
        st.write("No lines found.")
        return image

    # Collect both endpoints of every detected segment.
    endpoints = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        endpoints.append((x1, y1))
        endpoints.append((x2, y2))

    if len(endpoints) < 4:
        st.write("Insufficient endpoints found.")
        return image

    return four_point_transform(image, np.array(endpoints))
|
| 62 |
+
|
| 63 |
+
def perspective_correction2(image, threshold_value=100, min_line_length=100, max_line_gap=10):
    """Correct perspective via a convex hull of Hough endpoints and a homography.

    Parameters:
        image: input image array (as produced by np.array(PIL image)).
        threshold_value: HoughLinesP accumulator threshold.
        min_line_length: minimum detected line length in pixels.
        max_line_gap: maximum gap allowed within a single line.

    Returns the warped image, or *image* unchanged (with a message shown in
    the Streamlit UI) when not enough line endpoints are found.
    """
    processed_image = preprocess_image(image)
    lines = cv2.HoughLinesP(
        processed_image,
        rho=1,
        theta=np.pi / 180,
        threshold=threshold_value,
        minLineLength=min_line_length,
        maxLineGap=max_line_gap,
    )

    # Bug fix: HoughLinesP returns None (not an empty array) when no lines
    # are detected; the original `len(lines)` raised TypeError in that case.
    if lines is None or len(lines) == 0:
        st.write("No lines found.")
        return image

    endpoints = []
    for line in lines:
        x1, y1, x2, y2 = line[0]
        endpoints.append((x1, y1))
        endpoints.append((x2, y2))

    if len(endpoints) < 4:
        st.write("Insufficient endpoints found.")
        return image

    endpoints = np.array(endpoints)
    hull = cv2.convexHull(endpoints).reshape(-1, 2)

    # Order hull corners as TL, TR, BR, BL by coordinate sum / difference
    # (same convention as four_point_transform).
    rect = np.zeros((4, 2), dtype=np.float32)
    rect[0] = hull[np.argmin(hull.sum(axis=1))]
    rect[2] = hull[np.argmax(hull.sum(axis=1))]
    rect[1] = hull[np.argmin(np.diff(hull, axis=1))]
    rect[3] = hull[np.argmax(np.diff(hull, axis=1))]

    # Output size: the longer of each opposite edge pair (int-truncated).
    widthA = np.sqrt(((rect[2][0] - rect[3][0]) ** 2) + ((rect[2][1] - rect[3][1]) ** 2))
    widthB = np.sqrt(((rect[1][0] - rect[0][0]) ** 2) + ((rect[1][1] - rect[0][1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    heightA = np.sqrt(((rect[1][0] - rect[2][0]) ** 2) + ((rect[1][1] - rect[2][1]) ** 2))
    heightB = np.sqrt(((rect[0][0] - rect[3][0]) ** 2) + ((rect[0][1] - rect[3][1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))

    dst = np.array([[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]], dtype=np.float32)
    # findHomography (vs. getPerspectiveTransform) is the deliberate
    # difference between this method and perspective_correction.
    M, _ = cv2.findHomography(rect, dst)
    return cv2.warpPerspective(image, M, (maxWidth, maxHeight))
|
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
rembg
streamlit
numpy
streamlit_option_menu
streamlit-drawable-canvas
opencv-python-headless
Pillow
|