Spaces:
Build error
Build error
primera carga de archivos para probar aplicacion
Browse files
- .gitignore +39 -0
- README.md +78 -13
- app.py +379 -0
- requirements.txt +5 -0
.gitignore
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
*.so
|
| 6 |
+
.Python
|
| 7 |
+
env/
|
| 8 |
+
build/
|
| 9 |
+
develop-eggs/
|
| 10 |
+
dist/
|
| 11 |
+
downloads/
|
| 12 |
+
eggs/
|
| 13 |
+
.eggs/
|
| 14 |
+
lib/
|
| 15 |
+
lib64/
|
| 16 |
+
parts/
|
| 17 |
+
sdist/
|
| 18 |
+
var/
|
| 19 |
+
wheels/
|
| 20 |
+
*.egg-info/
|
| 21 |
+
.installed.cfg
|
| 22 |
+
*.egg
|
| 23 |
+
|
| 24 |
+
# Virtual Environment
|
| 25 |
+
venv/
|
| 26 |
+
ENV/
|
| 27 |
+
|
| 28 |
+
# IDE
|
| 29 |
+
.idea/
|
| 30 |
+
.vscode/
|
| 31 |
+
*.swp
|
| 32 |
+
*.swo
|
| 33 |
+
|
| 34 |
+
# Streamlit
|
| 35 |
+
.streamlit/
|
| 36 |
+
|
| 37 |
+
# System
|
| 38 |
+
.DS_Store
|
| 39 |
+
Thumbs.db
|
README.md
CHANGED
|
@@ -1,13 +1,78 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ImageLab Pro: Advanced Image Processing Laboratory
|
| 2 |
+
|
| 3 |
+
ImageLab Pro is an interactive image processing tool that offers a comprehensive suite of functionalities for manipulating and analyzing digital images. This web application, developed with Streamlit and OpenCV, provides an intuitive interface that allows users of all levels to experiment with advanced image processing techniques.
|
| 4 |
+
|
| 5 |
+
## Key Features
|
| 6 |
+
|
| 7 |
+
### 🖼️ Basic Operations
|
| 8 |
+
- Resizing and rotation
|
| 9 |
+
- Brightness and contrast adjustments
|
| 10 |
+
- Color quantization
|
| 11 |
+
- Geometric transformations
|
| 12 |
+
|
| 13 |
+
### 🎨 Advanced Processing
|
| 14 |
+
- Smoothing and enhancement filters
|
| 15 |
+
- Color space conversions
|
| 16 |
+
- Thresholding techniques
|
| 17 |
+
- Morphological operations
|
| 18 |
+
|
| 19 |
+
### 🔍 Image Analysis
|
| 20 |
+
- Edge detection
|
| 21 |
+
- Feature detection
|
| 22 |
+
- Histogram analysis
|
| 23 |
+
- Adaptive equalization
|
| 24 |
+
|
| 25 |
+
### 🎭 Artistic Effects
|
| 26 |
+
- Pencil sketch effect
|
| 27 |
+
- Cartoon style
|
| 28 |
+
- HDR effect
|
| 29 |
+
- Custom filters
|
| 30 |
+
|
| 31 |
+
## Installation
|
| 32 |
+
|
| 33 |
+
1. Clone the repository:
|
| 34 |
+
```bash
|
| 35 |
+
git clone [repository-url]
|
| 36 |
+
cd [repository-name]
|
| 37 |
+
```
|
| 38 |
+
|
| 39 |
+
2. Create a virtual environment:
|
| 40 |
+
```bash
|
| 41 |
+
python -m venv venv
|
| 42 |
+
```
|
| 43 |
+
|
| 44 |
+
3. Activate the virtual environment:
|
| 45 |
+
- Windows:
|
| 46 |
+
```bash
|
| 47 |
+
.\venv\Scripts\activate
|
| 48 |
+
```
|
| 49 |
+
- Linux/Mac:
|
| 50 |
+
```bash
|
| 51 |
+
source venv/bin/activate
|
| 52 |
+
```
|
| 53 |
+
|
| 54 |
+
4. Install the required packages:
|
| 55 |
+
```bash
|
| 56 |
+
pip install -r requirements.txt
|
| 57 |
+
```
|
| 58 |
+
|
| 59 |
+
5. Run the application:
|
| 60 |
+
```bash
|
| 61 |
+
streamlit run app.py
|
| 62 |
+
```
|
| 63 |
+
|
| 64 |
+
## Usage
|
| 65 |
+
|
| 66 |
+
1. Launch the application using the command above
|
| 67 |
+
2. Upload an image using the file uploader in the sidebar
|
| 68 |
+
3. Select the desired processing operation from the available options
|
| 69 |
+
4. Adjust the parameters as needed
|
| 70 |
+
5. Download the processed image if desired
|
| 71 |
+
|
| 72 |
+
## Requirements
|
| 73 |
+
|
| 74 |
+
See `requirements.txt` for a complete list of dependencies.
|
| 75 |
+
|
| 76 |
+
## License
|
| 77 |
+
|
| 78 |
+
This project is licensed under the MIT License - see the LICENSE file for details.
|
app.py
ADDED
|
@@ -0,0 +1,379 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import cv2
|
| 3 |
+
import numpy as np
|
| 4 |
+
from PIL import Image
|
| 5 |
+
import io
|
| 6 |
+
import matplotlib.pyplot as plt
|
| 7 |
+
|
| 8 |
+
def apply_threshold(image, threshold_type, thresh_value=127, max_value=255):
    """Apply the selected OpenCV thresholding technique to a grayscale image.

    Args:
        image: Single-channel (grayscale) image. The adaptive methods require
            a single channel; the caller converts with cv2.COLOR_BGR2GRAY.
        threshold_type: One of "Binary", "Binary Inverse", "Truncate",
            "To Zero", "To Zero Inverse", "Adaptive Mean",
            "Adaptive Gaussian", "Otsu".
        thresh_value: Cutoff for the fixed-threshold methods. Ignored by the
            adaptive methods and by Otsu, which derive the threshold from the
            image itself.
        max_value: Value assigned to pixels that pass the threshold.

    Returns:
        The thresholded image.

    Raises:
        ValueError: If *threshold_type* is not one of the supported names.
    """
    if threshold_type == "Binary":
        _, thresh = cv2.threshold(image, thresh_value, max_value, cv2.THRESH_BINARY)
    elif threshold_type == "Binary Inverse":
        _, thresh = cv2.threshold(image, thresh_value, max_value, cv2.THRESH_BINARY_INV)
    elif threshold_type == "Truncate":
        _, thresh = cv2.threshold(image, thresh_value, max_value, cv2.THRESH_TRUNC)
    elif threshold_type == "To Zero":
        _, thresh = cv2.threshold(image, thresh_value, max_value, cv2.THRESH_TOZERO)
    elif threshold_type == "To Zero Inverse":
        _, thresh = cv2.threshold(image, thresh_value, max_value, cv2.THRESH_TOZERO_INV)
    elif threshold_type == "Adaptive Mean":
        # Block size 11, constant C=2: per-neighborhood mean threshold.
        thresh = cv2.adaptiveThreshold(image, max_value, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2)
    elif threshold_type == "Adaptive Gaussian":
        thresh = cv2.adaptiveThreshold(image, max_value, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
    elif threshold_type == "Otsu":
        # Otsu picks the optimal global threshold itself, so thresh_value is unused.
        _, thresh = cv2.threshold(image, 0, max_value, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    else:
        # Bug fix: the original fell through for an unknown type and crashed
        # with NameError on `thresh`; fail fast with a clear message instead.
        raise ValueError(f"Unsupported threshold type: {threshold_type!r}")
    return thresh
| 26 |
+
|
| 27 |
+
def apply_histogram_equalization(image, method):
    """Equalize the image histogram using the requested *method*.

    "Simple" applies global histogram equalization; "CLAHE" applies
    contrast-limited adaptive equalization. Any other value returns the
    image untouched. Color (3-channel BGR) images are equalized on the
    luma channel only, via a round trip through YUV, so hues are preserved.
    """
    def _on_luma(transform):
        # Run *transform* over the Y (luma) plane only, then convert back to BGR.
        yuv = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
        yuv[:, :, 0] = transform(yuv[:, :, 0])
        return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)

    if method == "Simple":
        if len(image.shape) == 3:
            return _on_luma(cv2.equalizeHist)
        return cv2.equalizeHist(image)

    if method == "CLAHE":
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        if len(image.shape) == 3:
            return _on_luma(clahe.apply)
        return clahe.apply(image)

    # Unknown method: pass the image through unchanged.
    return image
| 44 |
+
|
| 45 |
+
def apply_color_quantization(image, k):
    """Reduce the image palette to *k* colors via k-means clustering.

    Pixels are clustered in 3-channel color space (assumes a BGR image);
    each pixel is then replaced by its cluster center, yielding a
    posterized image of the original shape and dtype (uint8).
    """
    # Flatten to an (N, 3) float32 sample matrix, as cv2.kmeans requires.
    pixels = np.float32(image).reshape((-1, 3))
    # Stop after 20 iterations or when centers move less than epsilon=1.0.
    stop_rule = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1.0)
    _, labels, palette = cv2.kmeans(pixels, k, None, stop_rule, 10, cv2.KMEANS_RANDOM_CENTERS)
    # Map every pixel to its cluster center and restore the image shape.
    palette = np.uint8(palette)
    quantized = palette[labels.flatten()]
    return quantized.reshape(image.shape)
| 52 |
+
|
| 53 |
+
def main() -> None:
    """Streamlit entry point: render the UI and apply the chosen processing.

    Layout: the sidebar holds the file uploader and every operation control;
    the main area shows the original image (left column) and the processed
    result (right column), with an optional PNG download underneath.
    """
    st.set_page_config(layout="wide")
    st.title("Advanced Image Processing Laboratory")
    # Inject a small CSS tweak: page background color and full-width buttons.
    st.markdown("""
    <style>
    .main {
        background-color: #f0f2f6;
    }
    .stButton>button {
        width: 100%;
    }
    </style>
    """, unsafe_allow_html=True)

    # Sidebar
    st.sidebar.title("Controls Panel")
    uploaded_file = st.sidebar.file_uploader("Upload Image", type=["jpg", "jpeg", "png"])

    if uploaded_file is not None:
        # Decode the uploaded bytes into an OpenCV image (BGR channel order).
        file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
        original_img = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)

        # Create columns for better layout
        col1, col2 = st.columns(2)

        with col1:
            st.subheader("Original Image")
            # OpenCV stores BGR; Streamlit expects RGB.
            st.image(cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB))

        # Main processing options
        processing_option = st.sidebar.selectbox(
            "Select Processing Category",
            ["Basic Operations", "Filtering", "Color Spaces", "Thresholding",
             "Morphological Operations", "Edge Detection", "Feature Detection",
             "Histogram Operations", "Advanced Effects"]
        )

        # Process and display result
        with col2:
            st.subheader("Processed Image")

            if processing_option == "Basic Operations":
                operation = st.sidebar.selectbox(
                    "Select Operation",
                    ["Resize", "Rotate", "Flip", "Brightness/Contrast", "Color Quantization"]
                )

                if operation == "Resize":
                    # Uniform scale in both axes; output size derived from fx/fy.
                    scale = st.sidebar.slider("Scale Factor", 0.1, 2.0, 1.0)
                    processed_img = cv2.resize(original_img, None, fx=scale, fy=scale)

                elif operation == "Rotate":
                    # Rotate around the image center, keeping the original canvas size
                    # (corners may be clipped).
                    angle = st.sidebar.slider("Angle", -180, 180, 0)
                    center = (original_img.shape[1] // 2, original_img.shape[0] // 2)
                    matrix = cv2.getRotationMatrix2D(center, angle, 1.0)
                    processed_img = cv2.warpAffine(original_img, matrix, (original_img.shape[1], original_img.shape[0]))

                elif operation == "Flip":
                    # cv2.flip codes: 1 = horizontal, 0 = vertical, -1 = both.
                    flip_option = st.sidebar.selectbox("Flip Direction", ["Horizontal", "Vertical", "Both"])
                    if flip_option == "Horizontal":
                        processed_img = cv2.flip(original_img, 1)
                    elif flip_option == "Vertical":
                        processed_img = cv2.flip(original_img, 0)
                    else:
                        processed_img = cv2.flip(original_img, -1)

                elif operation == "Brightness/Contrast":
                    brightness = st.sidebar.slider("Brightness", -100, 100, 0)
                    contrast = st.sidebar.slider("Contrast", -100, 100, 0)

                    processed_img = original_img.copy()
                    if brightness != 0:
                        # Linear brightness remap: shift the [0,255] range toward
                        # white (positive) or black (negative), then rescale.
                        if brightness > 0:
                            shadow = brightness
                            highlight = 255
                        else:
                            shadow = 0
                            highlight = 255 + brightness
                        alpha_b = (highlight - shadow)/255
                        gamma_b = shadow
                        # addWeighted with beta=0 acts as alpha*img + gamma.
                        processed_img = cv2.addWeighted(processed_img, alpha_b, processed_img, 0, gamma_b)

                    if contrast != 0:
                        # Classic contrast correction factor mapping [-100,100]
                        # onto a gain curve pivoting around mid-gray (127).
                        f = 131*(contrast + 127)/(127*(131-contrast))
                        alpha_c = f
                        gamma_c = 127*(1-f)
                        processed_img = cv2.addWeighted(processed_img, alpha_c, processed_img, 0, gamma_c)

                elif operation == "Color Quantization":
                    k = st.sidebar.slider("Number of Colors", 2, 16, 8)
                    processed_img = apply_color_quantization(original_img, k)

            elif processing_option == "Filtering":
                filter_type = st.sidebar.selectbox(
                    "Select Filter",
                    ["Blur", "Gaussian", "Median", "Bilateral", "Custom Kernel"]
                )

                if filter_type == "Custom Kernel":
                    # NOTE(review): kernel_size is displayed but never used below —
                    # all three custom kernels are fixed 3x3 matrices.
                    kernel_size = st.sidebar.slider("Kernel Size", 3, 7, 3, step=2)
                    kernel_type = st.sidebar.selectbox("Kernel Type", ["Sharpen", "Edge Detection", "Emboss"])

                    if kernel_type == "Sharpen":
                        kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
                    elif kernel_type == "Edge Detection":
                        kernel = np.array([[-1,-1,-1], [-1,8,-1], [-1,-1,-1]])
                    elif kernel_type == "Emboss":
                        kernel = np.array([[-2,-1,0], [-1,1,1], [0,1,2]])

                    # Depth -1 keeps the output dtype equal to the input's.
                    processed_img = cv2.filter2D(original_img, -1, kernel)
                else:
                    # step=2 keeps the kernel size odd, as the blur filters require.
                    kernel_size = st.sidebar.slider("Kernel Size", 3, 15, 3, step=2)

                    if filter_type == "Blur":
                        processed_img = cv2.blur(original_img, (kernel_size, kernel_size))
                    elif filter_type == "Gaussian":
                        processed_img = cv2.GaussianBlur(original_img, (kernel_size, kernel_size), 0)
                    elif filter_type == "Median":
                        processed_img = cv2.medianBlur(original_img, kernel_size)
                    elif filter_type == "Bilateral":
                        # Edge-preserving smoothing; d is the pixel neighborhood diameter.
                        d = st.sidebar.slider("d", 1, 15, 9)
                        sigma_color = st.sidebar.slider("Sigma Color", 1, 255, 75)
                        sigma_space = st.sidebar.slider("Sigma Space", 1, 255, 75)
                        processed_img = cv2.bilateralFilter(original_img, d, sigma_color, sigma_space)

            elif processing_option == "Color Spaces":
                color_space = st.sidebar.selectbox(
                    "Select Color Space",
                    ["RGB", "HSV", "LAB", "YCrCb", "Individual Channels"]
                )

                # NOTE(review): converted HSV/LAB/YCrCb arrays are later shown via
                # st.image after a BGR2RGB swap, so they render as false color.
                if color_space == "RGB":
                    processed_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)
                elif color_space == "HSV":
                    processed_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2HSV)
                elif color_space == "LAB":
                    processed_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2LAB)
                elif color_space == "YCrCb":
                    processed_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2YCrCb)
                elif color_space == "Individual Channels":
                    # BGR order: index 0 = Blue, 1 = Green, 2 = Red.
                    channel = st.sidebar.selectbox("Select Channel", ["Blue", "Green", "Red"])
                    if channel == "Blue":
                        processed_img = original_img[:,:,0]
                    elif channel == "Green":
                        processed_img = original_img[:,:,1]
                    else:
                        processed_img = original_img[:,:,2]

            elif processing_option == "Thresholding":
                # Convert to grayscale for thresholding
                gray_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
                threshold_type = st.sidebar.selectbox(
                    "Select Threshold Type",
                    ["Binary", "Binary Inverse", "Truncate", "To Zero", "To Zero Inverse",
                     "Adaptive Mean", "Adaptive Gaussian", "Otsu"]
                )

                # Only the fixed-threshold methods take user-tunable values; the
                # adaptive methods and Otsu derive the threshold themselves.
                if threshold_type in ["Binary", "Binary Inverse", "Truncate", "To Zero", "To Zero Inverse"]:
                    thresh_value = st.sidebar.slider("Threshold Value", 0, 255, 127)
                    max_value = st.sidebar.slider("Maximum Value", 0, 255, 255)
                    processed_img = apply_threshold(gray_img, threshold_type, thresh_value, max_value)
                else:
                    processed_img = apply_threshold(gray_img, threshold_type)

            elif processing_option == "Morphological Operations":
                operation = st.sidebar.selectbox(
                    "Select Operation",
                    ["Erosion", "Dilation", "Opening", "Closing", "Gradient", "Top Hat", "Black Hat"]
                )

                # Square structuring element of odd size.
                kernel_size = st.sidebar.slider("Kernel Size", 3, 15, 5, step=2)
                kernel = np.ones((kernel_size, kernel_size), np.uint8)

                if operation == "Erosion":
                    processed_img = cv2.erode(original_img, kernel, iterations=1)
                elif operation == "Dilation":
                    processed_img = cv2.dilate(original_img, kernel, iterations=1)
                elif operation == "Opening":
                    processed_img = cv2.morphologyEx(original_img, cv2.MORPH_OPEN, kernel)
                elif operation == "Closing":
                    processed_img = cv2.morphologyEx(original_img, cv2.MORPH_CLOSE, kernel)
                elif operation == "Gradient":
                    processed_img = cv2.morphologyEx(original_img, cv2.MORPH_GRADIENT, kernel)
                elif operation == "Top Hat":
                    processed_img = cv2.morphologyEx(original_img, cv2.MORPH_TOPHAT, kernel)
                elif operation == "Black Hat":
                    processed_img = cv2.morphologyEx(original_img, cv2.MORPH_BLACKHAT, kernel)

            elif processing_option == "Edge Detection":
                detector = st.sidebar.selectbox(
                    "Select Detector",
                    ["Canny", "Sobel", "Laplacian", "Scharr"]
                )

                if detector == "Canny":
                    threshold1 = st.sidebar.slider("Threshold 1", 0, 255, 100)
                    threshold2 = st.sidebar.slider("Threshold 2", 0, 255, 200)
                    processed_img = cv2.Canny(original_img, threshold1, threshold2)

                elif detector == "Sobel":
                    # NOTE(review): cv2.Sobel rejects dx=dy=0, which the sliders allow.
                    dx = st.sidebar.slider("dx", 0, 2, 1)
                    dy = st.sidebar.slider("dy", 0, 2, 1)
                    ksize = st.sidebar.slider("Kernel Size", 1, 7, 3, step=2)
                    gray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
                    # CV_64F avoids clipping negative gradients; abs + uint8 for display.
                    processed_img = cv2.Sobel(gray, cv2.CV_64F, dx, dy, ksize=ksize)
                    processed_img = np.uint8(np.absolute(processed_img))

                elif detector == "Laplacian":
                    ksize = st.sidebar.slider("Kernel Size", 1, 7, 3, step=2)
                    gray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
                    processed_img = cv2.Laplacian(gray, cv2.CV_64F, ksize=ksize)
                    processed_img = np.uint8(np.absolute(processed_img))

                elif detector == "Scharr":
                    direction = st.sidebar.selectbox("Direction", ["X", "Y"])
                    gray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
                    if direction == "X":
                        processed_img = cv2.Scharr(gray, cv2.CV_64F, 1, 0)
                    else:
                        processed_img = cv2.Scharr(gray, cv2.CV_64F, 0, 1)
                    processed_img = np.uint8(np.absolute(processed_img))

            elif processing_option == "Feature Detection":
                detector = st.sidebar.selectbox(
                    "Select Detector",
                    ["Harris Corner", "Shi-Tomasi", "FAST"]
                )

                if detector == "Harris Corner":
                    gray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
                    block_size = st.sidebar.slider("Block Size", 2, 10, 2)
                    ksize = st.sidebar.slider("Kernel Size", 3, 31, 3)
                    k = st.sidebar.slider("k", 0.01, 0.1, 0.04)

                    processed_img = original_img.copy()
                    # cornerHarris requires float32 input.
                    gray = np.float32(gray)
                    dst = cv2.cornerHarris(gray, block_size, ksize, k)
                    # Dilate to make detected corners more visible, then paint
                    # strong responses red (BGR [0,0,255]).
                    dst = cv2.dilate(dst, None)
                    processed_img[dst > 0.01 * dst.max()] = [0, 0, 255]

                elif detector == "Shi-Tomasi":
                    gray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
                    # Up to 25 corners, quality 0.01, min distance 10 px.
                    corners = cv2.goodFeaturesToTrack(gray, 25, 0.01, 10)
                    # NOTE(review): np.int0 was removed in NumPy 2.0; fine with the
                    # pinned numpy==1.26.4 but worth migrating to np.intp.
                    corners = np.int0(corners)
                    processed_img = original_img.copy()
                    for i in corners:
                        x, y = i.ravel()
                        cv2.circle(processed_img, (x, y), 3, [0, 0, 255], -1)

                elif detector == "FAST":
                    gray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
                    fast = cv2.FastFeatureDetector_create()
                    kp = fast.detect(gray, None)
                    processed_img = original_img.copy()
                    cv2.drawKeypoints(original_img, kp, processed_img, color=(0, 0, 255))

            elif processing_option == "Histogram Operations":
                operation = st.sidebar.selectbox(
                    "Select Operation",
                    ["Show Histogram", "Equalization", "CLAHE"]
                )

                if operation == "Show Histogram":
                    # Plot per-channel (color) or single (grayscale) 256-bin histograms.
                    fig, ax = plt.subplots()
                    if len(original_img.shape) == 3:
                        colors = ('b', 'g', 'r')
                        for i, color in enumerate(colors):
                            hist = cv2.calcHist([original_img], [i], None, [256], [0, 256])
                            ax.plot(hist, color=color)
                    else:
                        hist = cv2.calcHist([original_img], [0], None, [256], [0, 256])
                        ax.plot(hist)
                    st.pyplot(fig)
                    # No pixel transformation for this option; echo the original.
                    processed_img = original_img

                elif operation == "Equalization":
                    processed_img = apply_histogram_equalization(original_img, "Simple")

                elif operation == "CLAHE":
                    processed_img = apply_histogram_equalization(original_img, "CLAHE")

            elif processing_option == "Advanced Effects":
                effect = st.sidebar.selectbox(
                    "Select Effect",
                    ["Pencil Sketch", "Cartoon", "HDR Effect"]
                )

                if effect == "Pencil Sketch":
                    # Classic dodge-blend sketch: gray / blurred-inverse, scaled.
                    gray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
                    inv = 255 - gray
                    blur = cv2.GaussianBlur(inv, (21, 21), 0)
                    processed_img = cv2.divide(gray, 255-blur, scale=256.0)

                elif effect == "Cartoon":
                    # Bold edges (adaptive threshold) masked over smoothed colors.
                    gray = cv2.cvtColor(original_img, cv2.COLOR_BGR2GRAY)
                    gray = cv2.medianBlur(gray, 5)
                    edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 9)
                    color = cv2.bilateralFilter(original_img, 9, 250, 250)
                    processed_img = cv2.bitwise_and(color, color, mask=edges)

                elif effect == "HDR Effect":
                    processed_img = cv2.detailEnhance(original_img, sigma_s=12, sigma_r=0.15)

            # Display processed image
            # NOTE(review): processed_img may be unbound here if a selectbox
            # branch above did not assign it (all current option lists are
            # exhaustive, so this only matters if options are edited).
            if len(processed_img.shape) == 3:
                st.image(cv2.cvtColor(processed_img, cv2.COLOR_BGR2RGB))
            else:
                st.image(processed_img, clamp=True)

            # Add download button for processed image
            if st.button("Download Processed Image"):
                # Convert to RGB so PIL encodes the PNG with correct colors.
                if len(processed_img.shape) == 3:
                    processed_img_rgb = cv2.cvtColor(processed_img, cv2.COLOR_BGR2RGB)
                else:
                    processed_img_rgb = processed_img
                pil_img = Image.fromarray(processed_img_rgb)
                img_bytes = io.BytesIO()
                pil_img.save(img_bytes, format='PNG')
                st.download_button(
                    label="Download Image",
                    data=img_bytes.getvalue(),
                    file_name="processed_image.png",
                    mime="image/png"
                )
# Script entry point: intended to be launched via `streamlit run app.py`.
if __name__ == "__main__":
    main()
requirements.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
streamlit==1.32.0
|
| 2 |
+
opencv-python==4.9.0.80
|
| 3 |
+
numpy==1.26.4
|
| 4 |
+
Pillow==10.2.0
|
| 5 |
+
matplotlib==3.8.3
|