Spaces:
Sleeping
Sleeping
Upload 10 files
Browse files- .gitattributes +2 -0
- README.md +63 -6
- advanced_tools.py +272 -0
- app.py +420 -0
- basic_tools.py +427 -0
- cat.png +3 -0
- gradio_client_app.py +13 -0
- requirements.txt +3 -0
- snake.png +3 -0
- tools.py +324 -0
- utils.py +49 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
cat.png filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
snake.png filter=lfs diff=lfs merge=lfs -text
|
README.md
CHANGED
|
@@ -1,12 +1,69 @@
|
|
| 1 |
---
|
| 2 |
-
title: Image
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 5.
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
|
|
|
|
|
|
|
|
|
| 10 |
---
|
| 11 |
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Image Enhancer Pro
|
| 3 |
+
emoji: 📚
|
| 4 |
+
colorFrom: red
|
| 5 |
+
colorTo: purple
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 5.33.1
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
| 10 |
+
license: mit
|
| 11 |
+
short_description: 'Enhance, transform, and analyze images with pro tools'
|
| 12 |
+
tags: [mcp-server-track,image_processing]
|
| 13 |
---
|
| 14 |
|
| 15 |
+
# 🖼️ Image Enhancer Pro Suite
|
| 16 |
+
|
| 17 |
+
A powerful and modular image processing suite featuring essential enhancements, advanced filters, and file utilities — all wrapped in a user-friendly interface with server-side processing and MCP support.
|
| 18 |
+
|
| 19 |
+
---
|
| 20 |
+
|
| 21 |
+
## 🚀 Quick Start Guide
|
| 22 |
+
|
| 23 |
+
### 🔧 Basic Tools
|
| 24 |
+
- **Color Correction**: Adjust brightness, contrast, and saturation
|
| 25 |
+
- **Sharpening**: Enhance image clarity and detail
|
| 26 |
+
- **Noise Reduction**: Remove unwanted noise from images
|
| 27 |
+
- **Upscaling**: Increase resolution using interpolation methods
|
| 28 |
+
- **Auto Enhance**: Automatically optimize overall image quality
|
| 29 |
+
- **Resize / Crop / Rotate / Flip**: Fundamental layout transformations
|
| 30 |
+
- **Grayscale / Blur / Invert**: Visual style and clarity adjustments
|
| 31 |
+
|
| 32 |
+
### 🧠 Advanced Tools
|
| 33 |
+
- **Edge Detection**: Extract edges using the Canny algorithm
|
| 34 |
+
- **Morphological Operations**: Apply erosion and dilation for shape processing
|
| 35 |
+
- **Contour Analysis**: Detect and analyze object contours
|
| 36 |
+
- **Noise & Effects**: Add creative visual effects and synthetic noise
|
| 37 |
+
|
| 38 |
+
### 🛠️ Utility Tools
|
| 39 |
+
- **Metadata Extraction**: Read EXIF and other metadata from images
|
| 40 |
+
- **Format Conversion**: Convert images between formats (JPEG, PNG, WebP, etc.)
|
| 41 |
+
- **File Operations**: List, read, and manage image files and folders
|
| 42 |
+
|
| 43 |
+
### 🧩 Extra Tools
|
| 44 |
+
- **Add Images**: Combine two images by pixel-wise addition
|
| 45 |
+
- **Concatenate Images**: Join two images side-by-side or top-to-bottom (horizontal/vertical)
|
| 46 |
+
|
| 47 |
+
---
|
| 48 |
+
|
| 49 |
+
## 💡 Tips for Best Results
|
| 50 |
+
|
| 51 |
+
- **Image Quality**: Start with high-resolution images for better outputs
|
| 52 |
+
- **Parameter Tuning**: Try different settings for optimal results
|
| 53 |
+
- **Preprocessing First**: Use basic tools before applying advanced effects
|
| 54 |
+
- **Batch Processing**: Automate workflows using utility tools
|
| 55 |
+
- **Format Choice**: Use PNG for lossless quality, JPEG for smaller size
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
## 📞 Support
|
| 59 |
+
|
| 60 |
+
If you're facing issues, please check:
|
| 61 |
+
|
| 62 |
+
- ✅ Your image is in a supported format
|
| 63 |
+
- ✅ File permissions allow reading and writing
|
| 64 |
+
- ✅ All required Python modules are installed and accessible
|
| 65 |
+
|
| 66 |
+
---
|
| 67 |
+
|
| 68 |
+
Enjoy exploring and enhancing your images! ✨
|
| 69 |
+
|
advanced_tools.py
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
|
| 3 |
+
import cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from utils import temp_output
|
| 7 |
+
|
| 8 |
+
def detect_edges(input_path: str, threshold1: int = 100, threshold2: int = 200, output_path: str = None) -> str:
    """
    Detect edges using Canny edge detection.

    Args:
        input_path (str): Input image path.
        threshold1 (int): Lower threshold for hysteresis.
        threshold2 (int): Upper threshold for hysteresis.
        output_path (str): Output path for the edge image; a temp file is used when None.

    Returns:
        str: Path to the saved edge-detected image.

    Raises:
        FileNotFoundError: If the input image cannot be read.
    """
    if output_path is None:
        output_path = temp_output()
    # Load directly as single-channel grayscale: Canny operates on one channel.
    img = cv2.imread(str(Path(input_path).expanduser()), cv2.IMREAD_GRAYSCALE)
    if img is None:
        # cv2.imread returns None instead of raising for missing/unreadable files;
        # fail loudly here rather than letting cv2.Canny crash on None.
        raise FileNotFoundError(f"Image not found: {input_path}")
    edges = cv2.Canny(img, threshold1, threshold2)
    cv2.imwrite(str(Path(output_path).expanduser()), edges)
    return str(output_path)
| 27 |
+
|
| 28 |
+
def apply_erosion(input_path: str, kernel_size: int = 3, iterations: int = 1,output_path: str = None) -> str:
    """
    Apply erosion effect to an image.

    Args:
        input_path (str): Path to the original image.
        kernel_size (int): Size of the square kernel.
        iterations (int): Number of erosion iterations.
        output_path (str): Path to save the eroded image; a temp file is used when None.

    Returns:
        str: Path to the eroded image.
    """
    # Bug fix: output_path defaults to None but the original body never fell
    # back to temp_output() (unlike apply_dilation and the other tools), so
    # calling with the default crashed inside Path(None).
    if output_path is None:
        output_path = temp_output()
    img = cv2.imread(str(Path(input_path).expanduser()))
    kernel = np.ones((kernel_size, kernel_size), np.uint8)
    eroded = cv2.erode(img, kernel, iterations=iterations)
    cv2.imwrite(str(Path(output_path).expanduser()), eroded)
    return str(output_path)
| 46 |
+
|
| 47 |
+
def apply_dilation(input_path: str, kernel_size: int = 3, iterations: int = 1,output_path: str = None) -> str:
    """
    Apply dilation effect to an image.

    Args:
        input_path (str): Path to the original image.
        kernel_size (int): Size of the square kernel.
        iterations (int): Number of dilation iterations.
        output_path (str): Path to save the dilated image; a temp file is used when None.

    Returns:
        str: Path to the dilated image.
    """
    if output_path is None:
        output_path = temp_output()

    source = Path(input_path).expanduser()
    destination = Path(output_path).expanduser()

    picture = cv2.imread(str(source))
    # Square all-ones structuring element of the requested size.
    structuring_element = np.ones((kernel_size, kernel_size), np.uint8)
    dilated = cv2.dilate(picture, structuring_element, iterations=iterations)
    cv2.imwrite(str(destination), dilated)
    return str(output_path)
| 68 |
+
|
| 69 |
+
def blur_advanced(input_path: str, kernel_size: int = 5,output_path: str = None) -> str:
    """
    Apply Gaussian blur to an image.

    Args:
        input_path (str): Path to the source image.
        kernel_size (int): Size of the Gaussian kernel (must be odd).
        output_path (str): Path to save the blurred image; a temp file is used when None.

    Returns:
        str: Path to the blurred image.

    Raises:
        ValueError: If kernel_size is even.
    """
    if output_path is None:
        output_path = temp_output()
    # GaussianBlur requires an odd kernel dimension.
    if kernel_size % 2 != 1:
        raise ValueError("kernel_size must be odd.")
    picture = cv2.imread(str(Path(input_path).expanduser()))
    # sigma=0 lets OpenCV derive sigma from the kernel size.
    smoothed = cv2.GaussianBlur(picture, (kernel_size, kernel_size), 0)
    cv2.imwrite(str(Path(output_path).expanduser()), smoothed)
    return str(output_path)
| 89 |
+
|
| 90 |
+
def find_contours(input_path: str, output_path: str = None) -> str:
    """
    Find and draw contours on a binary or grayscale image.

    Args:
        input_path (str): Input image path.
        output_path (str): Output path with drawn contours; a temp file is used when None.

    Returns:
        str: Path to the image with contours drawn.
    """
    # Bug fix: the body already handled output_path=None, but the signature
    # made the parameter required, so the None branch was unreachable via the
    # normal call convention. Give it a default (backward-compatible) to match
    # every sibling tool in this module.
    if output_path is None:
        output_path = temp_output()

    img = cv2.imread(str(Path(input_path).expanduser()))
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Fixed mid-point threshold to binarize before contour extraction.
    _, thresh = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Draw all contours (-1) in green with 2px strokes on the original image.
    cv2.drawContours(img, contours, -1, (0, 255, 0), 2)
    cv2.imwrite(str(Path(output_path).expanduser()), img)
    return str(output_path)
| 111 |
+
|
| 112 |
+
|
| 113 |
+
|
| 114 |
+
def isolate_large_objects(image_path: str, area_threshold: float = 1000.0,output_path: str = None) -> str:
    """
    Isolate objects in an image whose area exceeds the given threshold.

    Args:
        image_path (str): Path to the input image.
        area_threshold (float): Minimum area of object to keep.
        output_path (str): Path to save the output; a temp file is used when None.

    Returns:
        str: Path to the saved image with large objects highlighted.
    """
    if output_path is None:
        output_path = temp_output()

    picture = cv2.imread(str(image_path))
    grayscale = cv2.cvtColor(picture, cv2.COLOR_BGR2GRAY)

    # Edge map drives contour extraction below.
    edge_map = cv2.Canny(grayscale, 50, 150)
    detected, _ = cv2.findContours(edge_map, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Paint each sufficiently large contour, filled green, onto a black canvas.
    canvas = np.zeros_like(picture)
    for contour in detected:
        if cv2.contourArea(contour) > area_threshold:
            cv2.drawContours(canvas, [contour], -1, (0, 255, 0), thickness=cv2.FILLED)

    destination = Path(output_path).expanduser().resolve()
    destination.parent.mkdir(parents=True, exist_ok=True)
    cv2.imwrite(str(destination), canvas)

    return str(destination)
|
| 150 |
+
def extract_bounding_boxes(input_path: str, area_threshold: float = 1000.0) -> list[dict]:
    """
    Detect contours and return bounding boxes for large objects.

    Args:
        input_path (str): Image path.
        area_threshold (float): Minimum contour area.

    Returns:
        list of dict: Bounding boxes with keys x, y, width, height.
    """
    picture = cv2.imread(str(Path(input_path).expanduser()))
    grayscale = cv2.cvtColor(picture, cv2.COLOR_BGR2GRAY)
    edge_map = cv2.Canny(grayscale, 50, 150)
    detected, _ = cv2.findContours(edge_map, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    def _as_box(contour):
        # Axis-aligned bounding rectangle of a single contour.
        x, y, w, h = cv2.boundingRect(contour)
        return {"x": x, "y": y, "width": w, "height": h}

    return [_as_box(c) for c in detected if cv2.contourArea(c) > area_threshold]
| 175 |
+
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def add_noise(input_path: str, r_scale: int = 1, g_scale: int = 1, b_scale: int = 1,output_path: str = None) -> str:
    """
    Apply strong random color variation to each RGB channel independently.

    Args:
        input_path (str): Path to input image.
        r_scale (int): Scale (1-5) for Red channel variation.
        g_scale (int): Scale (1-5) for Green channel variation.
        b_scale (int): Scale (1-5) for Blue channel variation.
        output_path (str): Path to save the result; a temp file is used when None.

    Returns:
        str: Path to the modified image.

    Raises:
        ValueError: If any channel scale lies outside 1..5.
        FileNotFoundError: If the input image cannot be read.
    """
    if output_path is None:
        output_path = temp_output()
    # Reject out-of-range channel scales up front.
    if any(not (1 <= scale <= 5) for scale in (r_scale, g_scale, b_scale)):
        raise ValueError("Each channel scale must be between 1 and 5.")

    input_path = Path(input_path).expanduser().resolve()
    output_path = Path(output_path).expanduser().resolve()

    img = cv2.imread(str(input_path))
    if img is None:
        raise FileNotFoundError(f"Image not found: {input_path}")

    rows, cols, _ = img.shape
    plane = (rows, cols)
    # Per-channel noise in roughly [-51*scale, 51*scale); int16 so sums can
    # exceed the uint8 range before clipping.
    noise_r = np.random.randint(-51*r_scale, 51*r_scale, size=plane, dtype=np.int16)
    noise_g = np.random.randint(-51*g_scale, 51*g_scale, size=plane, dtype=np.int16)
    noise_b = np.random.randint(-51*b_scale, 51*b_scale, size=plane, dtype=np.int16)

    # OpenCV stores channels as BGR; split, perturb, and clip each one.
    b, g, r = cv2.split(img.astype(np.int16))
    r = np.clip(r + noise_r, 0, 255).astype(np.uint8)
    g = np.clip(g + noise_g, 0, 255).astype(np.uint8)
    b = np.clip(b + noise_b, 0, 255).astype(np.uint8)

    output_path.parent.mkdir(parents=True, exist_ok=True)
    cv2.imwrite(str(output_path), cv2.merge([b, g, r]))

    return str(output_path)
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def random_channel_remix(input_path: str ,output_path: str = None) -> str:
    """
    Randomly remap RGB channels with random scales:
    - New R = random([R, G, B]) * random_scale
    - New G = random([R, G, B]) * random_scale
    - New B = random([R, G, B]) * random_scale

    Args:
        input_path (str): Path to input image.
        output_path (str): Path to save output image; a temp file is used when None.

    Returns:
        str: Path to saved image.

    Raises:
        FileNotFoundError: If the input image cannot be read.
    """
    if output_path is None:
        output_path = temp_output()
    input_path = Path(input_path).expanduser().resolve()
    output_path = Path(output_path).expanduser().resolve()

    img = cv2.imread(str(input_path))
    if img is None:
        raise FileNotFoundError(f"Image not found: {input_path}")

    # Float32 so scaling does not wrap before the final clip.
    b, g, r = cv2.split(img.astype(np.float32))
    channels = {'r': r, 'g': g, 'b': b}

    # Pick a random source channel for each destination channel...
    new_r_src = random.choice(['r', 'g', 'b'])
    new_g_src = random.choice(['r', 'g', 'b'])
    new_b_src = random.choice(['r', 'g', 'b'])

    # ...and an independent random gain for each.
    r_scale = random.uniform(0.5, 2.0)
    g_scale = random.uniform(0.5, 2.0)
    b_scale = random.uniform(0.5, 2.0)

    new_r, new_g, new_b = (
        np.clip(channels[src] * gain, 0, 255).astype(np.uint8)
        for src, gain in ((new_r_src, r_scale), (new_g_src, g_scale), (new_b_src, b_scale))
    )

    output_path.parent.mkdir(parents=True, exist_ok=True)
    cv2.imwrite(str(output_path), cv2.merge([new_b, new_g, new_r]))

    # Log the chosen mapping so a surprising result can be explained.
    print(f"R ← {new_r_src.upper()} × {r_scale:.2f}, G ← {new_g_src.upper()} × {g_scale:.2f}, B ← {new_b_src.upper()} × {b_scale:.2f}")
    return str(output_path)
| 272 |
+
|
app.py
ADDED
|
@@ -0,0 +1,420 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import tempfile
|
| 3 |
+
from pathlib import Path
|
| 4 |
+
import os
|
| 5 |
+
from PIL import Image
|
| 6 |
+
from utils import temp_output
|
| 7 |
+
|
| 8 |
+
# --- Import all tools ---
|
| 9 |
+
from tools import (
|
| 10 |
+
apply_color_correction, apply_sharpening, reduce_noise,
|
| 11 |
+
upscale_resolution, auto_enhance
|
| 12 |
+
)
|
| 13 |
+
from basic_tools import (
|
| 14 |
+
resize_image, convert_grayscale, image_metadata, convert_format, blur_image,
|
| 15 |
+
rotate_image, crop_image, thumbnail_image, add_watermark, flip_image,
|
| 16 |
+
invert_colors, list_images_in_directory, get_image_metadata,
|
| 17 |
+
apply_random_color_variation, add_images, concat_images
|
| 18 |
+
)
|
| 19 |
+
from advanced_tools import (
|
| 20 |
+
detect_edges, apply_erosion, apply_dilation, blur_advanced,
|
| 21 |
+
find_contours, isolate_large_objects, extract_bounding_boxes,
|
| 22 |
+
add_noise, random_channel_remix
|
| 23 |
+
)
|
| 24 |
+
from utils import create_folder
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
# --- Create individual interfaces for each tool ---
|
| 30 |
+
def create_basics():
    """Build the Gradio tabs for the basic image tools.

    Must be called inside an open ``gr.Blocks()`` context: each tool gets its
    own ``gr.Tab`` whose button is wired to the matching processing function
    from ``tools.py`` / ``basic_tools.py``. Returns nothing; the tabs attach
    to the surrounding Blocks as a side effect.

    NOTE(review): the ``gr.Text(temp_output(), ...)`` defaults are computed
    once at UI-build time, so every click of the same button reuses the same
    output file unless the user edits the path.
    """
    # (Removed an unused `interfaces = []` local that was never appended to
    # or returned.)

    # Color Correction
    with gr.Tab("Color Correction"):
        gr.Markdown("**Adjust brightness, contrast, and saturation of your image**")
        with gr.Row():
            with gr.Column():
                cc_img = gr.Image(type="filepath", label="Input Image")
                cc_brightness = gr.Slider(0.5, 2.0, value=1.0, step=0.05, label="Brightness")
                cc_contrast = gr.Slider(0.5, 2.0, value=1.0, step=0.05, label="Contrast")
                cc_saturation = gr.Slider(0.0, 2.0, value=1.0, step=0.05, label="Saturation")
                cc_output_path = gr.Text(temp_output(), label="Output Image")
                gr.Examples(
                    examples=["cat.png","snake.png"],
                    inputs=[cc_img],
                    label="Example Images"
                )
                cc_btn = gr.Button("Apply Color Correction", variant="primary")
            with gr.Column():
                cc_output = gr.Image(label="Enhanced Image")
        cc_btn.click(apply_color_correction, [cc_img, cc_brightness, cc_contrast, cc_saturation, cc_output_path], cc_output)

    # Sharpening
    with gr.Tab("Sharpening"):
        gr.Markdown("**Enhance image clarity and detail**")
        with gr.Row():
            with gr.Column():
                sharp_img = gr.Image(type="filepath", label="Input Image")
                sharp_strength = gr.Slider(0.5, 2.0, value=1.0, step=0.1, label="Sharpening Strength")
                sharp_output_path = gr.Text(temp_output(), label="Output Image")
                gr.Examples(
                    examples=["cat.png","snake.png"],
                    inputs=[sharp_img],
                    label="Example Images"
                )
                sharp_btn = gr.Button("Apply Sharpening", variant="primary")
            with gr.Column():
                sharp_output = gr.Image(label="Sharpened Image")
        sharp_btn.click(apply_sharpening, [sharp_img, sharp_strength, sharp_output_path], sharp_output)

    # Noise Reduction
    with gr.Tab("Noise Reduction"):
        gr.Markdown("**Remove unwanted noise from your image**")
        with gr.Row():
            with gr.Column():
                noise_img = gr.Image(type="filepath", label="Input Image")
                noise_strength = gr.Radio(["low", "medium", "high"], value="medium", label="Noise Reduction Strength")
                noise_output_path = gr.Text(temp_output(), label="Output Image")
                gr.Examples(
                    examples=["cat.png","snake.png"],
                    inputs=[noise_img],
                    label="Example Images"
                )
                noise_btn = gr.Button("Reduce Noise", variant="primary")
            with gr.Column():
                noise_output = gr.Image(label="Denoised Image")
        noise_btn.click(reduce_noise, [noise_img, noise_strength, noise_output_path], noise_output)

    # Upscaling
    with gr.Tab("Upscaling"):
        gr.Markdown("**Increase image resolution using advanced interpolation**")
        with gr.Row():
            with gr.Column():
                up_img = gr.Image(type="filepath", label="Input Image")
                up_scale = gr.Radio([2, 3, 4], value=2, label="Scale Factor")
                up_method = gr.Dropdown(["nearest", "bilinear", "bicubic", "lanczos"], value="bicubic",
                                        label="Interpolation Method")
                up_output_path = gr.Text(temp_output(), label="Output Image")
                gr.Examples(
                    examples=["cat.png","snake.png"],
                    inputs=[up_img],
                    label="Example Images"
                )
                up_btn = gr.Button("Upscale Image", variant="primary")
            with gr.Column():
                up_output = gr.Image(label="Upscaled Image")
        up_btn.click(upscale_resolution, [up_img, up_scale, up_method, up_output_path], up_output)

    # Auto Enhance
    with gr.Tab("Auto Enhance"):
        gr.Markdown("**Automatically optimize image quality**")
        with gr.Row():
            with gr.Column():
                auto_img = gr.Image(type="filepath", label="Input Image")
                auto_output_path = gr.Text(temp_output(), label="Output Image")
                gr.Examples(
                    examples=["cat.png","snake.png"],
                    inputs=[auto_img],
                    label="Example Images"
                )
                auto_btn = gr.Button("Auto Enhance", variant="primary")
            with gr.Column():
                auto_output = gr.Image(label="Enhanced Image")
        auto_btn.click(auto_enhance, [auto_img, auto_output_path], auto_output)

    # Resize
    with gr.Tab("Resize"):
        gr.Markdown("**Change image dimensions**")
        with gr.Row():
            with gr.Column():
                resize_img = gr.Image(type="filepath", label="Input Image")
                resize_width = gr.Number(value=512, label="Width")
                resize_height = gr.Number(value=512, label="Height")
                resize_output_path = gr.Text(temp_output(), label="Output Image")
                gr.Examples(
                    examples=["cat.png","snake.png"],
                    inputs=[resize_img],
                    label="Example Images"
                )
                resize_btn = gr.Button("Resize Image", variant="primary")
            with gr.Column():
                resize_output = gr.Image(label="Resized Image")
        resize_btn.click(resize_image, [resize_img, resize_width, resize_height, resize_output_path], resize_output)

    # Grayscale
    with gr.Tab("Grayscale"):
        gr.Markdown("**Convert image to grayscale**")
        with gr.Row():
            with gr.Column():
                gray_img = gr.Image(type="filepath", label="Input Image")
                gray_output_path = gr.Text(temp_output(), label="Output Image")
                gr.Examples(
                    examples=["cat.png","snake.png"],
                    inputs=[gray_img],
                    label="Example Images"
                )
                gray_btn = gr.Button("Convert to Grayscale", variant="primary")
            with gr.Column():
                gray_output = gr.Image(label="Grayscale Image")
        gray_btn.click(convert_grayscale, [gray_img, gray_output_path], gray_output)

    # Additional basic tools (simplified for space)
    with gr.Tab("More Basic Tools"):
        gr.Markdown("**Quick access to common image operations**")

        with gr.Accordion("Blur & Effects", open=False):
            with gr.Row():
                with gr.Column():
                    blur_img = gr.Image(type="filepath", label="Input Image")
                    blur_radius = gr.Slider(0.1, 20.0, value=2.0, step=0.1, label="Blur Radius")
                    blur_output_path = gr.Text(temp_output(), label="Output Image")
                    gr.Examples(
                        examples=["cat.png","snake.png"],
                        inputs=[blur_img],
                        label="Example Images"
                    )
                    blur_btn = gr.Button("Apply Blur")
                with gr.Column():
                    blur_output = gr.Image(label="Blurred Image")
            blur_btn.click(blur_image, [blur_img, blur_radius, blur_output_path], blur_output)

        with gr.Accordion("Rotate & Flip", open=False):
            with gr.Row():
                with gr.Column():
                    rotate_img = gr.Image(type="filepath", label="Input Image")
                    rotate_angle = gr.Number(value=90.0, label="Rotation Angle")
                    rotate_output_path = gr.Text(temp_output(), label="Output Image")
                    gr.Examples(
                        examples=["cat.png","snake.png"],
                        inputs=[rotate_img],
                        label="Example Images"
                    )
                    rotate_btn = gr.Button("Rotate Image")
                    flip_mode = gr.Radio(["horizontal", "vertical"], value="horizontal", label="Flip Mode")
                    flip_output_path = gr.Text(temp_output(), label="Output Image")

                    flip_btn = gr.Button("Flip Image")
                with gr.Column():
                    # Rotate and flip intentionally share one preview component.
                    rotate_output = gr.Image(label="Rotated/Flipped Image")
            rotate_btn.click(rotate_image, [rotate_img, rotate_angle, rotate_output_path], rotate_output)
            flip_btn.click(flip_image, [rotate_img, flip_mode, flip_output_path], rotate_output)
| 204 |
+
|
| 205 |
+
def create_advanceds():
    """Build the Gradio tabs for the advanced (OpenCV-based) tools.

    Must be called inside an open ``gr.Blocks()`` context; wires each button
    to the matching function from ``advanced_tools.py``. Returns nothing.
    """
    # Edge Detection
    with gr.Tab("Edge Detection"):
        gr.Markdown("**Detect edges using Canny edge detection algorithm**")
        with gr.Row():
            with gr.Column():
                edge_img = gr.Image(type="filepath", label="Input Image")
                edge_t1 = gr.Number(value=100, label="Lower Threshold")
                edge_t2 = gr.Number(value=200, label="Upper Threshold")
                edge_output_path = gr.Text(temp_output(), label="Output Image")
                edge_btn = gr.Button("Detect Edges", variant="primary")
            with gr.Column():
                edge_output = gr.Image(label="Edge Detection Result")
        edge_btn.click(detect_edges, [edge_img, edge_t1, edge_t2, edge_output_path], edge_output)

    # Morphological Operations
    with gr.Tab("Morphological Ops"):
        gr.Markdown("**Apply erosion and dilation operations**")
        with gr.Row():
            with gr.Column():
                morph_img = gr.Image(type="filepath", label="Input Image")
                morph_kernel = gr.Number(value=3, label="Kernel Size")
                morph_iter = gr.Number(value=1, label="Iterations")
                erosion_output_path = gr.Text(temp_output(), label="Output Erosion Image")
                dilation_output_path = gr.Text(temp_output(), label="Output Dilation Image")
                erosion_btn = gr.Button("Apply Erosion")
                dilation_btn = gr.Button("Apply Dilation")
            with gr.Column():
                # Both operations render into the same preview component.
                morph_output = gr.Image(label="Morphological Result")
        erosion_btn.click(apply_erosion, [morph_img, morph_kernel, morph_iter, erosion_output_path], morph_output)
        dilation_btn.click(apply_dilation, [morph_img, morph_kernel, morph_iter, dilation_output_path], morph_output)

    # Contour Analysis
    with gr.Tab("Contour Analysis"):
        gr.Markdown("**Find and analyze contours in images**")
        with gr.Row():
            with gr.Column():
                contour_img = gr.Image(type="filepath", label="Input Image")
                contour_area = gr.Number(value=1000.0, label="Min Area Threshold")
                # Typo fix: "Countour" -> "Contour".
                contour_output_path = gr.Text(temp_output(), label="Contour Output Image")
                isolate_output_path = gr.Text(temp_output(), label="Isolate Output Image")
                contour_btn = gr.Button("Find Contours", variant="primary")
                isolate_btn = gr.Button("Isolate Large Objects")
                bbox_btn = gr.Button("Extract Bounding Boxes")
            with gr.Column():
                contour_output = gr.Image(label="Contour Result")
                bbox_output = gr.JSON(label="Bounding Boxes")
        contour_btn.click(find_contours, [contour_img, contour_output_path], contour_output,)
        isolate_btn.click(isolate_large_objects, [contour_img, contour_area, isolate_output_path], contour_output)
        bbox_btn.click(extract_bounding_boxes, [contour_img, contour_area], bbox_output)

    # Noise and Effects
    with gr.Tab("Noise & Effects"):
        gr.Markdown("**Add noise and apply special effects**")
        with gr.Row():
            with gr.Column():
                fx_img = gr.Image(type="filepath", label="Input Image")
                noise_r = gr.Slider(1, 5, value=1, step=1, label="Red Noise Scale")
                noise_g = gr.Slider(1, 5, value=1, step=1, label="Green Noise Scale")
                noise_b = gr.Slider(1, 5, value=1, step=1, label="Blue Noise Scale")
                # Typo fix: "Noice" -> "Noise".
                noise_output_path = gr.Text(temp_output(), label="Noise Output Image")
                remix_output_path = gr.Text(temp_output(), label="Remix Output Image")
                noise_btn = gr.Button("Add Noise")
                remix_btn = gr.Button("Random Channel Remix")
            with gr.Column():
                fx_output = gr.Image(label="Effect Result")
        noise_btn.click(add_noise, [fx_img, noise_r, noise_g, noise_b, noise_output_path], fx_output)
        remix_btn.click(random_channel_remix, [fx_img, remix_output_path], fx_output)
|
| 276 |
+
|
| 277 |
+
def create_utilitys():
    """Create interfaces for utility tools"""

    # Metadata Tools: two buttons share one input image; one returns a
    # human-readable string, the other a JSON-serializable dict.
    with gr.Tab("Metadata"):
        gr.Markdown("**Extract and view image metadata**")
        with gr.Row():
            with gr.Column():
                meta_img = gr.Image(type="filepath", label="Input Image")
                meta_btn = gr.Button("Get Metadata (String)", variant="primary")
                meta_dict_btn = gr.Button("Get Metadata (Dict)")
            with gr.Column():
                meta_output = gr.Textbox(label="Metadata String", lines=10)
                meta_dict_output = gr.JSON(label="Metadata Dictionary")
        # image_metadata -> "Format/Size/Mode" text; get_image_metadata -> dict
        meta_btn.click(image_metadata, [meta_img], meta_output)
        meta_dict_btn.click(get_image_metadata, [meta_img], meta_dict_output)

    # Format Conversion: converts the uploaded image to the selected format
    # and writes it to the (pre-filled temporary) output path.
    with gr.Tab("Format Conversion"):
        gr.Markdown("**Convert between different image formats**")
        with gr.Row():
            with gr.Column():
                fmt_img = gr.Image(type="filepath", label="Input Image")
                fmt_format = gr.Dropdown(["png", "jpeg", "bmp", "webp", "tiff"], value="png", label="Target Format")
                fmt_output_path = gr.Text(temp_output(), label="Output Image")
                fmt_btn = gr.Button("Convert Format", variant="primary")
            with gr.Column():
                fmt_output = gr.Image(label="Converted Image")
        fmt_btn.click(convert_format, [fmt_img, fmt_format,fmt_output_path], fmt_output)
|
| 306 |
+
|
| 307 |
+
def create_extra_tools():
    """Create interfaces for the extra image-merging tools."""
    with gr.Tab("Image Merge"):
        gr.Markdown("**Combine two images by addition or concatenation**")

        # Pixel-wise addition; add_images raises ValueError when the two
        # images do not share the same dimensions.
        with gr.Accordion("Add Images Pixel-wise", open=False):
            with gr.Row():
                with gr.Column():
                    img1_add = gr.Image(type="filepath", label="Image 1")
                    img2_add = gr.Image(type="filepath", label="Image 2")
                    add_output_path = gr.Text(temp_output(), label="Output Path")
                    add_btn = gr.Button("Add Images")
                with gr.Column():
                    add_output = gr.Image(label="Added Image")

            add_btn.click(add_images, [img1_add, img2_add, add_output_path], add_output)

        # Side-by-side ("horizontal") or stacked ("vertical") concatenation.
        with gr.Accordion("Concatenate Images", open=False):
            with gr.Row():
                with gr.Column():
                    img1_cat = gr.Image(type="filepath", label="Image 1")
                    img2_cat = gr.Image(type="filepath", label="Image 2")
                    concat_mode = gr.Radio(["horizontal", "vertical"], value="horizontal", label="Concatenation Mode")
                    concat_output_path = gr.Text(temp_output(), label="Output Path")
                    concat_btn = gr.Button("Concatenate")
                with gr.Column():
                    concat_output = gr.Image(label="Concatenated Image")

            concat_btn.click(concat_images, [img1_cat, img2_cat, concat_mode, concat_output_path], concat_output)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
# Create the main application
|
| 338 |
+
# Create the main application: a tabbed Gradio Blocks UI that groups the
# tool-builder functions defined above (create_basics, create_advanceds,
# create_utilitys, create_extra_tools) plus a static help page.
with gr.Blocks(theme=gr.themes.Default(), title="Image Enhancer Pro Suite") as demo:
    # Header
    gr.Markdown("""
    # 🖼️ Image Enhancer Pro Suite
    ### Professional Image Processing Tools - Organized & Easy to Use

    Choose from our comprehensive collection of image processing tools organized by category.
    """)

    # Main tabbed interface
    with gr.Tabs():
        # Basic Tools Tab
        with gr.TabItem("🔧 Basic Tools", id="basic"):
            gr.Markdown("## Essential image processing operations")
            create_basics()

        # Advanced Tools Tab
        with gr.TabItem("⚡ Advanced Tools", id="advanced"):
            gr.Markdown("## Computer vision and advanced image analysis")
            create_advanceds()

        # Utility Tools Tab
        with gr.TabItem("🛠️ Utilities", id="utilities"):
            gr.Markdown("## File management and metadata tools")
            create_utilitys()

        with gr.TabItem("🧩 Extra Tools", id="extra"):
            gr.Markdown("## Custom tools for image merging and combination")
            create_extra_tools()

        # Help & Info Tab
        with gr.TabItem("📚 Help", id="help"):
            gr.Markdown("""
            ## 🚀 Quick Start Guide

            ### Basic Tools
            - **Color Correction**: Adjust brightness, contrast, and saturation
            - **Sharpening**: Enhance image clarity and detail
            - **Noise Reduction**: Remove unwanted noise from images
            - **Upscaling**: Increase resolution with various interpolation methods
            - **Auto Enhance**: Automatically optimize image quality

            ### Advanced Tools
            - **Edge Detection**: Find edges using Canny algorithm
            - **Morphological Operations**: Erosion and dilation for shape analysis
            - **Contour Analysis**: Find and analyze object contours
            - **Noise & Effects**: Add artistic effects and noise

            ### Utilities
            - **Metadata**: Extract EXIF and other image information
            - **Format Conversion**: Convert between image formats
            - **File Operations**: Manage directories and files

            ## 💡 Tips for Best Results

            1. **Image Quality**: Upload high-resolution images for best results
            2. **Parameter Tuning**: Experiment with different parameter values
            3. **Preprocessing**: Use basic tools before advanced operations
            4. **Batch Processing**: Use utility tools for managing multiple files
            5. **Format Choice**: PNG for quality, JPEG for size optimization

            ## 🔧 Technical Notes

            - All processing is done server-side for maximum quality
            - Temporary files are automatically cleaned up
            - Supports most common image formats
            - MCP server integration for extended functionality

            ## 📞 Support

            If you encounter issues, check that:
            - Your image file is in a supported format
            - File permissions allow reading/writing
            - Required tool modules are properly installed
            """)

# Launch the application.
# mcp_server=True also exposes the tool functions over MCP;
# allowed_paths lets Gradio serve files written to /tmp and ./output.
if __name__ == "__main__":
    demo.launch(
        mcp_server=True,
        allowed_paths=["/tmp", ".", "./output"],
        show_error=True
    )
|
basic_tools.py
ADDED
|
@@ -0,0 +1,427 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
from PIL import Image, ImageFilter, ImageOps, ImageDraw, ImageChops
|
| 4 |
+
import tempfile
|
| 5 |
+
|
| 6 |
+
def validate_path(p: str) -> Path:
    """Expand, resolve, and verify a filesystem path.

    Args:
        p: Path string; ``~`` is expanded.

    Returns:
        The absolute, resolved Path.

    Raises:
        FileNotFoundError: If the resolved path does not exist.
    """
    resolved = Path(os.path.expanduser(p)).resolve()
    if resolved.exists():
        return resolved
    raise FileNotFoundError(f"Path does not exist: {resolved}")
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def temp_output(suffix=".png"):
    """Create a persistent (not auto-deleted) temporary file and return its path."""
    handle = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
    return handle.name
|
| 15 |
+
|
| 16 |
+
def ensure_path_from_img(img) -> Path:
    """Coerce a PIL image or a path-like value into a validated file Path.

    A PIL.Image is persisted to a fresh temporary file and that file's path
    is returned; any other value is treated as an existing path and validated.

    Args:
        img: A PIL.Image or a path string.

    Returns:
        Path to the image file.
    """
    if not isinstance(img, Image.Image):
        return validate_path(img)
    saved_to = temp_output()
    img.save(saved_to)
    return Path(saved_to)
|
| 35 |
+
|
| 36 |
+
def resize_image(input_path: str, width: int, height: int, output_path: str = None) -> str:
    """Resize an image to exact pixel dimensions.

    Args:
        input_path (str): Path to source image.
        width (int): New width in pixels.
        height (int): New height in pixels.
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to resized image.
    """
    if output_path is None:
        output_path = temp_output()
    source = validate_path(input_path)
    destination = Path(output_path).expanduser().resolve()
    with Image.open(source) as img:
        resized = img.resize((width, height))
        destination.parent.mkdir(parents=True, exist_ok=True)
        resized.save(destination)
    return str(destination)
|
| 58 |
+
|
| 59 |
+
def convert_grayscale(input_path: str, output_path: str = None) -> str:
    """Convert an image to 8-bit grayscale ("L" mode).

    Args:
        input_path (str): Source image path.
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to grayscale image.
    """
    if output_path is None:
        output_path = temp_output()
    source = validate_path(input_path)
    destination = Path(output_path).expanduser().resolve()
    with Image.open(source) as img:
        destination.parent.mkdir(parents=True, exist_ok=True)
        img.convert("L").save(destination)
    return str(destination)
|
| 79 |
+
|
| 80 |
+
def image_metadata(path: str) -> str:
    """Summarize an image's format, size, and mode as readable text.

    Args:
        path (str): Image path.

    Returns:
        str: One line each for format, size, and mode.
    """
    source = validate_path(path)
    with Image.open(source) as img:
        summary = [
            f"Format: {img.format}",
            f"Size: {img.size}",
            f"Mode: {img.mode}",
        ]
    return "\n".join(summary)
|
| 97 |
+
|
| 98 |
+
def convert_format(input_path: str, format: str, output_path: str = None) -> str:
    """
    Convert an image to a new format.

    Args:
        input_path (str): Source image path.
        format (str): Target format (e.g. 'png', 'jpeg', 'jpg').
        output_path (str): Destination path; a temp file with a matching
            extension is used when omitted.

    Returns:
        str: Path to converted image.
    """
    fmt = format.upper()
    if fmt == "JPG":
        # Pillow only recognizes the registered format name "JPEG".
        fmt = "JPEG"
    if output_path is None:
        # Give the temp file an extension that matches the target format
        # instead of the helper's default ".png".
        output_path = temp_output(suffix=f".{format.lower()}")
    input_path = validate_path(input_path)
    output_path = Path(output_path).expanduser().resolve()
    with Image.open(input_path) as img:
        # JPEG cannot store alpha/palette data; flatten to RGB first to
        # avoid "cannot write mode RGBA as JPEG" errors.
        if fmt == "JPEG" and img.mode not in ("RGB", "L"):
            img = img.convert("RGB")
        output_path.parent.mkdir(parents=True, exist_ok=True)
        img.save(output_path, format=fmt)
    return str(output_path)
|
| 118 |
+
|
| 119 |
+
def blur_image(input_path: str, radius: float, output_path: str = None) -> str:
    """Apply Gaussian blur to an image.

    Args:
        input_path (str): Image path.
        radius (float): Blur radius.
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to blurred image.
    """
    if output_path is None:
        output_path = temp_output()
    source = validate_path(input_path)
    destination = Path(output_path).expanduser().resolve()
    with Image.open(source) as img:
        blurred = img.filter(ImageFilter.GaussianBlur(radius))
        destination.parent.mkdir(parents=True, exist_ok=True)
        blurred.save(destination)
    return str(destination)
|
| 140 |
+
|
| 141 |
+
def rotate_image(input_path: str, angle: float, output_path: str = None) -> str:
    """Rotate an image counter-clockwise, expanding the canvas to fit.

    Args:
        input_path (str): Source path.
        angle (float): Rotation angle in degrees.
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to rotated image.
    """
    if output_path is None:
        output_path = temp_output()
    source = validate_path(input_path)
    destination = Path(output_path).expanduser().resolve()
    with Image.open(source) as img:
        rotated = img.rotate(angle, expand=True)
        destination.parent.mkdir(parents=True, exist_ok=True)
        rotated.save(destination)
    return str(destination)
|
| 162 |
+
|
| 163 |
+
def crop_image(input_path: str, left: int, top: int, right: int, bottom: int, output_path: str = None) -> str:
    """Crop an image to the given box.

    Args:
        input_path (str): Source image.
        left, top, right, bottom (int): Crop box coordinates (PIL convention:
            right/bottom are exclusive).
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to cropped image.
    """
    if output_path is None:
        output_path = temp_output()
    source = validate_path(input_path)
    destination = Path(output_path).expanduser().resolve()
    with Image.open(source) as img:
        cropped = img.crop((left, top, right, bottom))
        destination.parent.mkdir(parents=True, exist_ok=True)
        cropped.save(destination)
    return str(destination)
|
| 184 |
+
|
| 185 |
+
def thumbnail_image(input_path: str, max_width: int, max_height: int, output_path: str = None) -> str:
    """Downscale an image in place of the original aspect ratio.

    Args:
        input_path (str): Source path.
        max_width (int): Maximum width constraint.
        max_height (int): Maximum height constraint.
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to thumbnail image.
    """
    if output_path is None:
        output_path = temp_output()
    source = validate_path(input_path)
    destination = Path(output_path).expanduser().resolve()
    with Image.open(source) as img:
        # thumbnail() mutates in place and never enlarges the image.
        img.thumbnail((max_width, max_height))
        destination.parent.mkdir(parents=True, exist_ok=True)
        img.save(destination)
    return str(destination)
|
| 206 |
+
|
| 207 |
+
def add_watermark(input_path: str, text: str, x: int, y: int, output_path: str = None) -> str:
    """Stamp semi-transparent white watermark text onto an image.

    Args:
        input_path (str): Image file.
        text (str): Watermark content.
        x (int): Horizontal position of the text.
        y (int): Vertical position of the text.
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to watermarked image.
    """
    if output_path is None:
        output_path = temp_output()
    source = validate_path(input_path)
    destination = Path(output_path).expanduser().resolve()
    with Image.open(source).convert("RGBA") as base:
        # Draw the text on a transparent overlay, then composite it so the
        # 50%-alpha fill blends with the underlying pixels.
        overlay = Image.new("RGBA", base.size)
        ImageDraw.Draw(overlay).text((x, y), text, fill=(255, 255, 255, 128))
        stamped = Image.alpha_composite(base, overlay).convert("RGB")
        destination.parent.mkdir(parents=True, exist_ok=True)
        stamped.save(destination)
    return str(destination)
|
| 232 |
+
|
| 233 |
+
def flip_image(input_path: str, mode: str, output_path: str = None) -> str:
    """Mirror an image horizontally or vertically.

    Args:
        input_path (str): Source path.
        mode (str): 'horizontal' or 'vertical'.
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to flipped image.

    Raises:
        ValueError: If mode is not 'horizontal' or 'vertical'.
    """
    if output_path is None:
        output_path = temp_output()
    source = validate_path(input_path)
    destination = Path(output_path).expanduser().resolve()
    transposition = {
        "horizontal": Image.FLIP_LEFT_RIGHT,
        "vertical": Image.FLIP_TOP_BOTTOM,
    }
    with Image.open(source) as img:
        if mode not in transposition:
            raise ValueError("Invalid mode")
        flipped = img.transpose(transposition[mode])
        destination.parent.mkdir(parents=True, exist_ok=True)
        flipped.save(destination)
    return str(destination)
|
| 259 |
+
|
| 260 |
+
def invert_colors(input_path: str, output_path: str = None) -> str:
    """Invert the RGB channels of an image (photographic negative).

    Args:
        input_path (str): Image file.
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to inverted image.
    """
    if output_path is None:
        output_path = temp_output()

    source = validate_path(input_path)
    destination = Path(output_path).expanduser().resolve()
    with Image.open(source) as img:
        negative = ImageOps.invert(img.convert("RGB"))
        destination.parent.mkdir(parents=True, exist_ok=True)
        negative.save(destination)
    return str(destination)
|
| 281 |
+
|
| 282 |
+
def list_images_in_directory(path: str) -> str:
    """
    List image files in a directory.

    Args:
        path (str): Directory path.

    Returns:
        str: Newline-separated, sorted list of image paths, or
        "No image files found." when the directory holds none.

    Raises:
        NotADirectoryError: If `path` is not a directory.
    """
    dir_path = Path(path).expanduser().resolve()
    if not dir_path.is_dir():
        raise NotADirectoryError(f"{dir_path} is not a directory.")
    exts = {".png", ".jpg", ".jpeg", ".webp", ".bmp", ".gif"}
    # Sort for deterministic output; iterdir() order is filesystem-dependent.
    files = sorted(str(p) for p in dir_path.iterdir() if p.suffix.lower() in exts)
    return "\n".join(files) if files else "No image files found."
|
| 298 |
+
|
| 299 |
+
def get_image_metadata(path: str) -> dict:
    """
    Get metadata from an image.

    Args:
        path (str): Image path.

    Returns:
        dict: path, format, mode, size, width, height, and the raw PIL
        ``info`` mapping.
    """
    resolved = validate_path(path)
    with Image.open(resolved) as img:
        meta = {
            "path": str(resolved),
            "format": img.format,
            "mode": img.mode,
            "size": img.size,
            "width": img.width,
            "height": img.height,
            "info": img.info,
        }
    return meta
|
| 321 |
+
|
| 322 |
+
import random
|
| 323 |
+
|
| 324 |
+
def apply_random_color_variation(input_path: str, strength: float = 0.1, output_path: str = None) -> str:
    """
    Apply random per-pixel, per-channel color variation to an image.

    Args:
        input_path (str): Source image path.
        strength (float): Amount of variation per channel (0.0 to 1.0, default = 0.1).
        output_path (str): Target image path; a temp file is used when omitted.

    Returns:
        str: Path to the color-augmented image.
    """
    if output_path is None:
        output_path = temp_output()
    input_path = validate_path(input_path)
    output_path = Path(output_path).expanduser().resolve()

    limit = int(255 * strength)

    def jitter(channel):
        # Shift by a uniform offset in [-limit, limit], clamped to 0..255.
        return min(max(channel + random.randint(-limit, limit), 0), 255)

    with Image.open(input_path).convert("RGB") as img:
        px = img.load()
        for row in range(img.height):
            for col in range(img.width):
                r, g, b = px[col, row]
                px[col, row] = (jitter(r), jitter(g), jitter(b))

        output_path.parent.mkdir(parents=True, exist_ok=True)
        img.save(output_path)

    return str(output_path)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
def add_images(image_path1: str, image_path2: str, output_path: str = None) -> str:
    """
    Add two same-sized images pixel-wise and save the result.

    Args:
        image_path1 (str): Path to the first image.
        image_path2 (str): Path to the second image.
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to the added image.

    Raises:
        ValueError: If the two images differ in size.
    """
    if output_path is None:
        output_path = temp_output()

    first = validate_path(image_path1)
    second = validate_path(image_path2)
    destination = Path(output_path).expanduser().resolve()

    with Image.open(first).convert("RGB") as a, Image.open(second).convert("RGB") as b:
        if a.size != b.size:
            raise ValueError("Images must be the same size")
        # Saturating per-channel addition (values clip at 255).
        combined = ImageChops.add(a, b, scale=1.0, offset=0)
        destination.parent.mkdir(parents=True, exist_ok=True)
        combined.save(destination)

    return str(destination)
|
| 384 |
+
from PIL import Image
|
| 385 |
+
from pathlib import Path
|
| 386 |
+
|
| 387 |
+
def concat_images(image_path1: str, image_path2: str, mode: str = "horizontal", output_path: str = None) -> str:
    """
    Concatenate two images either horizontally or vertically.

    Args:
        image_path1 (str): Path to the first image.
        image_path2 (str): Path to the second image.
        mode (str): 'horizontal' or 'vertical'.
        output_path (str): Destination; a temp file is used when omitted.

    Returns:
        str: Path to the output image.

    Raises:
        ValueError: If mode is neither 'horizontal' nor 'vertical'.
    """
    if output_path is None:
        output_path = temp_output()

    first = Image.open(image_path1).convert("RGB")
    second = Image.open(image_path2).convert("RGB")

    # The canvas grows along the chosen axis; the other axis takes the
    # larger of the two sizes (the gap is left black).
    if mode == "horizontal":
        canvas = Image.new("RGB", (first.width + second.width, max(first.height, second.height)))
        second_origin = (first.width, 0)
    elif mode == "vertical":
        canvas = Image.new("RGB", (max(first.width, second.width), first.height + second.height))
        second_origin = (0, first.height)
    else:
        raise ValueError("Invalid mode. Use 'horizontal' or 'vertical'.")

    canvas.paste(first, (0, 0))
    canvas.paste(second, second_origin)

    destination = Path(output_path).expanduser().resolve()
    destination.parent.mkdir(parents=True, exist_ok=True)
    canvas.save(destination)
    return str(destination)
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
|
cat.png
ADDED
|
Git LFS Details
|
gradio_client_app.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from gradio_client import Client, handle_file
from pathlib import Path

# Example client for the hosted Space: calls the color-correction endpoint
# on a local image and prints the path of the processed result.
# Fixed: a stray trailing quote in the Space id broke the string literal
# (SyntaxError), so this script could not run at all.
client = Client("ayman3000/Image_enhancer_pro")
result = client.predict(
    input_path=handle_file('cat.png'),
    brightness=1.3,
    contrast=1,
    saturation=1,
    output_path="/tmp/tmp_5cvuybs.png",
    api_name="/apply_color_correction"
)
print(result)
|
requirements.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
pillow
|
| 2 |
+
numpy
|
| 3 |
+
opencv-python
|
snake.png
ADDED
|
Git LFS Details
|
tools.py
ADDED
|
@@ -0,0 +1,324 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# 📁 File: tools/image_enhancer/tools.py
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import cv2
|
| 5 |
+
import numpy as np
|
| 6 |
+
from PIL import Image, ImageEnhance, ImageFilter
|
| 7 |
+
from utils import validate_path, ensure_path_from_img, temp_output
|
| 8 |
+
|
| 9 |
+
# === TOOL IMPLEMENTATIONS ===
|
| 10 |
+
|
| 11 |
+
def apply_color_correction(input_path: str,
                           brightness: float = 1.0,
                           contrast: float = 1.0,
                           saturation: float = 1.0,
                           output_path: str = None):
    """
    Applies color correction to an image including brightness, contrast and saturation adjustments.

    Args:
        input_path (str): Path to the input image file (or an in-memory PIL image,
            which ensure_path_from_img persists to a temp file)
        brightness (float): Brightness factor (1.0 is original, <1.0 darker, >1.0 brighter)
        contrast (float): Contrast factor (1.0 is original, <1.0 less contrast, >1.0 more contrast)
        saturation (float): Saturation factor (1.0 is original, <1.0 less saturated, >1.0 more saturated)
        output_path (str): Path where the corrected image will be saved; a temp
            file is used when omitted

    Returns:
        str: Path of the saved, color-corrected image

    Raises:
        FileNotFoundError: If the input file does not exist
        ValueError: If the input is not a valid image
        OSError: If there's an error creating the output directory or writing the file
    """
    try:
        if output_path is None:
            output_path = temp_output()
        source = ensure_path_from_img(input_path)

        img = Image.open(source)

        # Each enhancer is skipped at its identity factor to avoid
        # needless recomposition of the image.
        if brightness != 1.0:
            img = ImageEnhance.Brightness(img).enhance(brightness)
        if contrast != 1.0:
            img = ImageEnhance.Contrast(img).enhance(contrast)
        if saturation != 1.0:
            img = ImageEnhance.Color(img).enhance(saturation)

        # os.path.dirname returns "" for bare filenames, and makedirs("")
        # raises FileNotFoundError — only create a directory when one exists.
        out_dir = os.path.dirname(output_path)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)

        img.save(output_path)
    except Exception as e:
        # Re-raise with more context, preserving the original traceback.
        raise type(e)(f"Error in color correction: {str(e)}") from e
    return str(output_path)
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def apply_sharpening(input_path: str, strength: float = 1.0, output_path: str = None) -> str:
    """
    Applies sharpening to an image to enhance edges and details.

    Args:
        input_path (str): Path to the input image file (an in-memory PIL image
            is also accepted, mirroring the other helpers in this module)
        strength (float): Sharpening strength (1.0 is standard, >1.0 is stronger)
        output_path (str): Path where the sharpened image will be saved.
            A temporary file is created when omitted.

    Returns:
        str: Path where the sharpened image was saved

    Raises:
        FileNotFoundError: If the input file does not exist
        ValueError: If the input is not a valid image
        OSError: If there's an error creating the output directory or writing the file
    """
    try:
        if output_path is None:
            output_path = temp_output()

        # Normalize the input first: ensure_path_from_img also accepts PIL
        # images, which a bare os.path.exists() check would reject. For path
        # strings it raises FileNotFoundError when the file is missing.
        img_path = ensure_path_from_img(input_path)

        # Open the image
        img = Image.open(img_path)

        # Apply sharpening
        if strength <= 1.0:
            # Use PIL's built-in SHARPEN filter for moderate sharpening
            sharpened = img.filter(ImageFilter.SHARPEN)
        else:
            # For stronger sharpening, scale UnsharpMask's percent with strength
            sharpened = img.filter(
                ImageFilter.UnsharpMask(radius=2.0, percent=int(150 * strength), threshold=3)
            )

        # Only create a directory when the output path actually contains one;
        # os.makedirs("") raises FileNotFoundError for bare filenames.
        out_dir = os.path.dirname(output_path)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)

        # Save the result
        sharpened.save(output_path)
    except Exception as e:
        # Re-raise with more context while preserving the original traceback
        raise type(e)(f"Error in sharpening: {str(e)}") from e
    return str(output_path)
|
| 116 |
+
|
| 117 |
+
# --- Constants for Denoising Parameters ---
# h / hColor: filter strength for the luminance / color components.
#   Larger values remove more noise (and more detail with them).
# templateWindowSize / searchWindowSize: should be odd; recommended 7 / 21.
_DENOISE_WINDOWS = {"templateWindowSize": 7, "searchWindowSize": 21}

DENOISE_PARAMS = {
    level: {"h": h, "hColor": h, **_DENOISE_WINDOWS}
    for level, h in (("low", 5), ("medium", 10), ("high", 15))
}
|
| 143 |
+
|
| 144 |
+
def reduce_noise(input_path: str, strength: str = "medium", output_path: str = None) -> str:
    """
    Reduces noise in an image using cv2.fastNlMeansDenoisingColored.

    Args:
        input_path (str): Path to the input image file.
        strength (str): Noise reduction strength ("low", "medium", or "high").
        output_path (str): Path where the denoised image will be saved.
            A temporary file is created when omitted.

    Returns:
        str: Path where the denoised image was saved.

    Raises:
        FileNotFoundError: If the input file does not exist.
        ValueError: If the strength parameter is invalid or the image cannot be loaded.
        OSError: If there's an error creating the output directory or writing the file.
    """
    output_path = temp_output() if output_path is None else output_path

    if not os.path.exists(input_path):
        raise FileNotFoundError(f"Input file not found: {input_path}")
    if strength not in DENOISE_PARAMS:
        raise ValueError(f"Invalid strength value: {strength}. Must be one of {list(DENOISE_PARAMS.keys())}")

    try:
        img = cv2.imread(input_path)
        if img is None:
            # cv2.imread returns None for corrupted/unsupported files instead of raising.
            raise ValueError(f"Failed to load image from path: {input_path}. The file may be corrupted or in an unsupported format.")

        # Apply non-local means denoising with the preset for this strength.
        params = DENOISE_PARAMS[strength]
        denoised = cv2.fastNlMeansDenoisingColored(
            img,
            None,
            params["h"],
            params["hColor"],
            params["templateWindowSize"],
            params["searchWindowSize"],
        )

        # Create the output directory only when the path actually has one.
        directory = os.path.dirname(output_path)
        if directory:
            os.makedirs(directory, exist_ok=True)

        # cv2.imwrite signals failure via its return value, not an exception.
        if not cv2.imwrite(output_path, denoised):
            raise OSError(f"Failed to save the denoised image to {output_path}")

    except (cv2.error, OSError) as e:
        # Wrap OpenCV / filesystem errors with context without losing the cause.
        raise OSError(f"An error occurred during image processing or file saving for {input_path}") from e
    return str(output_path)
|
| 203 |
+
|
| 204 |
+
def upscale_resolution(input_path: str, scale_factor: int = 2, method: str = "bicubic", output_path: str = None) -> str:
    """
    Increases the resolution of an image using various upscaling methods.

    Args:
        input_path (str): Path to the input image file (an in-memory PIL image
            is also accepted, mirroring the other helpers in this module)
        scale_factor (int): How much to increase resolution (2 = 2x, 3 = 3x, 4 = 4x)
        method (str): Upscaling method ("nearest", "bilinear", "bicubic", or "lanczos")
        output_path (str): Path where the upscaled image will be saved.
            A temporary file is created when omitted.

    Returns:
        str: Path where the upscaled image was saved

    Raises:
        FileNotFoundError: If the input file does not exist
        ValueError: If the scale_factor or method parameters are invalid
        OSError: If there's an error creating the output directory or writing the file
    """
    try:
        if output_path is None:
            output_path = temp_output()

        # Validate scale factor
        if scale_factor not in (2, 3, 4):
            raise ValueError(f"Invalid scale factor: {scale_factor}. Must be 2, 3, or 4")

        # Map method names to PIL resampling filters; a direct lookup both
        # validates the method and selects the filter (the old code validated
        # and then redundantly fell back to BICUBIC via .get()).
        method_map = {
            "nearest": Image.NEAREST,
            "bilinear": Image.BILINEAR,
            "bicubic": Image.BICUBIC,
            "lanczos": Image.LANCZOS,
        }
        try:
            resampling_filter = method_map[method.lower()]
        except KeyError:
            raise ValueError(f"Invalid method: {method}. Must be one of {list(method_map.keys())}") from None

        # Normalize the input: accepts a path string (raising FileNotFoundError
        # when missing) or an in-memory PIL image, like the sibling helpers.
        img_path = ensure_path_from_img(input_path)
        img = Image.open(img_path)

        # Scale both dimensions by the requested factor
        width, height = img.size
        upscaled = img.resize((width * scale_factor, height * scale_factor), resampling_filter)

        # Only create a directory when the output path actually contains one;
        # os.makedirs("") raises FileNotFoundError for bare filenames.
        out_dir = os.path.dirname(output_path)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)

        # Save the result
        upscaled.save(output_path)
    except Exception as e:
        # Re-raise with more context while preserving the original traceback
        raise type(e)(f"Error in upscaling: {str(e)}") from e
    return str(output_path)
|
| 271 |
+
|
| 272 |
+
def auto_enhance(input_path: str, output_path: str = None) -> str:
    """
    Automatically enhances an image by applying balanced corrections to
    brightness, contrast, color, and sharpness.

    Args:
        input_path (str): Path to the input image file
        output_path (str): Path where the enhanced image will be saved.
            A temporary file is created when omitted.

    Returns:
        str: Path where the enhanced image was saved

    Raises:
        FileNotFoundError: If the input file does not exist
        ValueError: If the input is not a valid image
        OSError: If there's an error creating the output directory or writing the file
    """
    try:
        if output_path is None:
            output_path = temp_output()

        # Validate input file exists
        if not os.path.exists(input_path):
            raise FileNotFoundError(f"Input file not found: {input_path}")

        # Open the image
        img = Image.open(input_path)

        # Apply a fixed pipeline of mild enhancements; factors > 1.0
        # strengthen the property, 1.0 would leave the image unchanged.
        for enhancer_cls, factor in (
            (ImageEnhance.Contrast, 1.2),
            (ImageEnhance.Brightness, 1.1),
            (ImageEnhance.Color, 1.1),
            (ImageEnhance.Sharpness, 1.3),
        ):
            img = enhancer_cls(img).enhance(factor)

        # Only create a directory when the output path actually contains one;
        # os.makedirs("") raises FileNotFoundError for bare filenames.
        out_dir = os.path.dirname(output_path)
        if out_dir:
            os.makedirs(out_dir, exist_ok=True)

        # Save the result
        img.save(output_path)
    except Exception as e:
        # Re-raise with more context while preserving the original traceback
        raise type(e)(f"Error in auto enhancement: {str(e)}") from e
    return str(output_path)
|
utils.py
ADDED
|
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
import tempfile
|
| 4 |
+
|
| 5 |
+
from PIL import Image
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def create_folder(folder_path: str) -> str:
    """
    Create a folder if it doesn't exist.

    Args:
        folder_path (str): Path to the folder to create.

    Returns:
        str: Absolute path to the created or existing folder.
    """
    resolved = Path(folder_path).expanduser().resolve()
    # mkdir with exist_ok is idempotent, so pre-existing folders are fine.
    resolved.mkdir(parents=True, exist_ok=True)
    return str(resolved)
|
| 21 |
+
|
| 22 |
+
def validate_path(p: str) -> Path:
    """Expand and resolve *p*, raising FileNotFoundError when it does not exist."""
    resolved = Path(os.path.expanduser(p)).resolve()
    if resolved.exists():
        return resolved
    raise FileNotFoundError(f"Path does not exist: {resolved}")
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def temp_output(suffix=".png"):
    """Create a fresh temporary file (kept on disk) and return its path."""
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as handle:
        return handle.name
|
| 31 |
+
|
| 32 |
+
def ensure_path_from_img(img) -> Path:
    """
    Ensures that the input is converted to a valid image file path.

    A PIL.Image object is persisted to a fresh temporary file and that file's
    path is returned; anything else is treated as a path (string or Path) and
    validated before being returned as a Path.

    Args:
        img: A PIL.Image or a path string

    Returns:
        Path to the image file
    """
    if not isinstance(img, Image.Image):
        return validate_path(img)
    saved_to = temp_output()
    img.save(saved_to)
    return Path(saved_to)
|