Update app.py

app.py CHANGED
@@ -3,8 +3,8 @@ import cv2
 import numpy as np
 from PIL import Image
 
-def detect_text_lines(img_pil):
-    """
+def find_text_lines_voynich(img_pil):
+    """Specialized function to find actual Voynich text lines, not page edges"""
     if img_pil is None:
         return None
 
@@ -15,138 +15,141 @@ def detect_text_lines(img_pil):
     else:
         gray = img
 
-    # Get image dimensions
     img_height, img_width = gray.shape
     print(f"Processing image: {img_width}x{img_height}")
 
-    #
-
-
-
-
-    result = try_extract_line(cropped_gray, border_x, border_y, img, "Strategy 1 (cropped)")
-    if result is not None:
-        return result
-
-    # Strategy 2: Try with full image
-    print("Strategy 1 failed, trying full image...")
-    result = try_extract_line(gray, 0, 0, img, "Strategy 2 (full)")
-    if result is not None:
-        return result
-
-    # Strategy 3: Simple horizontal strip from top
-    print("Strategy 2 failed, trying horizontal strip...")
-    strip_height = min(100, img_height // 8)  # Top 1/8 of image or 100px
-    strip = img[0:strip_height, 0:img_width]
-    if strip.size > 0:
-        print(f"Returning top strip of size: {strip.shape}")
-        return Image.fromarray(strip)
-
-    # Strategy 4: Return original if all else fails
-    print("All strategies failed, returning original image")
-    return img_pil
-
-def try_extract_line(gray, offset_x, offset_y, original_img, strategy_name):
-    """Try to extract a text line with given parameters"""
-    print(f"\n--- {strategy_name} ---")
+    # Skip the top portion where page edges and headers might be
+    # Look for text in the middle and lower portions
+    skip_top = int(img_height * 0.15)  # Skip top 15%
+    search_area = gray[skip_top:, :]
 
-
-    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
-    enhanced = clahe.apply(gray)
+    print(f"Searching in area starting from y={skip_top}")
 
-    #
-
-
-        cv2.THRESH_BINARY_INV, 15, 8)
+    # Enhance contrast specifically for faded manuscript text
+    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
+    enhanced = clahe.apply(search_area)
 
-    #
-    thresh = cv2.
+    # Use adaptive thresholding which works better for manuscripts
+    thresh = cv2.adaptiveThreshold(enhanced, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                                   cv2.THRESH_BINARY_INV, 11, 2)
 
-    #
-
-
+    # Create a small horizontal kernel to connect characters within words
+    # But keep it small to avoid connecting different lines
+    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 1))
+    connected = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
 
     # Find contours
-    contours, _ = cv2.findContours(
-    print(f"Found {len(contours)} contours")
+    contours, _ = cv2.findContours(connected, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+    print(f"Found {len(contours)} contours in search area")
 
-
-
+    # Filter for text-like contours
+    text_contours = []
+    search_height, search_width = search_area.shape
+
+    for i, contour in enumerate(contours):
+        x, y, w, h = cv2.boundingRect(contour)
+        area = cv2.contourArea(contour)
+
+        # Calculate properties
+        aspect_ratio = w / h if h > 0 else 0
+        width_percent = (w / search_width) * 100
+        height_percent = (h / search_height) * 100
+
+        print(f"Contour {i}: pos=({x},{y}), size=({w},{h}), ratio={aspect_ratio:.1f}, w%={width_percent:.1f}, h%={height_percent:.1f}")
+
+        # Criteria for Voynich text lines:
+        # - Should be reasonably wide (at least 15% of width)
+        # - Should not be too tall (text lines are horizontal)
+        # - Should have good aspect ratio (wider than tall)
+        # - Should not be tiny (at least 10 pixels high for readability)
+        if (w >= search_width * 0.15 and  # Minimum width
+            h >= 10 and  # Minimum height
+            h <= search_height * 0.05 and  # Maximum height (5% of search area)
+            aspect_ratio >= 3.0 and  # Should be wide
+            width_percent <= 90):  # Not the entire width (avoid page edges)
+
+            text_contours.append((contour, x, y + skip_top, w, h))  # Add skip_top back to y
+            print(f"  ✓ ACCEPTED as text line")
+        else:
+            print(f"  ✗ REJECTED")
+
+    print(f"Found {len(text_contours)} potential text lines")
+
+    if text_contours:
+        # Sort by y-coordinate to get the topmost text line
+        text_contours.sort(key=lambda x: x[2])  # Sort by y position
+
+        # Take the first text line found
+        contour, x, y, w, h = text_contours[0]
+
+        print(f"Extracting text line at: x={x}, y={y}, w={w}, h={h}")
+
+        # Extract with generous margins
+        margin_x = 30
+        margin_y = 20
+        y_start = max(0, y - margin_y)
+        y_end = min(img_height, y + h + margin_y)
+        x_start = max(0, x - margin_x)
+        x_end = min(img_width, x + w + margin_x)
+
+        extracted = img[y_start:y_end, x_start:x_end]
+
+        if extracted.size > 0:
+            print(f"Successfully extracted line: {extracted.shape}")
+            return Image.fromarray(extracted)
+
+    # Fallback: If no text lines found, try scanning line by line in lower portion
+    print("No contours found, trying line-by-line scan...")
+    return scan_for_text_lines(img, skip_top)
+
+def scan_for_text_lines(img, start_y):
+    """Scan line by line looking for text content"""
+    if len(img.shape) == 3:
+        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+    else:
+        gray = img
 
-    # Get image dimensions
     img_height, img_width = gray.shape
 
-    #
-    for
-
+    # Scan from start_y downward
+    for y in range(start_y, img_height - 40, 10):  # Check every 10 pixels
+        # Take a 40-pixel high strip
+        strip = gray[y:y+40, :]
 
-
-
-        min_width = img_width * 0.1
-        max_height = img_height * 0.15
-        min_aspect = 2.0
-        max_area = 10.0
-    elif attempt == 1:
-        # Moderate criteria
-        min_width = img_width * 0.15
-        max_height = img_height * 0.12
-        min_aspect = 3.0
-        max_area = 8.0
-    else:
-        # Strict criteria
-        min_width = img_width * 0.2
-        max_height = img_height * 0.08
-        min_aspect = 5.0
-        max_area = 5.0
+        # Apply threshold
+        _, thresh = cv2.threshold(strip, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
 
-
+        # Count dark pixels (ink)
+        ink_pixels = np.sum(thresh > 0)
+        total_pixels = strip.shape[0] * strip.shape[1]
+        ink_ratio = ink_pixels / total_pixels
 
-
-
-
-
-        # Calculate metrics
-        aspect_ratio = w / h if h > 0 else 0
-        area_percent = (area / (img_width * img_height)) * 100
-
-        # Check criteria
-        if (w >= min_width and
-            h >= 5 and  # Minimum readable height
-            h <= max_height and
-            aspect_ratio >= min_aspect and
-            area_percent <= max_area):
-            valid_contours.append((contour, x, y, w, h, area_percent))
-            print(f"  ✓ Valid contour {i}: {w}x{h}, ratio={aspect_ratio:.1f}, area={area_percent:.1f}%")
-        else:
-            print(f"  ✗ Contour {i}: {w}x{h}, ratio={aspect_ratio:.1f}, area={area_percent:.1f}%")
+        # Also check if the ink is distributed horizontally (like text)
+        # Sum ink pixels in each row
+        row_sums = np.sum(thresh, axis=1)
+        rows_with_ink = np.sum(row_sums > 0)
 
-
-
-
-
-    #
-
-
-        # Adjust coordinates back to original image
-        x_orig = x + offset_x
-        y_orig = y + offset_y
-
-        print(f"Selected line: x={x_orig}, y={y_orig}, w={w}, h={h}")
-
-        # Extract with margin
-        margin = 15
-        y_start = max(0, y_orig - margin)
-        y_end = min(original_img.shape[0], y_orig + h + margin)
-        x_start = max(0, x_orig - margin)
-        x_end = min(original_img.shape[1], x_orig + w + margin)
-
-        crop = original_img[y_start:y_end, x_start:x_end]
+        print(f"y={y}: ink_ratio={ink_ratio:.3f}, rows_with_ink={rows_with_ink}")
+
+        # If we find a region with reasonable ink and horizontal distribution
+        if ink_ratio > 0.02 and rows_with_ink >= 5:  # At least 5 rows with ink
+            # Expand the region
+            y_start = max(0, y - 15)
+            y_end = min(img_height, y + 55)
 
-        if
-
-
+            if len(img.shape) == 3:
+                extracted = img[y_start:y_end, :]
+            else:
+                extracted = gray[y_start:y_end, :]
+
+            print(f"Found text at y={y}, extracting region {y_start}:{y_end}")
+            return Image.fromarray(extracted)
 
-    return
+    # If still nothing found, return a middle section
+    print("No text found, returning middle section")
+    mid_y = img_height // 2
+    section = img[mid_y:mid_y + img_height//4, :]
+    return Image.fromarray(section)
 
 def preprocess_voynich_image(img_pil):
     """Enhanced preprocessing for Voynich manuscript images"""
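
The heart of the rewrite is the contour pass above: CLAHE contrast enhancement, Gaussian adaptive thresholding, a wide-but-flat morphological close that fuses individual glyphs into line-shaped blobs, and geometric filtering of the blob bounding boxes. A condensed, self-contained sketch of the same idea (the helper name find_line_boxes and the synthetic test page are illustrative only, not part of app.py; assumes OpenCV 4.x, where findContours returns two values):

import cv2
import numpy as np

def find_line_boxes(gray):
    """Return bounding boxes of wide, flat ink blobs (candidate text lines)."""
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    enhanced = clahe.apply(gray)
    thresh = cv2.adaptiveThreshold(enhanced, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY_INV, 11, 2)
    # Horizontal close: bridges inter-glyph gaps without merging adjacent lines
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 1))
    connected = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
    contours, _ = cv2.findContours(connected, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    h_img, w_img = gray.shape
    boxes = []
    for c in contours:
        x, y, w, h = cv2.boundingRect(c)
        # Same shape tests as the new code: wide, flat, tall enough to read
        if w >= 0.15 * w_img and 10 <= h <= 0.05 * h_img and w / max(h, 1) >= 3.0:
            boxes.append((x, y, w, h))
    return sorted(boxes, key=lambda b: b[1])  # top to bottom

# Synthetic check: a pale page with two rows of small dark "glyphs"
page = np.full((400, 600), 220, dtype=np.uint8)
for row in (120, 200):
    for x in range(60, 520, 18):           # 12px glyphs with 6px gaps
        page[row:row + 10, x:x + 12] = 40  # gaps < kernel width, so glyphs fuse
print(find_line_boxes(page))               # should report two wide, flat boxes

The (8, 1) kernel is the tuning point: wide enough to bridge the gaps between glyphs, but only one pixel tall, so two lines of text never merge into a single blob.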
@@ -169,8 +172,8 @@ def preprocess_voynich_image(img_pil):
 
     return Image.fromarray(enhanced)
 
-def debug_text_detection(img_pil):
-    """
+def debug_voynich_detection(img_pil):
+    """Debug function showing the detection process"""
     if img_pil is None:
         return None, None, None, None
 
@@ -180,101 +183,77 @@ def debug_text_detection(img_pil):
     else:
         gray = img
 
-
-    original_gray = Image.fromarray(gray)
+    img_height, img_width = gray.shape
 
-    #
-
-
-    enhanced_img = Image.fromarray(enhanced)
+    # Show the search area (skip top 15%)
+    skip_top = int(img_height * 0.15)
+    search_area = gray[skip_top:, :]
 
-    #
-
-
-        cv2.THRESH_BINARY_INV, 15, 8)
-    thresh_combined = cv2.bitwise_or(thresh_otsu, thresh_adaptive)
-    thresh_img = Image.fromarray(thresh_combined)
+    # Create a visualization showing the search area
+    search_viz = np.copy(gray)
+    cv2.rectangle(search_viz, (0, skip_top), (img_width, img_height), (128), 2)
 
-    #
-
+    # Apply CLAHE to search area
+    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))
+    enhanced = clahe.apply(search_area)
 
-
-
-
-    """Extract top region of the manuscript for text analysis"""
-    if img_pil is None:
-        return None
+    # Apply threshold
+    thresh = cv2.adaptiveThreshold(enhanced, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+                                   cv2.THRESH_BINARY_INV, 11, 2)
 
-
-
+    # Show full-size threshold result
+    thresh_full = np.zeros_like(gray)
+    thresh_full[skip_top:, :] = thresh
 
-    #
-
-    region = img[0:region_pixels, :]
+    # Get the final result
+    result = find_text_lines_voynich(img_pil)
 
-    return Image.fromarray(
+    return (Image.fromarray(search_viz),
+            Image.fromarray(enhanced),
+            Image.fromarray(thresh_full),
+            result)
 
-def simple_line_extraction(img_pil):
-    """
+def extract_text_block(img_pil, start_percent=0.2, height_percent=0.4):
+    """Extract a block of text from a specific region"""
     if img_pil is None:
         return None
 
     img = np.array(img_pil)
-
-        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-    else:
-        gray = img
-
-    # Find the first significant horizontal line by scanning from top
-    img_height, img_width = gray.shape
+    img_height = img.shape[0]
 
-
-
-
-
-        # Check if this region has enough "ink" (dark pixels)
-        _, thresh = cv2.threshold(line_region, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
-        ink_pixels = np.sum(thresh > 0)
-        total_pixels = line_region.shape[0] * line_region.shape[1]
-        ink_ratio = ink_pixels / total_pixels
-
-        # If we find a region with reasonable amount of ink, extract it
-        if ink_ratio > 0.05:  # At least 5% ink
-            # Expand the region a bit
-            y_start = max(0, y - 10)
-            y_end = min(img_height, y + 50)
-            extracted = img[y_start:y_end, :]
-
-            print(f"Simple extraction found line at y={y}, ink_ratio={ink_ratio:.3f}")
-            return Image.fromarray(extracted)
+    start_y = int(img_height * start_percent)
+    block_height = int(img_height * height_percent)
+    end_y = min(img_height, start_y + block_height)
 
-
-
-    return Image.fromarray(top_portion)
+    block = img[start_y:end_y, :]
+    return Image.fromarray(block)
 
 # Enhanced Gradio interface
-with gr.Blocks(title="Robust Voynich Manuscript Line Extractor") as demo:
-    gr.Markdown("#
-    gr.Markdown("
+with gr.Blocks(title="Voynich Text Line Extractor - Fixed") as demo:
+    gr.Markdown("# Voynich Text Line Extractor - Fixed Version")
+    gr.Markdown("This version specifically looks for actual text lines in the manuscript, not page edges. It skips the top portion and searches in the text areas.")
 
     with gr.Row():
         with gr.Column():
             input_image = gr.Image(type="pil", label="Upload Voynich Folio")
             enhance_btn = gr.Button("Enhance Image")
-            extract_btn = gr.Button("
-
-
-
+            extract_btn = gr.Button("Find Text Lines")
+            block_btn = gr.Button("Extract Text Block")
+            debug_btn = gr.Button("Debug Detection")
+
+            # Add slider for text block extraction
+            start_slider = gr.Slider(0.1, 0.8, 0.2, label="Start Position (% from top)")
+            height_slider = gr.Slider(0.1, 0.6, 0.4, label="Block Height (% of image)")
 
         with gr.Column():
             enhanced_output = gr.Image(label="Enhanced Image")
-            line_output = gr.Image(label="Extracted
+            line_output = gr.Image(label="Extracted Text")
 
     with gr.Row():
-
-        debug_enhanced = gr.Image(label="2. Enhanced
-        debug_thresh = gr.Image(label="3.
-        debug_result = gr.Image(label="4.
+        debug_search = gr.Image(label="1. Search Area")
+        debug_enhanced = gr.Image(label="2. Enhanced")
+        debug_thresh = gr.Image(label="3. Threshold")
+        debug_result = gr.Image(label="4. Result")
 
     enhance_btn.click(
         fn=preprocess_voynich_image,
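
extract_text_block, the manual fallback that replaces the removed scanning helpers, is plain percentage arithmetic driven by the two new sliders. Worked through with the default slider values (start 0.2, height 0.4) on a hypothetical folio scan 1200 pixels tall:

img_height = 1200                                # hypothetical folio height
start_y = int(img_height * 0.2)                  # 240: skip the top fifth
block_height = int(img_height * 0.4)             # 480
end_y = min(img_height, start_y + block_height)  # 720
# block = img[240:720, :]  the middle band, where Voynich text usually sits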
@@ -283,27 +262,21 @@ with gr.Blocks(title="Robust Voynich Manuscript Line Extractor") as demo:
     )
 
     extract_btn.click(
-        fn=
-        inputs=input_image,
-        outputs=line_output
-    )
-
-    simple_btn.click(
-        fn=simple_line_extraction,
+        fn=find_text_lines_voynich,
         inputs=input_image,
         outputs=line_output
     )
 
-
-        fn=
-        inputs=input_image,
+    block_btn.click(
+        fn=extract_text_block,
+        inputs=[input_image, start_slider, height_slider],
         outputs=line_output
     )
 
     debug_btn.click(
-        fn=
+        fn=debug_voynich_detection,
         inputs=input_image,
-        outputs=[
+        outputs=[debug_search, debug_enhanced, debug_thresh, debug_result]
     )
 
 if __name__ == "__main__":
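
The diff ends inside the if __name__ == "__main__": guard; its body is unchanged by this commit and therefore not shown. For a Gradio Space the guard conventionally just launches the Blocks app, so the closing lines presumably read something like this (an assumption, not visible in the diff):

if __name__ == "__main__":
    demo.launch()  # assumed standard Gradio entry point; actual body is outside the diff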