# Source header (HuggingFace upload metadata): nnibras — "Upload 2 files" — commit eac4855 (verified)
import cv2
import numpy as np
import gradio as gr
from skimage.metrics import structural_similarity as compare_ssim
def _pixel_to_ascii(pixel_value, ascii_chars):
    """Map one grayscale value (0-255) to a character from the dark-to-light ramp."""
    num_chars = len(ascii_chars)
    index = int(pixel_value / 255 * (num_chars - 1))
    return ascii_chars[index]


def _ascii_to_image(ascii_art, ascii_chars):
    """Rasterize an ASCII string back into a grayscale uint8 array.

    Each character is replaced by the intensity its ramp position represents,
    so the result can be compared against the original image with MSE/SSIM/PSNR.
    Unknown characters map to 0 (black).
    """
    # Guard the division when the ramp is a single character.
    denom = max(len(ascii_chars) - 1, 1)
    char_to_intensity = {
        char: int(i / denom * 255) for i, char in enumerate(ascii_chars)
    }
    ascii_lines = ascii_art.split("\n")
    height = len(ascii_lines)
    width = len(ascii_lines[0]) if height > 0 else 0
    ascii_image = np.zeros((height, width), dtype=np.uint8)
    for y, line in enumerate(ascii_lines):
        for x, char in enumerate(line):
            ascii_image[y, x] = char_to_intensity.get(char, 0)
    return ascii_image


def _calculate_mse(image1, image2):
    """Mean squared error between two equal-shape grayscale images."""
    return np.mean((image1.astype("float") - image2.astype("float")) ** 2)


def _calculate_psnr(image1, image2):
    """Peak signal-to-noise ratio in dB; inf when the images are identical."""
    mse = _calculate_mse(image1, image2)
    if mse == 0:
        return float("inf")
    PIXEL_MAX = 255.0
    return 20 * np.log10(PIXEL_MAX / np.sqrt(mse))


def ascii_art_generator(input_image, contrast, threshold1, threshold2, ascii_chars):
    """Convert an uploaded image to ASCII art and report similarity metrics.

    Parameters
    ----------
    input_image : numpy.ndarray
        Image as delivered by ``gr.Image(type="numpy")`` — RGB (or RGBA /
        grayscale) channel order, NOT OpenCV's BGR.
    contrast : float
        Alpha factor passed to ``cv2.convertScaleAbs``.
    threshold1, threshold2 : float
        Canny edge-detection thresholds.
    ascii_chars : str
        Character ramp ordered dark -> light; falls back to a default ramp
        when empty so the mapping never divides by zero.

    Returns
    -------
    tuple
        (ascii_art string, path to the saved .txt file, processed RGB image
        for display, metrics text with MSE/SSIM/PSNR).
    """
    # An empty ramp would raise IndexError/ZeroDivisionError in the mapping
    # helpers; substitute the default ramp instead of crashing.
    if not ascii_chars:
        ascii_chars = "@%#*+=-:. "

    # Resize to a fixed character width; 0.55 compensates for monospace
    # glyphs being taller than they are wide.
    new_width = 100
    height, width = input_image.shape[:2]
    aspect_ratio = height / width
    new_height = int(aspect_ratio * new_width * 0.55)
    resized_image = cv2.resize(input_image, (new_width, new_height))

    # Gradio supplies RGB (possibly RGBA or already-grayscale) arrays, so use
    # the RGB conversion codes — COLOR_BGR2GRAY would swap the R/B weights.
    if resized_image.ndim == 2:
        gray_image = resized_image
    elif resized_image.shape[2] == 4:
        gray_image = cv2.cvtColor(resized_image, cv2.COLOR_RGBA2GRAY)
    else:
        gray_image = cv2.cvtColor(resized_image, cv2.COLOR_RGB2GRAY)

    # Contrast stretch, then edge detection; invert so edges come out dark
    # (dense ramp characters) on a light background.
    adjusted_image = cv2.convertScaleAbs(gray_image, alpha=contrast, beta=0)
    edges = cv2.Canny(
        adjusted_image, threshold1=int(threshold1), threshold2=int(threshold2)
    )
    processed_image = cv2.bitwise_not(edges)

    # Render every pixel row as a line of ramp characters.
    ascii_art = "\n".join(
        "".join(_pixel_to_ascii(pixel, ascii_chars) for pixel in row)
        for row in processed_image
    )

    # Persist the art so the UI can offer it as a downloadable file.
    with open("ascii_art.txt", "w") as f:
        f.write(ascii_art)

    # Single-channel edge map -> RGB for the Gradio image component.
    processed_image_rgb = cv2.cvtColor(processed_image, cv2.COLOR_GRAY2RGB)

    # Rebuild an image from the ASCII art and compare it against the
    # (resized) grayscale original.
    ascii_image = _ascii_to_image(ascii_art, ascii_chars)
    normalized_original = cv2.resize(
        gray_image, (ascii_image.shape[1], ascii_image.shape[0])
    )

    mse = _calculate_mse(normalized_original, ascii_image)
    ssim = compare_ssim(normalized_original, ascii_image)
    psnr = _calculate_psnr(normalized_original, ascii_image)

    metrics_text = f"MSE: {mse:.2f}\nSSIM: {ssim:.4f}\nPSNR: {psnr:.2f} dB"
    return ascii_art, "ascii_art.txt", processed_image_rgb, metrics_text
# Wire up the Gradio UI: five input widgets feeding ascii_art_generator,
# four output widgets for the art, the download, the edge image and metrics.
_input_components = [
    gr.Image(type="numpy", label="Upload Image"),
    gr.Slider(0.5, 3.0, step=0.1, value=1.5, label="Contrast"),
    gr.Slider(0, 255, step=1, value=50, label="Edge Detection Threshold1"),
    gr.Slider(0, 255, step=1, value=150, label="Edge Detection Threshold2"),
    gr.Textbox(value="@%#*+=-:. ", label="ASCII Characters (Dark to Light)"),
]
_output_components = [
    gr.Textbox(label="ASCII Art"),
    gr.File(label="Download ASCII Art as Text File"),
    gr.Image(type="numpy", label="Processed Image"),
    gr.Textbox(label="Performance Metrics"),
]
iface = gr.Interface(
    fn=ascii_art_generator,
    inputs=_input_components,
    outputs=_output_components,
    title="Interactive ASCII Art Generator with Performance Metric",
    description="Upload an image and adjust parameters to convert it into ASCII art. The performance metric quantifies the similarity between the original image and the ASCII art.",
    allow_flagging="never",
)
# Launch with a public share link and verbose debug output.
iface.launch(debug=True, share=True)