# Multi-model no-reference image quality assessment (IQA) demo: pyiqa + Gradio.
import gradio as gr
import pyiqa
import torch
import pandas as pd
import numpy as np
from PIL import Image
# Pick the compute device once at startup: prefer GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load a spread of no-reference IQA models (statistical, transformer-based,
# and aesthetic) keyed by a human-readable label for the results table.
# Note: You can add more like 'niqe', 'clipiqa', etc.
_MODEL_IDS = {
    "BRISQUE (Lower=Better)": "brisque",
    "MUSIQ (Higher=Better)": "musiq",
    "NIMA (Higher=Better)": "nima",
}
metrics = {
    label: pyiqa.create_metric(model_id, device=device)
    for label, model_id in _MODEL_IDS.items()
}
def analyze_image(input_img):
    """Score an uploaded image with every loaded IQA metric.

    Args:
        input_img: PIL image from the Gradio input, or ``None`` when the
            user clicks the button without uploading anything.

    Returns:
        ``pandas.DataFrame`` with columns ``"Model"`` and ``"Score"``
        (one row per metric), or ``None`` when no image was provided.
    """
    if input_img is None:
        return None
    # Force 3-channel RGB: Gradio uploads may be RGBA (PNG alpha) or
    # grayscale, which would yield a tensor shape the metrics reject.
    np_img = np.array(input_img.convert("RGB"))
    np_img = np_img / 255.0  # uint8 [0, 255] -> float [0, 1]
    # Standardize input for pyiqa: CHW float tensor with a batch dimension.
    img_tensor = pyiqa.utils.img2tensor(np_img).unsqueeze(0).to(device)
    results = []
    with torch.no_grad():  # inference only -- skip autograd bookkeeping
        for name, metric in metrics.items():
            score = metric(img_tensor).item()
            results.append({"Model": name, "Score": round(score, 4)})
    return pd.DataFrame(results)
# Create Gradio Interface: image input on the left, score table on the right,
# with a button wiring the upload through analyze_image.
with gr.Blocks(title="Multi-Model IQA Comparison") as demo:
    gr.Markdown("# 📸 Image Quality Checker")
    gr.Markdown("Upload an image to evaluate its quality score across various SOTA models.")
    with gr.Row():
        image_input = gr.Image(type="pil", label="Input Image")
        result_table = gr.Dataframe(label="Model Scores")
    btn = gr.Button("Get Scores")
    btn.click(fn=analyze_image, inputs=image_input, outputs=result_table)
    gr.Markdown("### Model Descriptions:")
    gr.Markdown("- **BRISQUE:** Statistical model focusing on naturalness and noise.")
    gr.Markdown("- **MUSIQ:** Transformer-based model optimized for varied image sizes.")
    gr.Markdown("- **NIMA:** Neural Aesthetic model that mimics human perception.")

# Guard the launch so importing this module (e.g. by deployment tooling)
# does not start a blocking server; running it as a script still does.
if __name__ == "__main__":
    demo.launch()