# Author: Saurav Chaudhari — commit 3e50a17 ("Add application file")
import gradio as gr
import pyiqa
import torch
import pandas as pd
import numpy as np
from PIL import Image
# Pick the compute device once at import time: GPU when available, else CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# (display label, pyiqa model key) pairs — a mix of statistical (BRISQUE),
# transformer (MUSIQ), and aesthetic (NIMA) approaches.
# Note: You can add more like 'niqe', 'clipiqa', etc.
_METRIC_SPECS = (
    ("BRISQUE (Lower=Better)", "brisque"),
    ("MUSIQ (Higher=Better)", "musiq"),
    ("NIMA (Higher=Better)", "nima"),
)

# Instantiate every metric up front so inference requests don't pay load time.
metrics = {
    label: pyiqa.create_metric(key, device=device)
    for label, key in _METRIC_SPECS
}
def analyze_image(input_img):
    """Score an uploaded image with every loaded IQA metric.

    Args:
        input_img: ``PIL.Image.Image`` from the Gradio component, or ``None``
            when nothing has been uploaded yet.

    Returns:
        ``pandas.DataFrame`` with columns ``"Model"`` and ``"Score"``
        (one row per metric, scores rounded to 4 decimals), or ``None``
        when ``input_img`` is ``None``.
    """
    if input_img is None:
        return None
    # Fix: normalize the mode first — uploads can be RGBA, palette, or
    # grayscale, which would hand the models an unexpected channel count.
    # float32 (not the numpy default float64) matches torch's convention.
    np_img = np.asarray(input_img.convert("RGB"), dtype=np.float32)
    np_img = np_img / 255.0  # scale pixel values to [0, 1] for pyiqa
    # HWC numpy array -> 1xCxHxW torch tensor on the chosen device.
    # NOTE(review): img2tensor inherits from BasicSR, where bgr2rgb defaults
    # to True; PIL data is already RGB — confirm channel order against the
    # installed pyiqa version.
    img_tensor = pyiqa.utils.img2tensor(np_img).unsqueeze(0).to(device)
    results = []
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        for name, metric in metrics.items():
            score = metric(img_tensor).item()
            results.append({"Model": name, "Score": round(score, 4)})
    return pd.DataFrame(results)
# Assemble the Gradio interface: image in on the left, score table out on the
# right, with static model descriptions underneath.
# ("demo" is kept as the top-level name — Hugging Face Spaces looks for it.)
with gr.Blocks(title="Multi-Model IQA Comparison") as demo:
    gr.Markdown("# 📸 Image Quality Checker")
    gr.Markdown("Upload an image to evaluate its quality score across various SOTA models.")
    with gr.Row():
        uploaded_image = gr.Image(type="pil", label="Input Image")
        scores_table = gr.Dataframe(label="Model Scores")
    score_button = gr.Button("Get Scores")
    # Clicking runs every loaded metric on the upload and fills the table.
    score_button.click(fn=analyze_image, inputs=uploaded_image, outputs=scores_table)
    gr.Markdown("### Model Descriptions:")
    for description in (
        "- **BRISQUE:** Statistical model focusing on naturalness and noise.",
        "- **MUSIQ:** Transformer-based model optimized for varied image sizes.",
        "- **NIMA:** Neural Aesthetic model that mimics human perception.",
    ):
        gr.Markdown(description)

demo.launch()