# muhammadhamza-stack — "update app" (commit 9dc7070)
# NOTE(review): these three lines were pasted-in repository/web-UI metadata,
# not Python; left here as a comment so the module remains importable.
import gradio as gr
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor
from PIL import Image
import torch
import numpy as np
# --- Documentation Strings ---
# Markdown blobs rendered inside the "Documentation" accordion of the UI below.
# Quick-start steps shown to the user (upload -> analyze -> review).
USAGE_GUIDELINES = """
## 1. Quick Start Guide: CellVision AI (Grayscale Mask)
CellVision AI generates a grayscale segmentation mask for microscopy images.
Steps:
1. Upload your image.
2. Click "Analyze Image".
3. Review the gray-white mask result.
"""
# Accepted input formats; the 512x512 claim refers to the model's expected
# input size (nvidia/segformer-b0-finetuned-ade-512-512).
INPUT_EXPLANATION = """
## 2. Input Requirements
| Field | Format |
|-------|--------|
| Image Upload | JPG / PNG |
Image is resized to 512×512 before inference.
"""
# Describes the output of segment_image(): class 0 -> white, others -> gray,
# scaled 3x with nearest-neighbor resampling.
OUTPUT_EXPLANATION = """
## 3. Output Description (Gray & White Mask)
• Background = White
• Segmented Objects = Gray
• Enlarged by 300% (3×)
• Subtle grayscale research-style output
"""
# --------------------
# Model
# --------------------
# do_reduce_labels=False keeps class index 0 in the predicted mask, which
# segment_image() below relies on (it maps class 0 to white background).
processor = SegformerImageProcessor(do_reduce_labels=False)
# NOTE(review): this checkpoint is SegFormer-B0 fine-tuned on ADE20K scene
# segmentation, not on malaria blood smears — confirm it is the intended model.
model = SegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b0-finetuned-ade-512-512"
)
model.eval()  # inference mode: disables dropout / batch-norm updates
def segment_image(input_image):
    """Run SegFormer semantic segmentation and return a 3x grayscale mask.

    Args:
        input_image: PIL image uploaded by the user, or None when nothing
            has been uploaded yet.

    Returns:
        A PIL grayscale image where class 0 is white (255) and every other
        predicted class is gray (128), enlarged 3x with nearest-neighbor
        resampling — or None (with a Gradio warning) when no image was given.
    """
    if input_image is None:
        gr.Warning("Upload an image first.")
        return None
    # Normalize to 3-channel RGB so RGBA / palette / grayscale uploads do not
    # break the processor's channel expectations.
    inputs = processor(images=input_image.convert("RGB"), return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    # argmax over the class dimension -> (H', W') class-index map.
    # NOTE(review): SegFormer logits are downsampled relative to the input
    # (typically H/4 x W/4); no upsampling to input resolution is done here,
    # so the mask below is at logits resolution — confirm this is intended.
    pred_mask = torch.argmax(logits, dim=1)[0].cpu().numpy()
    # Gray & White mask: class 0 -> white background, everything else -> gray.
    gray_mask = np.where(pred_mask == 0, 255, 128).astype(np.uint8)
    output_image = Image.fromarray(gray_mask)
    # Scale 3x; NEAREST keeps mask edges crisp (no interpolation blur).
    scale_factor = 3
    new_size = (output_image.width * scale_factor, output_image.height * scale_factor)
    return output_image.resize(new_size, resample=Image.NEAREST)
# --------------------
# UI
# --------------------
# Two-column layout: upload + submit on the left, mask output on the right,
# with collapsible documentation and clickable example images.
# Fix: window title typo "smeers" -> "smears".
with gr.Blocks(title="CellVision AI - Segment the Malaria Cells from Blood smears", theme=gr.themes.Soft()) as demo:
    gr.Markdown("<h1 style='text-align:center; background-color:#1a1a1a; color:#00ffcc; padding:12px;'>CellVision AI - Grayscale Segmentation</h1>")
    with gr.Accordion(" Documentation", open=False):
        gr.Markdown(USAGE_GUIDELINES)
        gr.Markdown("---")
        gr.Markdown(INPUT_EXPLANATION)
        gr.Markdown("---")
        gr.Markdown(OUTPUT_EXPLANATION)
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## Step 1: Upload Blood Smear Image")
            # Input component defined directly inside the column (no .render()).
            input_image = gr.Image(type="pil", label="Upload Microscopy Image", width=600, height=600)
            gr.Markdown("## Step 2: Click Submit for Segmentation")
            with gr.Row():
                submit_button = gr.Button("Analyze Image", variant="primary")
        with gr.Column(scale=1):
            gr.Markdown("## Output")
            # Output component defined directly inside the column (no .render()).
            output_image = gr.Image(type="pil", label="Gray & White Mask (3x)", width=600, height=600)
    gr.Markdown("---")
    gr.Markdown("## Example Images")
    # cache_examples=False: segmentation runs on demand when an example is
    # clicked, rather than being precomputed at app start.
    gr.Examples(
        examples=["data/1.png", "data/2.png", "data/3.png"],
        inputs=input_image,
        outputs=output_image,
        fn=segment_image,
        cache_examples=False,
        label="Try with Sample Blood cell Images"
    )
    # Wire the button: segment_image(input_image) -> output_image.
    submit_button.click(segment_image, input_image, output_image)
if __name__ == "__main__":
    demo.launch()