# Source header (repository scrape residue, kept as comments so the file parses):
# author: muhammadhamza-stack
# commit: "update app" (1281dba)
import gradio as gr
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor
from PIL import Image
import torch
import numpy as np
# --- Documentation Strings ---
# Markdown shown inside the collapsible "Documentation" accordion in the UI.
# These are user-facing strings rendered by gr.Markdown — edit with care.

# Quick-start instructions for the end user.
USAGE_GUIDELINES = """
## 1. Quick Start Guide: HemaScan Pro (Binary Mask)
HemaScan Pro generates a high-contrast black & white segmentation mask.
1. Upload a blood smear image (JPG/PNG).
2. Click "Run Segmentation".
3. View the generated binary mask.
"""

# Describes the accepted upload formats (table is GitHub-flavored Markdown).
INPUT_EXPLANATION = """
## 2. Expected Inputs
| Field | Requirement |
|-------|------------|
| Upload Image | JPG / PNG blood smear image |
✔ Automatically resized to 512×512.
"""

# Describes the rendered output; the "400% (4×)" figure matches the
# scale factor applied in segment_image below.
OUTPUT_EXPLANATION = """
## 3. Output Description (Black & White Mask)
• Background = White
• Detected Regions = Black
• Enlarged by 400% (4×) for clarity
• Clean binary medical-style visualization
"""
# --------------------
# Model
# --------------------
# do_reduce_labels=False keeps the checkpoint's raw label ids, so class 0
# remains the first ADE20k class (segment_image relies on comparing to 0).
processor = SegformerImageProcessor(do_reduce_labels=False)
# NOTE(review): this is the generic SegFormer-B0 checkpoint fine-tuned on
# ADE20k scene parsing — not a blood-smear / malaria-specific model. Confirm
# these are the intended weights for this app.
model = SegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b0-finetuned-ade-512-512"
)
# Inference only: switch off dropout / batch-norm training behavior.
model.eval()
def segment_image(input_image, scale_factor=4):
    """Segment an uploaded image and return an upscaled binary mask.

    Args:
        input_image: PIL image from the Gradio upload widget, or None when
            the user submitted without uploading.
        scale_factor: Integer enlargement factor for the output mask.
            Defaults to 4 (the "400%" enlargement described in the docs);
            parameterized so callers can request other sizes.

    Returns:
        A PIL "L"-mode image — white (255) where the model predicted
        class 0, black (0) elsewhere — enlarged by ``scale_factor``,
        or None when no image was provided.
    """
    if input_image is None:
        # Surface a non-fatal warning in the UI instead of raising.
        gr.Warning("Please upload an image.")
        return None
    inputs = processor(images=input_image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits
    # Per-pixel argmax over class logits; [0] drops the batch dimension.
    pred_mask = torch.argmax(logits, dim=1)[0].cpu().numpy()
    # NOTE(review): class 0 of the ADE20k label map is treated as
    # "background" and rendered white; every other class becomes black.
    # Confirm this mapping is what the app intends for blood smears.
    binary_mask = np.where(pred_mask == 0, 255, 0).astype(np.uint8)
    output_image = Image.fromarray(binary_mask)
    # NEAREST resampling keeps the mask strictly binary (no grey halos).
    new_size = (output_image.width * scale_factor,
                output_image.height * scale_factor)
    return output_image.resize(new_size, resample=Image.NEAREST)
# --------------------
# UI
# --------------------
# Declarative Gradio layout: the order in which components are created
# inside the context managers determines their on-screen placement, so the
# statement order below is load-bearing.
with gr.Blocks(title="Malaria Cell Segmentation Tool") as demo:
    # Gradient banner title (raw HTML rendered by the Markdown component).
    gr.Markdown("<h1 style='text-align:center; background:linear-gradient(90deg,#4facfe,#00f2fe); color:white; padding:10px;'>HemaScan Pro - Binary Segmentation</h1>")
    # Collapsible help section built from the module-level doc strings.
    with gr.Accordion(" Documentation", open=False):
        gr.Markdown(USAGE_GUIDELINES)
        gr.Markdown("---")
        gr.Markdown(INPUT_EXPLANATION)
        gr.Markdown("---")
        gr.Markdown(OUTPUT_EXPLANATION)
    # Two equal-width columns: input on the left, result on the right.
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## Step 1: Upload Blood Smear Image")
            # Define Input component directly inside the column (No .render() needed)
            input_image = gr.Image(type="pil", label="Step 1: Upload Blood Smear Image", width=600, height=600)
            gr.Markdown("## Step 2: Click Submit for Segmentation")
            with gr.Row():
                submit_button = gr.Button("Submit for Segmentation", variant="primary")
        with gr.Column(scale=1):
            gr.Markdown("## Output")
            # Define Output component directly inside the column (No .render() needed)
            output_image = gr.Image(type="pil", label="Step 3: Predicted Masks", width=600, height=600)
    gr.Markdown("---")
    gr.Markdown("## Example Images")
    # NOTE(review): example paths are relative ("data/1.png", ...) — they must
    # exist relative to the working directory at launch; verify they ship with
    # the app. cache_examples=False means no masks are precomputed.
    gr.Examples(
        examples=["data/1.png", "data/2.png", "data/3.png"],
        inputs=input_image,
        outputs=output_image,
        fn=segment_image,
        cache_examples=False,
    )
    # Wire the button: runs segment_image(input_image) -> output_image.
    submit_button.click(segment_image, input_image, output_image)

if __name__ == "__main__":
    demo.launch()