import gradio as gr
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor
from PIL import Image
import torch
import numpy as np
# --- Documentation Strings ---
# Markdown blobs rendered inside the collapsible "Documentation" accordion
# of the Gradio UI. Text is user-facing; edit with care.

# Section 1: quick-start steps for the app.
USAGE_GUIDELINES = """
## 1. Quick Start Guide: CellVision AI (Grayscale Mask)
CellVision AI generates a grayscale segmentation mask for microscopy images.
Steps:
1. Upload your image.
2. Click "Analyze Image".
3. Review the gray-white mask result.
"""

# Section 2: accepted input formats; the processor resizes to 512x512.
INPUT_EXPLANATION = """
## 2. Input Requirements
| Field | Format |
|-------|--------|
| Image Upload | JPG / PNG |
Image is resized to 512×512 before inference.
"""

# Section 3: meaning of the gray/white mask produced by segment_image().
OUTPUT_EXPLANATION = """
## 3. Output Description (Gray & White Mask)
• Background = White
• Segmented Objects = Gray
• Enlarged by 300% (3×)
• Subtle grayscale research-style output
"""
# --------------------
# Model
# --------------------
# Pre-processor for the model inputs; do_reduce_labels=False keeps the raw
# label ids unchanged (presumably so class 0 stays the background id — see
# the mask logic in segment_image).
processor = SegformerImageProcessor(do_reduce_labels=False)

# SegFormer-B0 checkpoint fine-tuned on ADE20K at 512x512, switched straight
# into inference mode (eval() returns the module itself, so chaining is safe).
model = SegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b0-finetuned-ade-512-512"
).eval()
def segment_image(input_image, scale_factor=3):
    """Segment a microscopy image and return an enlarged gray/white mask.

    Args:
        input_image: PIL image uploaded through the Gradio UI, or ``None``
            when the user has not uploaded anything yet.
        scale_factor: Integer upscale factor applied to the output mask.
            Defaults to 3 (the original hard-coded 300% enlargement).

    Returns:
        A PIL grayscale image where background (predicted class 0) is white
        (255) and every other class is gray (128), upscaled by
        ``scale_factor``; ``None`` when no image was supplied.
    """
    if input_image is None:
        # Surface a non-fatal warning in the UI instead of crashing.
        gr.Warning("Upload an image first.")
        return None

    # Processor handles resizing/normalization to the model's 512x512 input.
    inputs = processor(images=input_image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # logits: (batch, num_classes, H, W) -> per-pixel class ids for sample 0.
    pred_mask = torch.argmax(outputs.logits, dim=1)[0].cpu().numpy()

    # Gray & white mask: background (class 0) -> white, everything else -> gray.
    gray_mask = np.where(pred_mask == 0, 255, 128).astype(np.uint8)
    output_image = Image.fromarray(gray_mask)

    # NEAREST resampling keeps class boundaries crisp when enlarging.
    new_size = (output_image.width * scale_factor, output_image.height * scale_factor)
    return output_image.resize(new_size, resample=Image.NEAREST)
# --------------------
# UI
# --------------------
# Two-column Gradio layout: image upload + submit button on the left,
# segmentation mask on the right, with collapsible docs and example images.
# Fix: corrected the typo "smeers" -> "smears" in the window title.
with gr.Blocks(title="CellVision AI - Segment the Malaria Cells from Blood smears", theme=gr.themes.Soft()) as demo:
    gr.Markdown("<h1 style='text-align:center; background-color:#1a1a1a; color:#00ffcc; padding:12px;'>CellVision AI - Grayscale Segmentation</h1>")

    # Collapsible panel built from the module-level markdown constants.
    with gr.Accordion(" Documentation", open=False):
        gr.Markdown(USAGE_GUIDELINES)
        gr.Markdown("---")
        gr.Markdown(INPUT_EXPLANATION)
        gr.Markdown("---")
        gr.Markdown(OUTPUT_EXPLANATION)

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("## Step 1: Upload Blood Smear Image")
            # Input component defined directly inside the column (no .render() needed).
            input_image = gr.Image(type="pil", label="Upload Microscopy Image", width=600, height=600)
            gr.Markdown("## Step 2: Click Submit for Segmentation")
            with gr.Row():
                submit_button = gr.Button("Analyze Image", variant="primary")
        with gr.Column(scale=1):
            gr.Markdown("## Output")
            # Output component defined directly inside the column (no .render() needed).
            output_image = gr.Image(type="pil", label="Gray & White Mask (3x)", width=600, height=600)

    gr.Markdown("---")
    gr.Markdown("## Example Images")
    # cache_examples=False: inference runs on click rather than being precomputed.
    gr.Examples(
        examples=["data/1.png", "data/2.png", "data/3.png"],
        inputs=input_image,
        outputs=output_image,
        fn=segment_image,
        cache_examples=False,
        label="Try with Sample Blood cell Images"
    )

    submit_button.click(segment_image, input_image, output_image)

if __name__ == "__main__":
    demo.launch()