| | import gradio as gr |
| | import cv2 |
| | import torch |
| | import numpy as np |
| | import matplotlib.pyplot as plt |
| | from celldetection import fetch_model, to_tensor |
| |
|
| |
|
| | |
| |
|
# --- Static help text rendered inside the "Tips & Guidelines" accordion ---
# NOTE: these are user-facing markdown strings; their content is part of the
# app's behavior and is reproduced verbatim.

# Quick-start instructions: how to upload an image and run the model.
USAGE_GUIDELINES = """
## 1. Clear Setup and Run Instructions (Quick Start)
This application uses the advanced GINORO segmentation model, pre-trained for identifying cell nuclei in microscopy images.

1. **Preparation:** Ensure your image is a clear microscopy slide image, preferably showing distinct cell nuclei.
2. **Upload:** Click the 'Input Microscopy Image' box and upload your image (drag and drop, or click to select).
3. **Run:** Click the **"Run Segmentation"** button. If using an example, clicking the thumbnail will load and run the segmentation automatically.
4. **Review:** The result panel will display two images side-by-side: the Original (Left) and the Segmented result (Right).
"""

# Markdown table describing the single expected input field.
INPUT_EXPLANATION = """
## 2. Expected Inputs

| Input Field | Purpose | Requirement |
| :--- | :--- | :--- |
| **Input Microscopy Image** | The high-resolution image containing the cells you wish to analyze. | Must be an image file (PNG, JPG, TIF). Optimal results are achieved with clear, well-focused images typical of fluorescence microscopy (e.g., DAPI staining for nuclei). |

"""

# Description of the side-by-side output layout and contour color.
OUTPUT_EXPLANATION = """
## 3. Expected Outputs (Side-by-Side Segmentation)

The output is a single image combining the original input and the segmented result for easy comparison.

* **Left Side (Original):** The unmodified input image.
* **Right Side (Segmented):** The same image with outlines (contours) drawn over the detected cellular structures.
* **Contour Color:** The detected cell nuclei are outlined in **Blue**.

"""
| |
|
| | |
# Inference runs on CPU only. The identifier names a pretrained contour
# proposal network with a ResNeXt-101 U-Net backbone, fetched via
# celldetection's fetch_model; eval() disables dropout/batch-norm updates.
# NOTE(review): fetch_model presumably downloads weights on first run —
# confirm network access is available at startup.
device = 'cpu'
model = fetch_model('ginoro_CpnResNeXt101UNet-fbe875f1a3e5ce2c').to(device).eval()
| |
|
| | |
def segment(image):
    """Segment cell nuclei and return a side-by-side comparison image.

    Parameters:
        image: RGB uint8 numpy array as delivered by ``gr.Image(type="numpy")``,
            or None if the user clicked Run without uploading.

    Returns:
        RGB uint8 numpy array: the original image on the left, the contour
        overlay on the right, separated by a black vertical gap.

    Raises:
        gr.Error: if no image was provided.
    """
    if image is None:
        raise gr.Error("Please upload an image before running segmentation.")

    # Gradio numpy images are already RGB — no BGR->RGB conversion is needed.
    # (Converting here would feed the model a channel-swapped image.)
    img = image.astype(np.float32) / 255.0
    x = to_tensor(img, transpose=True, device=device, dtype=torch.float32)[None]

    with torch.no_grad():
        output = model(x)

    # First (and only) batch element's detected contours.
    contours = output['contours'][0]
    original = image.astype(np.uint8).copy()
    segmented = original.copy()

    # Outline each detected nucleus in blue (RGB), as documented in
    # OUTPUT_EXPLANATION.
    for contour in contours:
        pts = np.asarray(contour.cpu(), dtype=np.int32)
        cv2.drawContours(segmented, [pts], -1, (0, 0, 255), 2)

    # Compose: original | black gap | segmented, on one canvas.
    h, w, c = original.shape
    gap = 60
    canvas = np.zeros((h, w * 2 + gap, c), dtype=np.uint8)
    canvas[:, :w, :] = original
    canvas[:, w + gap:, :] = segmented

    # Canvas is RGB, which is what gr.Image expects — no final swap needed.
    return canvas
| |
|
| | |
# Bundled sample images; each entry is the argument list for one example row.
examples = [[f"./sample_data/{idx}.png"] for idx in (1, 2, 3)]
| |
|
| | |
| |
|
# UI layout: header, collapsible help, input/run/output widgets, examples.
with gr.Blocks(title="Cell Segmentation Demo (FZJ-INM1)") as demo:

    # Page header.
    gr.Markdown(
        """
        # Cell Segmentation Demo (FZJ-INM1)
        **Purpose:** Automatically identify and outline cell nuclei in microscopy images using a specialized neural network.
        """
    )

    # Collapsible help built from the module-level markdown constants.
    with gr.Accordion(" Tips & Guidelines ", open=False):
        gr.Markdown(USAGE_GUIDELINES)
        gr.Markdown("---")
        gr.Markdown(INPUT_EXPLANATION)
        gr.Markdown("---")
        gr.Markdown(OUTPUT_EXPLANATION)

    gr.Markdown("---")

    # Step 1: image upload.
    gr.Markdown("## Step 1: Upload an Image")
    input_image = gr.Image(type="numpy", label="Input Microscopy Image")

    # Step 2: run trigger.
    gr.Markdown("## Step 2: Click button")
    run_button = gr.Button("Run Segmentation", variant="primary")

    # Result panel (original vs. segmented, composed by segment()).
    gr.Markdown("## Output")
    output_image = gr.Image(label="Output: Original (Left) vs. Segmented (Right)")

    run_button.click(
        fn=segment,
        inputs=input_image,
        outputs=output_image,
    )

    gr.Markdown("---")
    gr.Markdown("## Examples ")

    gr.Examples(
        examples=examples,
        inputs=[input_image],
        outputs=output_image,
        fn=segment,
        # Without run_on_click, clicking a thumbnail only populates the input;
        # the label and USAGE_GUIDELINES promise segmentation runs on click.
        run_on_click=True,
        label="Click on an image thumbnail below to load and run a sample segmentation.",
    )
| |
|
# Bind to all interfaces on the default Gradio port (container-friendly).
demo.launch(server_name="0.0.0.0", server_port=7860)
| |
|
| |
|
| |
|
| |
|
| |
|
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| |
|
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |