muhammadhamza-stack committed on
Commit
45b4aee
·
1 Parent(s): e1e111e

resize the images

Browse files
Files changed (1) hide show
  1. app.py +67 -28
app.py CHANGED
@@ -3,72 +3,111 @@ from transformers import SegformerForSemanticSegmentation, SegformerImageProcess
3
  from PIL import Image
4
  import torch
5
  import numpy as np
6
- import os
7
 
8
  # --- Documentation Strings ---
9
 
10
  USAGE_GUIDELINES = """
11
- ## Quick Start: HemaScan Segmentation
12
- Upload an image of a blood smear to generate a segmentation mask.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  """
14
 
15
- INPUT_EXPLANATION = "Upload a JPG or PNG blood smear image (512x512 auto-resized)."
16
- OUTPUT_EXPLANATION = "Predicted grayscale mask highlighting detected objects (scaled 4× for clarity)."
 
 
 
 
 
 
 
 
 
 
17
 
18
  # --------------------
19
- # Core Pipeline Functions
20
  # --------------------
21
  processor = SegformerImageProcessor(do_reduce_labels=False)
22
- model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
 
 
23
  model.eval()
24
 
 
 
 
 
 
 
 
 
 
25
  def segment_image(input_image):
26
  if input_image is None:
27
- gr.Warning("Upload an image.")
28
  return None
29
-
30
  inputs = processor(images=input_image, return_tensors="pt")
 
31
  with torch.no_grad():
32
  outputs = model(**inputs)
33
- logits = outputs.logits
34
 
 
35
  pred_mask = torch.argmax(logits, dim=1)[0].cpu().numpy()
36
- num_classes = logits.shape[1]
37
- # normalized_mask = (pred_mask * (255 // num_classes)).astype(np.uint8)
38
- # Make the mask white and the background black
39
- normalized_mask = np.where(pred_mask > 0, 255, 0).astype(np.uint8)
40
- output_image = Image.fromarray(normalized_mask)
41
 
42
- # Bigger mask (4x)
 
 
 
 
 
 
43
  scale_factor = 4
44
  new_size = (output_image.width * scale_factor, output_image.height * scale_factor)
45
  return output_image.resize(new_size, resample=Image.NEAREST)
46
 
47
  # --------------------
48
- # Gradio UI
49
  # --------------------
50
- with gr.Blocks(title="HemaScan Segmentation Tool") as demo:
51
- gr.Markdown("<h1 style='text-align:center; background: linear-gradient(90deg, #4facfe 0%, #00f2fe 100%); padding: 10px; color:white;'>HemaScan Segmentation Tool</h1>")
52
- gr.Markdown("Analyze blood smear images and generate segmentation masks.")
53
 
54
- with gr.Accordion("Tips & Guidelines", open=False):
55
  gr.Markdown(USAGE_GUIDELINES)
 
56
  gr.Markdown(INPUT_EXPLANATION)
 
57
  gr.Markdown(OUTPUT_EXPLANATION)
58
 
59
- input_image = gr.Image(type="pil", label="Upload Blood Smear Image")
60
- submit_button = gr.Button("Segment Image", variant="primary")
61
- output_image = gr.Image(type="pil", label="Predicted Mask (4x)")
62
 
63
  gr.Examples(
64
  examples=["data/1.png", "data/2.png", "data/3.png", "data/211.png"],
65
- inputs=[input_image],
66
- outputs=[output_image],
67
  fn=segment_image,
68
- cache_examples=False
69
  )
70
 
71
- submit_button.click(fn=segment_image, inputs=input_image, outputs=output_image)
72
 
73
  if __name__ == "__main__":
74
  demo.launch()
 
3
  from PIL import Image
4
  import torch
5
  import numpy as np
 
6
 
7
# --- Documentation Strings ---
# User-facing Markdown shown in the "Documentation & Usage" accordion.
# Mojibake from a Windows-1252 mis-decode is repaired here: βœ” -> ✔,
# β€’ -> •, Γ— -> ×. These strings are rendered verbatim by gr.Markdown.

USAGE_GUIDELINES = """
## 1. Quick Start Guide: HemaScan Pro

HemaScan Pro uses an advanced semantic segmentation AI model to detect structural regions in microscopic blood smear images.

1. **Upload Image** – Select a JPG or PNG blood smear image.
2. **Try Samples** – Click on example images for quick testing.
3. **Run Analysis** – Press the "Run Segmentation" button.
4. **View Results** – A high-visibility color segmentation mask will appear.
"""

INPUT_EXPLANATION = """
## 2. Expected Inputs

| Input Field | Description | Format |
|-------------|------------|--------|
| Upload Image | Microscopic blood smear image | JPG / PNG |

✔ Image is automatically resized to 512×512 for processing.
"""

OUTPUT_EXPLANATION = """
## 3. Expected Outputs (Color Segmentation Mask)

The output is a **color-coded segmentation mask**:

• Each detected object category is assigned a distinct color.
• Enhances boundary clarity between cells and background.
• Mask is enlarged by **400% (4×)** for improved visibility.

### Example Testing
Click any example image below to automatically run segmentation.
"""
42
 
43
# --------------------
# Model
# --------------------
# do_reduce_labels=False keeps label ids as-is (no background-id shifting);
# class id 0 is later rendered as black by the palette.
processor = SegformerImageProcessor(do_reduce_labels=False)
# Pretrained SegFormer-B0 checkpoint; per its name it was fine-tuned on
# ADE (150 classes) at 512x512. Downloaded from the Hugging Face hub on
# first run — this module-level call does network I/O.
model = SegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b0-finetuned-ade-512-512"
)
model.eval()  # inference only: disables dropout / batch-norm updates
51
 
52
# Color lookup table used to render predicted class ids as RGB.
def create_color_palette(num_classes=150):
    """Return a deterministic ``(num_classes, 3)`` uint8 array of RGB colors.

    Index 0 (background) is forced to black so unsegmented regions stay dark.

    A local ``np.random.default_rng(42)`` Generator is used instead of
    ``np.random.seed(42)``: the old call reseeded NumPy's *global* RNG as a
    module-import side effect, leaking into every later ``np.random`` call
    in the process. Note also that ``np.random.randint``'s upper bound is
    exclusive, so the value 255 could never appear; ``integers(0, 256)``
    makes the full 8-bit range available.

    Parameters
    ----------
    num_classes : int
        Number of palette entries (default 150, matching the ADE checkpoint).
    """
    rng = np.random.default_rng(42)  # local generator: no global-state mutation
    palette = rng.integers(0, 256, size=(num_classes, 3), dtype=np.uint8)
    palette[0] = [0, 0, 0]  # keep background black for contrast
    return palette

palette = create_color_palette()
60
+
61
def segment_image(input_image):
    """Segment a blood-smear image and return a colorized class mask.

    Parameters
    ----------
    input_image : PIL.Image.Image | None
        Image supplied by the Gradio component; ``None`` when nothing
        was uploaded.

    Returns
    -------
    PIL.Image.Image | None
        ``None`` (after emitting a UI warning) when no image was given;
        otherwise an RGB mask where each pixel's predicted class id is
        mapped through the module-level ``palette`` and the result is
        upscaled 4x.
    """
    if input_image is None:
        gr.Warning("Please upload an image.")
        return None

    # Preprocess (resize/normalize) and run the model without gradients.
    batch = processor(images=input_image, return_tensors="pt")
    with torch.no_grad():
        prediction = model(**batch)

    # Per-pixel class ids: argmax over the class dimension of the logits.
    class_map = prediction.logits.argmax(dim=1)[0].cpu().numpy()

    # Map each class id to its RGB color and build the mask image.
    rgb_mask = palette[class_map].astype(np.uint8)
    mask_image = Image.fromarray(rgb_mask)

    # Enlarge 4x; NEAREST keeps class boundaries crisp (no color blending).
    upscaled = (mask_image.width * 4, mask_image.height * 4)
    return mask_image.resize(upscaled, resample=Image.NEAREST)
84
 
85
# --------------------
# UI
# --------------------
# Gradio layout: banner, collapsible documentation, then the
# upload -> run -> mask workflow. The accordion label's mojibake
# emoji (πŸ“˜) is repaired to 📘.
with gr.Blocks(title="HemaScan Pro") as demo:
    gr.Markdown("<h1 style='text-align:center; background:linear-gradient(90deg,#4facfe,#00f2fe); color:white; padding:10px;'>HemaScan Pro - Blood Smear Segmentation</h1>")

    # Docs are collapsed by default so the tool itself stays in focus.
    with gr.Accordion("📘 Documentation & Usage", open=False):
        gr.Markdown(USAGE_GUIDELINES)
        gr.Markdown("---")
        gr.Markdown(INPUT_EXPLANATION)
        gr.Markdown("---")
        gr.Markdown(OUTPUT_EXPLANATION)

    input_image = gr.Image(type="pil", label="Upload Blood Smear Image", width=512, height=512)
    submit_button = gr.Button("Run Segmentation", variant="primary")
    output_image = gr.Image(type="pil", label="Color Segmentation Mask (4x)", width=512, height=512)

    # Clicking an example feeds it straight through segment_image;
    # cache_examples=False so masks are recomputed each run.
    gr.Examples(
        examples=["data/1.png", "data/2.png", "data/3.png", "data/211.png"],
        inputs=input_image,
        outputs=output_image,
        fn=segment_image,
        cache_examples=False,
    )

    submit_button.click(segment_image, input_image, output_image)

if __name__ == "__main__":
    demo.launch()