cameron-d committed on
Commit
c00307f
·
verified ·
1 Parent(s): a9cf6b9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -27
app.py CHANGED
@@ -130,20 +130,20 @@ def generate_heatmap(image):
130
  return superimposed_img, prediction_text
131
 
132
  # Initialize models when the script loads
133
- print("Initializing models... This may take a moment.")
134
  initialize_models()
135
- print("Models initialized successfully!")
136
 
137
  # Create Gradio interface
138
  with gr.Blocks(title="Class Activation Heatmap Visualizer") as demo:
139
  gr.Markdown(
140
  """
141
- # 🔥 Class Activation Heatmap Visualizer
142
 
143
- Upload an image to see what parts of the image the neural network focuses on when making predictions.
144
  The heatmap shows which regions of the image are most important for the top predicted class.
145
 
146
- Adapted from: https://deeplearningwithpython.io/chapters/chapter10_interpreting-what-convnets-learn/#visualizing-heatmaps-of-class-activation
147
 
148
  **Model:** Xception trained on ImageNet (1,000 classes)
149
  """
@@ -157,6 +157,27 @@ with gr.Blocks(title="Class Activation Heatmap Visualizer") as demo:
157
  height=400
158
  )
159
  submit_btn = gr.Button("Generate Heatmap", variant="primary", size="lg")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
 
161
  with gr.Column():
162
  output_image = gr.Image(
@@ -170,28 +191,6 @@ with gr.Blocks(title="Class Activation Heatmap Visualizer") as demo:
170
  interactive=False
171
  )
172
 
173
- gr.Markdown(
174
- """
175
- ### How to interpret the heatmap:
176
- - **Red/Yellow regions**: Areas the model focuses on most for its prediction
177
- - **Blue/Purple regions**: Areas the model considers less important
178
- - The heatmap is overlaid at 40% opacity on your original image
179
- """
180
- )
181
-
182
- # Example images
183
- gr.Examples(
184
- examples=[
185
- ["elephant.jpg"],
186
- ["dog.jpg"],
187
- ["F1_car.jpg"],
188
- ["multiple_animals.jpg"],
189
- ["osprey.jpeg"],
190
- ],
191
- inputs=input_image,
192
- label="Try an example:"
193
- )
194
-
195
  # Connect the button to the function
196
  submit_btn.click(
197
  fn=generate_heatmap,
 
130
  return superimposed_img, prediction_text
131
 
132
  # Initialize models when the script loads
133
+ print("Initializing models... this may take a moment.")
134
  initialize_models()
135
+ print("Models initialized!")
136
 
137
  # Create Gradio interface
138
  with gr.Blocks(title="Class Activation Heatmap Visualizer") as demo:
139
  gr.Markdown(
140
  """
141
+ # Class Activation Heatmap Visualizer
142
 
143
+ Upload an image or choose one of the examples to see what parts of the image the neural network focuses on when making predictions.
144
  The heatmap shows which regions of the image are most important for the top predicted class.
145
 
146
+ Code adapted from: https://deeplearningwithpython.io/chapters/chapter10_interpreting-what-convnets-learn/#visualizing-heatmaps-of-class-activation
147
 
148
  **Model:** Xception trained on ImageNet (1,000 classes)
149
  """
 
157
  height=400
158
  )
159
  submit_btn = gr.Button("Generate Heatmap", variant="primary", size="lg")
160
+
161
+ # Example images
162
+ gr.Examples(
163
+ examples=[
164
+ ["images/elephant.jpg"],
165
+ ["images/dog.jpg"],
166
+ ["images/F1_car.jpg"],
167
+ ["images/multiple_animals.jpg"],
168
+ ["images/osprey.jpeg"]
169
+ ],
170
+ inputs=input_image,
171
+ label="Try an example:"
172
+ )
173
+
174
+ gr.Markdown(
175
+ """
176
+ ### How to interpret the heatmap:
177
+ - **Red/Yellow regions**: Areas the model focuses on most for its prediction
178
+ - **Blue/Purple regions**: Areas the model considers less important
179
+ """
180
+ )
181
 
182
  with gr.Column():
183
  output_image = gr.Image(
 
191
  interactive=False
192
  )
193
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
194
  # Connect the button to the function
195
  submit_btn.click(
196
  fn=generate_heatmap,