Commit 62cf953
Parent(s): a8cc2eb
initial commit

Files changed:
- README.md +3 -3
- app.py +119 -0
- examples/gina-domenique-LmrAUrHinqk-unsplash.jpg +0 -0
- examples/kirsten-frank-o1sXiz_LU1A-unsplash.jpg +0 -0
- examples/oscar-fickel-F5ze5FkEu1g-unsplash.jpg +0 -0
- examples/robby-mccullough-r05GkQBcaPM-unsplash.jpg +0 -0
- examples/ting-tian-_79ZJS8pV70-unsplash.jpg +0 -0
- model/saved_model.pb +3 -0
- requirements.txt +3 -0
README.md
CHANGED
@@ -1,10 +1,10 @@
 ---
-title: Saliency
+title: Visual Saliency Prediction
 emoji: ⚡
-colorFrom:
+colorFrom: pink
 colorTo: blue
 sdk: gradio
-sdk_version: 4.
+sdk_version: 4.26.0
 app_file: app.py
 pinned: false
 license: mit
app.py
ADDED
@@ -0,0 +1,119 @@
+import gradio as gr
+import matplotlib.pyplot as plt
+import tensorflow as tf
+
+loaded_model = tf.saved_model.load("model/")
+loaded_model = loaded_model.signatures["serving_default"]
+
+
+def get_target_shape(original_shape):
+    original_aspect_ratio = original_shape[0] / original_shape[1]
+
+    square_mode = abs(original_aspect_ratio - 1.0)
+    landscape_mode = abs(original_aspect_ratio - 240 / 320)
+    portrait_mode = abs(original_aspect_ratio - 320 / 240)
+
+    best_mode = min(square_mode, landscape_mode, portrait_mode)
+
+    if best_mode == square_mode:
+        target_shape = (320, 320)
+    elif best_mode == landscape_mode:
+        target_shape = (240, 320)
+    else:
+        target_shape = (320, 240)
+
+    return target_shape
+
+
+def preprocess_input(input_image, target_shape):
+    input_tensor = tf.expand_dims(input_image, axis=0)
+
+    input_tensor = tf.image.resize(
+        input_tensor, target_shape, preserve_aspect_ratio=True
+    )
+
+    vertical_padding = target_shape[0] - input_tensor.shape[1]
+    horizontal_padding = target_shape[1] - input_tensor.shape[2]
+
+    vertical_padding_1 = vertical_padding // 2
+    vertical_padding_2 = vertical_padding - vertical_padding_1
+
+    horizontal_padding_1 = horizontal_padding // 2
+    horizontal_padding_2 = horizontal_padding - horizontal_padding_1
+
+    input_tensor = tf.pad(
+        input_tensor,
+        [
+            [0, 0],
+            [vertical_padding_1, vertical_padding_2],
+            [horizontal_padding_1, horizontal_padding_2],
+            [0, 0],
+        ],
+    )
+
+    return (
+        input_tensor,
+        [vertical_padding_1, vertical_padding_2],
+        [horizontal_padding_1, horizontal_padding_2],
+    )
+
+
+def postprocess_output(
+    output_tensor, vertical_padding, horizontal_padding, original_shape
+):
+    output_tensor = output_tensor[
+        :,
+        vertical_padding[0] : output_tensor.shape[1] - vertical_padding[1],
+        horizontal_padding[0] : output_tensor.shape[2] - horizontal_padding[1],
+        :,
+    ]
+
+    output_tensor = tf.image.resize(output_tensor, original_shape)
+
+    output_array = output_tensor.numpy().squeeze()
+    output_array = plt.cm.inferno(output_array)[..., :3]
+
+    return output_array
+
+
+def compute_saliency(input_image, alpha=0.65):
+    if input_image is not None:
+        original_shape = input_image.shape[:2]
+        target_shape = get_target_shape(original_shape)
+
+        input_tensor, vertical_padding, horizontal_padding = preprocess_input(
+            input_image, target_shape
+        )
+
+        saliency_map = loaded_model(input_tensor)["output"]
+
+        saliency_map = postprocess_output(
+            saliency_map, vertical_padding, horizontal_padding, original_shape
+        )
+
+        blended_image = alpha * saliency_map + (1 - alpha) * input_image / 255
+
+        return blended_image
+
+
+examples = [
+    "examples/kirsten-frank-o1sXiz_LU1A-unsplash.jpg",
+    "examples/oscar-fickel-F5ze5FkEu1g-unsplash.jpg",
+    "examples/ting-tian-_79ZJS8pV70-unsplash.jpg",
+    "examples/gina-domenique-LmrAUrHinqk-unsplash.jpg",
+    "examples/robby-mccullough-r05GkQBcaPM-unsplash.jpg",
+]
+
+demo = gr.Interface(
+    fn=compute_saliency,
+    inputs=gr.Image(label="Input Image"),
+    outputs=gr.Image(label="Saliency Map"),
+    examples=examples,
+    title="Visual Saliency Prediction",
+    description="A demo to predict where humans fixate an image using a deep learning model trained on eye movement data. Upload an image file, take a snapshot from your webcam, or paste an image from the clipboard to compute the saliency map.",
+    article="For more information on the model, check out [GitHub](https://github.com/alexanderkroner/saliency) and the corresponding [paper](https://www.sciencedirect.com/science/article/pii/S0893608020301660).",
+    allow_flagging="never",
+)
+
+if __name__ == "__main__":
+    demo.queue().launch()
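As a usage sketch that is not part of the commit: because demo.launch() is guarded by the __main__ check, app.py can be imported and compute_saliency called directly on a NumPy image array. The snippet below decodes one of the bundled example images with TensorFlow (so no extra dependency is needed) and writes the blended overlay to an arbitrary output path.

import matplotlib.pyplot as plt
import tensorflow as tf

from app import compute_saliency  # importing app.py also loads the SavedModel

# Decode an example image into the H x W x 3 uint8 array that Gradio would pass in.
image = tf.io.decode_image(
    tf.io.read_file("examples/kirsten-frank-o1sXiz_LU1A-unsplash.jpg"), channels=3
).numpy()

blended = compute_saliency(image, alpha=0.65)  # RGB float array in [0, 1]
plt.imsave("saliency_overlay.png", blended)  # output filename is arbitrary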
examples/gina-domenique-LmrAUrHinqk-unsplash.jpg
ADDED
examples/kirsten-frank-o1sXiz_LU1A-unsplash.jpg
ADDED
examples/oscar-fickel-F5ze5FkEu1g-unsplash.jpg
ADDED
examples/robby-mccullough-r05GkQBcaPM-unsplash.jpg
ADDED
examples/ting-tian-_79ZJS8pV70-unsplash.jpg
ADDED
model/saved_model.pb
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:803688d92e101794f6ced50ca08b747706e385e7b815f309344632def6e1609b
+size 99864092
requirements.txt
ADDED
@@ -0,0 +1,3 @@
+gradio==4.26.0
+matplotlib==3.8.4
+tensorflow==2.16.1
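Also not part of the commit: a quick sanity check that a local environment matches these pins before running the app.

import gradio as gr
import matplotlib
import tensorflow as tf

# Compare the installed versions against the requirements pinned above.
for name, installed, pinned in [
    ("gradio", gr.__version__, "4.26.0"),
    ("matplotlib", matplotlib.__version__, "3.8.4"),
    ("tensorflow", tf.__version__, "2.16.1"),
]:
    status = "ok" if installed == pinned else f"expected {pinned}"
    print(f"{name} {installed} ({status})")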