Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,26 +1,12 @@
-import gradio as gr
+import streamlit as st
+from streamlit_back_camera_input import back_camera_input
+
 import matplotlib.pyplot as plt
 import tensorflow as tf
 
 loaded_model = tf.saved_model.load("model/")
 loaded_model = loaded_model.signatures["serving_default"]
 
-def js_to_prefere_the_back_camera_of_mobilephones():
-    custom_html = """
-    <script>
-    const originalGetUserMedia = navigator.mediaDevices.getUserMedia.bind(navigator.mediaDevices);
-
-    navigator.mediaDevices.getUserMedia = (constraints) => {
-        if (!constraints.video.facingMode) {
-            constraints.video.facingMode = {ideal: "environment"};
-        }
-        constraints.video.transform = [{flipX: true}];
-        return originalGetUserMedia(constraints);
-    };
-    </script>
-    """
-    return custom_html
-
 def get_target_shape(original_shape):
     original_aspect_ratio = original_shape[0] / original_shape[1]
 
@@ -111,13 +97,7 @@ def compute_saliency(input_image, alpha=0.65):
     return blended_image
 
 
-
-    "examples/kirsten-frank-o1sXiz_LU1A-unsplash.jpg",
-    "examples/oscar-fickel-F5ze5FkEu1g-unsplash.jpg",
-    "examples/ting-tian-_79ZJS8pV70-unsplash.jpg",
-    "examples/gina-domenique-LmrAUrHinqk-unsplash.jpg",
-    "examples/robby-mccullough-r05GkQBcaPM-unsplash.jpg",
-]
+
 
 with gr.Blocks(head=js_to_prefere_the_back_camera_of_mobilephones()) as demo:
     with gr.Row():
@@ -127,8 +107,5 @@ with gr.Blocks(head=js_to_prefere_the_back_camera_of_mobilephones()) as demo:
     btn.click(fn=compute_saliency, inputs=input_image, outputs=output_image, api_name="compute_saliency")
 
 if __name__ == "__main__":
-
-    #demo.description("A demo to predict where humans fixate on an image using a deep learning model trained on eye movement data. Upload an image file, take a snapshot from your webcam, or paste an image from the clipboard to compute the saliency map.")
-    #demo.article("For more information on the model, check out [GitHub](https://github.com/alexanderkroner/saliency) and the corresponding [paper](https://doi.org/10.1016/j.neunet.2020.05.004).")
-    #demo.allow_flagging("never")
+
     demo.launch()
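The hunks shown here add the Streamlit imports but not the code that calls them. As a minimal sketch only, not code from this commit: `back_camera_input` is typically wired to the existing `compute_saliency` roughly as below, assuming the component behaves like `st.camera_input` but defaults to the rear camera (returning the captured photo as a file-like object, or None), and that `compute_saliency` accepts an RGB array as it does when called from Gradio.

    # Hedged sketch, intended to sit in app.py next to compute_saliency.
    import numpy as np
    import streamlit as st
    from PIL import Image
    from streamlit_back_camera_input import back_camera_input

    photo = back_camera_input()  # renders the rear-camera capture widget
    if photo is not None:
        # Decode the captured photo into the RGB array compute_saliency expects.
        frame = np.array(Image.open(photo).convert("RGB"))
        st.image(compute_saliency(frame), caption="Saliency map")

This replaces the removed getUserMedia override: instead of injecting JavaScript via Gradio's `head=` parameter to request `facingMode: "environment"`, the Streamlit component handles back-camera selection itself.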