Spaces:
Build error
Build error
Commit ·
8f578aa
1
Parent(s): 2e1c089
updated app
Browse files- app.py +22 -29
- requirements.txt +1 -2
app.py
CHANGED
|
@@ -11,6 +11,7 @@ import tensorflow as tf
|
|
| 11 |
from tensorflow import keras
|
| 12 |
from tensorflow.keras import layers
|
| 13 |
from keras.applications.vgg16 import preprocess_input
|
|
|
|
| 14 |
|
| 15 |
# Image processing
|
| 16 |
import PIL
|
|
@@ -23,19 +24,19 @@ filename_model_VGG16 = 'vgg16_best.keras'
|
|
| 23 |
example_images_path = './example_images'
|
| 24 |
|
| 25 |
# Loading model
|
| 26 |
-
model =
|
| 27 |
|
| 28 |
# Defining parameters
|
| 29 |
-
breed_names_norm = [Chihuahua, papillon, beagle,
|
| 30 |
-
Yorkshire_terrier, Australian_terrier,
|
| 31 |
-
Scotch_terrier, golden_retriever,
|
| 32 |
-
malinois, kelpie, Doberman, miniature_pinscher,
|
| 33 |
-
Great_Dane, Pomeranian, standard_poodle, Mexican_hairless]
|
| 34 |
|
| 35 |
|
| 36 |
# Importing stopwords
|
| 37 |
-
with open('./stopwords/stopwords.txt') as file:
|
| 38 |
-
my_stopwords = {line.rstrip() for line in file}
|
| 39 |
|
| 40 |
# Function definitions
|
| 41 |
|
|
@@ -51,7 +52,7 @@ def load_img_path(img_path, target_size, show=False):
|
|
| 51 |
|
| 52 |
return img
|
| 53 |
|
| 54 |
-
def
|
| 55 |
"""
|
| 56 |
Returns a dictionary: predicted_breeds, where the
|
| 57 |
keys are [1,2,3] for the first, second and third more probable
|
|
@@ -61,36 +62,28 @@ def predict(model, img, show=False):
|
|
| 61 |
Parameters:
|
| 62 |
img: returned by the function load_img_path
|
| 63 |
"""
|
| 64 |
-
img_array = keras.preprocessing.image.img_to_array(
|
| 65 |
img_array = tf.expand_dims(img_array, 0) # Creates a batch axis
|
| 66 |
|
| 67 |
-
predictions = model.predict(img_array, verbose=0)
|
| 68 |
-
|
| 69 |
|
| 70 |
-
|
| 71 |
-
predicted_breeds = {}
|
| 72 |
-
for i in [1,2,3]:
|
| 73 |
-
idx = scores.argsort()[-i] # First breed is the last proba when sorted
|
| 74 |
-
name = breed_names_norm[idx]
|
| 75 |
-
confidence = round(scores[idx]*100,2)
|
| 76 |
-
predicted_breeds[i] = {'idx':idx,'name':name,'confidence':confidence}
|
| 77 |
-
|
| 78 |
-
return predicted_breeds
|
| 79 |
|
| 80 |
# --------------------------------------------------
|
| 81 |
|
| 82 |
examples = [
|
| 83 |
-
[
|
| 84 |
-
['
|
| 85 |
-
['
|
|
|
|
| 86 |
]
|
| 87 |
|
| 88 |
-
demo = gr.Interface(fn=
|
| 89 |
-
inputs=
|
| 90 |
-
outputs=
|
| 91 |
examples=examples)
|
| 92 |
|
| 93 |
|
| 94 |
if __name__ == "__main__":
|
| 95 |
-
demo.launch()
|
| 96 |
-
|
|
|
|
| 11 |
from tensorflow import keras
|
| 12 |
from tensorflow.keras import layers
|
| 13 |
from keras.applications.vgg16 import preprocess_input
|
| 14 |
+
from huggingface_hub import from_pretrained_keras
|
| 15 |
|
| 16 |
# Image processing
|
| 17 |
import PIL
|
|
|
|
| 24 |
example_images_path = './example_images'
|
| 25 |
|
| 26 |
# Loading model
|
| 27 |
+
# Load the fine-tuned 15-breed Keras classifier from the Hugging Face Hub.
# NOTE(review): this needs `huggingface_hub` at runtime, which is not listed
# in requirements.txt — confirm it is preinstalled in the Space image.
model = from_pretrained_keras('ana-bernal/keras_15_dog_breed_eff')
|
| 28 |
|
| 29 |
# Defining parameters
|
| 30 |
+
# Normalized breed labels, index-aligned with the model's output units.
breed_names_norm = [
    'Chihuahua', 'papillon', 'beagle', 'Yorkshire_terrier',
    'Australian_terrier', 'Scotch_terrier', 'golden_retriever',
    'malinois', 'kelpie', 'Doberman', 'miniature_pinscher',
    'Great_Dane', 'Pomeranian', 'standard_poodle', 'Mexican_hairless',
]
|
| 35 |
|
| 36 |
|
| 37 |
# Importing stopwords
|
| 38 |
+
# with open('./stopwords/stopwords.txt') as file:
|
| 39 |
+
# my_stopwords = {line.rstrip() for line in file}
|
| 40 |
|
| 41 |
# Function definitions
|
| 42 |
|
|
|
|
| 52 |
|
| 53 |
return img
|
| 54 |
|
| 55 |
+
def classify_image(inp):
    """
    Classify a dog image into one of the 15 known breeds.

    Parameters:
        inp: the image delivered by the Gradio Image input (array/PIL),
             assumed already sized to the model's expected input
             (180x180 per the interface config — TODO confirm against
             the loaded model).

    Returns:
        dict mapping each breed name in `breed_names_norm` to the
        model's confidence as a float, in the format expected by
        gr.Label.
    """
    # Convert to a float array and add a leading batch axis.
    img_array = keras.preprocessing.image.img_to_array(inp)
    img_array = tf.expand_dims(img_array, 0)  # creates a batch axis

    # Single-sample prediction; flatten (1, n_classes) -> (n_classes,).
    predictions = model.predict(img_array, verbose=0).flatten()

    # Pair each score with its breed name; zip avoids the previous
    # hard-coded class count of 15 and stays correct if the label list
    # and model output length change together.
    return {name: float(score)
            for name, score in zip(breed_names_norm, predictions)}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
# --------------------------------------------------
|
| 74 |
|
| 75 |
# Example images shown below the interface; each inner list is one
# sample for the single Image input.
examples = [
    ['example_images/01_test.jpg'],
    ['example_images/02_test.jpg'],
    ['example_images/03_test.jpg'],
    ['example_images/04_test.jpg'],
]

# The `shape=` kwarg was removed from gr.Image in Gradio 4.x; with the
# unpinned `gradio` requirement this raised at startup (the Space's
# "Build error"). `height`/`width` only control display size — the
# actual resize to the model's 180x180 input should happen before/in
# classify_image (NOTE(review): confirm the model tolerates the raw
# upload size or add an explicit resize).
demo = gr.Interface(fn=classify_image,
                    inputs=gr.Image(height=180, width=180),
                    outputs=gr.Label(num_top_classes=3),
                    examples=examples)

if __name__ == "__main__":
    demo.launch()
|
|
|
requirements.txt
CHANGED
|
@@ -1,5 +1,4 @@
|
|
| 1 |
gradio
|
| 2 |
joblib
|
| 3 |
tensorflow
|
| 4 |
-
tensorflow_hub
|
| 5 |
-
scikit-learn
|
|
|
|
| 1 |
gradio
|
| 2 |
joblib
|
| 3 |
tensorflow
|
| 4 |
+
tensorflow_hub
huggingface_hub
|
|
|