akaashp15 committed on
Commit
5dc4333
·
1 Parent(s): 63f5c99

Upload 5 files

Browse files
Files changed (5) hide show
  1. flowers_model_run.py +51 -0
  2. flowers_train.py +154 -0
  3. model.tflite +3 -0
  4. requirements.txt +6 -0
  5. rose_example.png +0 -0
flowers_model_run.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import tensorflow as tf
2
+ import numpy as np
3
+
4
+ from flowers_train import class_names
5
+
6
# Loader parameters: these must match the values the model was trained with
# (see flowers_train.py), since the TFLite graph has a fixed input shape.
batch_size = 32
img_height = 180
img_width = 180

# Path to the converted TensorFlow Lite model produced by flowers_train.py.
TF_MODEL_FILE_PATH = 'model.tflite'
12
+
13
def flower_classification(img):
    """Classify a flower image with the TFLite model and return a message.

    Parameters
    ----------
    img : PIL.Image.Image or array-like
        Input image of any size; it is resized to (img_height, img_width)
        before inference.

    Returns
    -------
    str
        Human-readable message naming the most likely class from
        ``class_names`` and the softmax confidence in percent.
    """
    interpreter = tf.lite.Interpreter(model_path=TF_MODEL_FILE_PATH)

    img_array = tf.keras.utils.img_to_array(img)
    # Bug fix: the model expects a (img_height, img_width, 3) input, but the
    # incoming image (e.g. a Gradio upload) can be any size — resize first.
    img_array = tf.image.resize(img_array, (img_height, img_width))
    img_array = tf.expand_dims(img_array, 0)  # add batch dimension

    # 'serving_default' / 'rescaling_1_input' / 'dense_1' are the signature
    # names baked in by the Keras -> TFLite conversion in flowers_train.py.
    classify_lite = interpreter.get_signature_runner('serving_default')
    predictions_lite = classify_lite(rescaling_1_input=img_array)['dense_1']
    score_lite = tf.nn.softmax(predictions_lite)

    return_msg = "This image most likely belongs to {} with a {:.2f} percent confidence.".format(
        class_names[np.argmax(score_lite)], 100 * np.max(score_lite)
    )
    return return_msg
28
+
29
if __name__ == "__main__":
    # Smoke test, previously run unconditionally at import time: guard it so
    # importing this module (e.g. from a Gradio app) does not download an
    # image and run inference as a side effect.
    interpreter = tf.lite.Interpreter(model_path=TF_MODEL_FILE_PATH)

    sunflower_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/592px-Red_sunflower.jpg"
    sunflower_path = tf.keras.utils.get_file('Red_sunflower', origin=sunflower_url)

    # Load already resized to the model's expected input size.
    sunflower_img = tf.keras.utils.load_img(
        sunflower_path, target_size=(img_height, img_width)
    )

    img_array = tf.keras.utils.img_to_array(sunflower_img)
    img_array = tf.expand_dims(img_array, 0)

    # Show the available signatures for debugging.
    print(interpreter.get_signature_list())

    classify_lite = interpreter.get_signature_runner('serving_default')
    predictions_lite = classify_lite(rescaling_1_input=img_array)['dense_1']
    score_lite = tf.nn.softmax(predictions_lite)

    print(
        "This image most likely belongs to {} with a {:.2f} percent confidence."
        .format(class_names[np.argmax(score_lite)], 100 * np.max(score_lite))
    )
flowers_train.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import matplotlib.pyplot as plt
2
+ import numpy as np
3
+ import PIL
4
+ import requests
5
+ import tensorflow as tf
6
+
7
+ from tensorflow import keras
8
+ from tensorflow.keras import layers
9
+ from tensorflow.keras.models import Sequential
10
+
11
+ import pathlib
12
+
13
+
14
# Download the flower_photos archive (cached by Keras under ~/.keras) and
# work with it through pathlib.
dataset_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
data_dir = tf.keras.utils.get_file('flower_photos', origin=dataset_url, untar=True)
data_dir = pathlib.Path(data_dir)

# Report how many JPEG images the dataset contains.
image_count = len(list(data_dir.glob('*/*.jpg')))
print(image_count)

# Images of one class live under a sub-directory of the same name; open the
# first rose with PIL as a quick sanity check.
roses = list(data_dir.glob('roses/*'))
rose_0 = PIL.Image.open(str(roses[0]))
28
+
29
# Loader parameters (shared with flowers_model_run.py).
batch_size = 32
img_height = 180
img_width = 180

# Training split: 80% of the images.
# Bug fix: this loader previously requested subset = "validation", so the
# model was trained on the 20% validation split and train/val were the same
# data. It must request the "training" subset.
train_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset="training",
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size
)

# Validation split: the remaining 20%. The same seed guarantees the two
# subsets are disjoint.
val_ds = tf.keras.utils.image_dataset_from_directory(
    data_dir,
    validation_split=0.2,
    subset='validation',
    seed=123,
    image_size=(img_height, img_width),
    batch_size=batch_size
)
53
+
54
# Class labels, inferred from the sub-directory names. flowers_model_run.py
# imports this list to label its predictions, so it must stay module-level.
class_names = train_ds.class_names
# print(class_names)

# Let tf.data tune buffer sizes dynamically at runtime, and overlap
# preprocessing with training via caching, shuffling and prefetching.
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

# Example of rescaling pixel values into [0, 1] outside the model; note the
# model below also embeds its own Rescaling layer.
normalization_layer = layers.Rescaling(1./255)
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
68
+
69
# Keras model: three Conv/MaxPool stages followed by a dense classifier head.
num_classes = len(class_names)

model = Sequential([
    # Normalize uint8 pixels to [0, 1] inside the model so callers can feed
    # raw images.
    layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    # Raw logits (no softmax): the loss below uses from_logits=True.
    layers.Dense(num_classes)
])

# Loss/optimizer configuration for training.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

# Show the model architecture. Fix: model.summary() prints itself and
# returns None, so wrapping it in print() emitted a stray "None" line.
model.summary()
93
+
94
+
95
# Train for a fixed number of epochs, evaluating on the validation set after
# each one.
epochs = 10
history = model.fit(
    train_ds,
    validation_data=val_ds,
    epochs=epochs
)

# Pull the per-epoch metrics out of the Keras History object for analysis.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']

loss = history.history['loss']
val_loss = history.history['val_loss']

epochs_range = range(epochs)

# Visualize training stats (disabled for headless runs):
# plt.figure(figsize = (8,8))
# plt.subplot(1, 2, 1)
# plt.plot(epochs_range, acc, label = 'Training Accuracy')
# plt.plot(epochs_range, val_acc, label = 'Validation Accuracy')
# plt.legend(loc = 'lower right')
# plt.title('Training and Validation Accuracy')

# plt.subplot(1, 2, 2)
# plt.plot(epochs_range, loss, label= 'Training Loss')
# plt.plot(epochs_range, val_loss, label= 'Validation Loss')
# plt.legend(loc = 'upper right')
# plt.title('Training and Validation Loss')
# plt.show()
126
+
127
# Sanity-check the trained model on a single downloaded sunflower photo.
sunflower_url = "https://storage.googleapis.com/download.tensorflow.org/example_images/592px-Red_sunflower.jpg"
sunflower_path = tf.keras.utils.get_file('Red_sunflower', origin=sunflower_url)

# Load already resized to the model's input shape, then add a batch axis.
sunflower_img = tf.keras.utils.load_img(
    sunflower_path, target_size=(img_height, img_width)
)
img_array = tf.keras.utils.img_to_array(sunflower_img)
img_array = tf.expand_dims(img_array, 0)

# The model outputs logits; softmax turns them into class probabilities.
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])

print(
    "This image most likely belongs to {} with a {:.2f} percent confidence."
    .format(class_names[np.argmax(score)], 100 * np.max(score))
)

# Convert the trained Keras model to TensorFlow Lite and save it so that
# flowers_model_run.py can load it without retraining.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
    f.write(tflite_model)
154
+
model.tflite ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:245150e42934566332f6e599d74400f82a73417f1311f43a7ac50346142fc4f1
3
+ size 15961108
requirements.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ matplotlib == 3.6.2
2
+ numpy
3
+ pillow
4
+ tensorflow
5
+ pathlib  # NOTE(review): pathlib is stdlib since Python 3.4 — this PyPI backport is obsolete; consider removing
6
+ gradio
rose_example.png ADDED