Kenvin1410 committed on
Commit
8c11839
·
verified ·
1 Parent(s): 51bcffe

Upload trained model

Browse files
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ base_model_best.keras filter=lfs diff=lfs merge=lfs -text
37
+ base_model_trained.hdf5 filter=lfs diff=lfs merge=lfs -text
38
+ fine_tune_model_best.keras filter=lfs diff=lfs merge=lfs -text
39
+ fine_tune_model_trained.hdf5 filter=lfs diff=lfs merge=lfs -text
base_model_best.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd9506031065ae9e9721e586262640ac9d57d7262f2866d27072519c01ba198c
3
+ size 243791847
base_model_trained.hdf5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:21cf4e2830672bbcd62963b1b8b58491b66b2098aebc68e09a9549166e6e6137
3
+ size 243607320
fine_tune_model_best.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77af2dd4f3918c1c8f327a09aa13b7687b366c0952fc69c049371a7f833d2a7e
3
+ size 308164497
fine_tune_model_trained.hdf5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6886051c2f9782bbff94e565556e2323d79810e5dce2076902d31f8038f85d4
3
+ size 307983704
models.ipynb ADDED
@@ -0,0 +1,606 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "name": "stdout",
10
+ "output_type": "stream",
11
+ "text": [
12
+ "Retrieving speedtest.net configuration...\n",
13
+ "Testing from VNPT (14.177.252.75)...\n",
14
+ "Retrieving speedtest.net server list...\n",
15
+ "Selecting best server based on ping...\n",
16
+ "Hosted by Viettel IDC (Vinh) [261.88 km]: 10.311 ms\n",
17
+ "Testing download speed................................................................................\n",
18
+ "Download: 184.35 Mbit/s\n",
19
+ "Testing upload speed......................................................................................................\n",
20
+ "Upload: 202.71 Mbit/s\n"
21
+ ]
22
+ }
23
+ ],
24
+ "source": [
25
+ "import tensorflow as tf\n",
26
+ "import matplotlib.pyplot as plt\n",
27
+ "import h5py\n",
28
+ "\n",
29
+ "tf.get_logger().setLevel('ERROR')\n",
30
+ "!curl -s https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py | python -"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "execution_count": 2,
36
+ "metadata": {},
37
+ "outputs": [],
38
+ "source": [
39
+ "TRAIN_PATH = 'D:/UETCodeCamp/dataset/dataset/Images/Train'\n",
40
+ "VALIDATE_PATH = 'D:/UETCodeCamp/dataset/dataset/Images/Validate'\n",
41
+ "TEST_PATH = 'D:/UETCodeCamp/dataset/dataset/Images/Test'"
42
+ ]
43
+ },
44
+ {
45
+ "cell_type": "code",
46
+ "execution_count": 3,
47
+ "metadata": {},
48
+ "outputs": [],
49
+ "source": [
50
+ "import os\n",
51
+ "PATH = 'Models/ResNet152V2'\n",
52
+ "\n",
53
+ "BASE_MODEL_BEST = os.path.join(PATH, 'base_model_best.hdf5')\n",
54
+ "BASE_MODEL_TRAINED = os.path.join(PATH, 'base_model_trained.hdf5')\n",
55
+ "BASE_MODEL_FIG = os.path.join(PATH, 'base_model_fig.jpg')\n",
56
+ "\n",
57
+ "FINE_TUNE_MODEL_BEST = os.path.join(PATH, 'fine_tune_model_best.hdf5')\n",
58
+ "FINE_TUNE_MODEL_TRAINED = os.path.join(PATH, 'fine_tune_model_trained.hdf5')\n",
59
+ "FINE_TUNE_MODE_FIG = os.path.join(PATH, 'fine_tune_model_fig.jpg')"
60
+ ]
61
+ },
62
+ {
63
+ "cell_type": "code",
64
+ "execution_count": 4,
65
+ "metadata": {},
66
+ "outputs": [],
67
+ "source": [
68
+ "IMAGE_SIZE = (300, 300)\n",
69
+ "BATCH_SIZE = 128"
70
+ ]
71
+ },
72
+ {
73
+ "cell_type": "code",
74
+ "execution_count": 5,
75
+ "metadata": {},
76
+ "outputs": [],
77
+ "source": [
78
+ "from tensorflow.keras.preprocessing.image import ImageDataGenerator\n",
79
+ "train_generator = ImageDataGenerator(\n",
80
+ " rescale = 1./255,\n",
81
+ " rotation_range = 40, \n",
82
+ " width_shift_range = 0.2, \n",
83
+ " height_shift_range = 0.2,\n",
84
+ " shear_range = 0.2,\n",
85
+ " zoom_range = 0.2,\n",
86
+ " horizontal_flip = True\n",
87
+ ")\n",
88
+ "validate_generator = ImageDataGenerator(rescale=1./255)\n",
89
+ "test_generator = ImageDataGenerator(rescale=1./255)"
90
+ ]
91
+ },
92
+ {
93
+ "cell_type": "code",
94
+ "execution_count": 6,
95
+ "metadata": {},
96
+ "outputs": [
97
+ {
98
+ "name": "stdout",
99
+ "output_type": "stream",
100
+ "text": [
101
+ "d:\\UETCodeCamp\\Model\n"
102
+ ]
103
+ }
104
+ ],
105
+ "source": [
106
+ "print(os.getcwd())"
107
+ ]
108
+ },
109
+ {
110
+ "cell_type": "code",
111
+ "execution_count": 7,
112
+ "metadata": {},
113
+ "outputs": [
114
+ {
115
+ "name": "stdout",
116
+ "output_type": "stream",
117
+ "text": [
118
+ "Found 18751 images belonging to 38 classes.\n",
119
+ "Found 2757 images belonging to 38 classes.\n",
120
+ "Found 5169 images belonging to 38 classes.\n"
121
+ ]
122
+ }
123
+ ],
124
+ "source": [
125
+ "generated_train_data = train_generator.flow_from_directory(TRAIN_PATH, target_size=IMAGE_SIZE, batch_size=BATCH_SIZE)\n",
126
+ "generated_validate_data = validate_generator.flow_from_directory(VALIDATE_PATH, target_size=IMAGE_SIZE, batch_size=BATCH_SIZE)\n",
127
+ "generated_test_data = test_generator.flow_from_directory(TEST_PATH, target_size=IMAGE_SIZE)"
128
+ ]
129
+ },
130
+ {
131
+ "cell_type": "code",
132
+ "execution_count": 8,
133
+ "metadata": {},
134
+ "outputs": [],
135
+ "source": [
136
+ "CLASSES = 38\n",
137
+ "INITIAL_EPOCHS = 15\n",
138
+ "FINE_TUNE_EPOCHS = 15\n",
139
+ "TOTAL_EPOCHS = INITIAL_EPOCHS + FINE_TUNE_EPOCHS\n",
140
+ "FINE_TUNE_AT = 516"
141
+ ]
142
+ },
143
+ {
144
+ "cell_type": "code",
145
+ "execution_count": 9,
146
+ "metadata": {},
147
+ "outputs": [],
148
+ "source": [
149
+ "from tensorflow.keras.applications.resnet_v2 import ResNet152V2\n",
150
+ "from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout\n",
151
+ "from tensorflow.keras.models import Model"
152
+ ]
153
+ },
154
+ {
155
+ "cell_type": "code",
156
+ "execution_count": 10,
157
+ "metadata": {},
158
+ "outputs": [],
159
+ "source": [
160
+ "pretrained_model = ResNet152V2(weights='imagenet', include_top=False)\n",
161
+ "last_output = pretrained_model.output\n",
162
+ "x = GlobalAveragePooling2D()(last_output)\n",
163
+ "x = Dense(512, activation='relu')(x)\n",
164
+ "x = Dropout(0.2)(x)\n",
165
+ "outputs = Dense(CLASSES, activation='softmax')(x)\n",
166
+ "model = Model(inputs=pretrained_model.input, outputs=outputs)\n"
167
+ ]
168
+ },
169
+ {
170
+ "cell_type": "code",
171
+ "execution_count": 11,
172
+ "metadata": {},
173
+ "outputs": [],
174
+ "source": [
175
+ "from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n",
176
+ "base_checkpointer = ModelCheckpoint(\n",
177
+ " filepath=BASE_MODEL_BEST.replace('.hdf5', '.keras'), \n",
178
+ " save_best_only=True, \n",
179
+ " verbose=1\n",
180
+ ")\n",
181
+ "\n",
182
+ "fine_tune_checkpointer = ModelCheckpoint(\n",
183
+ " filepath=FINE_TUNE_MODEL_BEST.replace('.hdf5', '.keras'), \n",
184
+ " save_best_only=True,\n",
185
+ " verbose=1, \n",
186
+ ")\n",
187
+ "\n",
188
+ "\n",
189
+ "# Stop if no improvement after 3 epochs\n",
190
+ "early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1)"
191
+ ]
192
+ },
193
+ {
194
+ "cell_type": "code",
195
+ "execution_count": 12,
196
+ "metadata": {},
197
+ "outputs": [],
198
+ "source": [
199
+ "import os\n",
200
+ "\n",
201
+ "os.makedirs('Models/ResNet152V2', exist_ok=True)"
202
+ ]
203
+ },
204
+ {
205
+ "cell_type": "code",
206
+ "execution_count": 13,
207
+ "metadata": {},
208
+ "outputs": [],
209
+ "source": [
210
+ "for layer in pretrained_model.layers: layer.trainable = False\n",
211
+ "model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])"
212
+ ]
213
+ },
214
+ {
215
+ "cell_type": "code",
216
+ "execution_count": 14,
217
+ "metadata": {},
218
+ "outputs": [
219
+ {
220
+ "name": "stdout",
221
+ "output_type": "stream",
222
+ "text": [
223
+ "Epoch 1/15\n"
224
+ ]
225
+ },
226
+ {
227
+ "name": "stderr",
228
+ "output_type": "stream",
229
+ "text": [
230
+ "c:\\Users\\VuongQuan14\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\keras\\src\\trainers\\data_adapters\\py_dataset_adapter.py:121: UserWarning: Your `PyDataset` class should call `super().__init__(**kwargs)` in its constructor. `**kwargs` can include `workers`, `use_multiprocessing`, `max_queue_size`. Do not pass these arguments to `fit()`, as they will be ignored.\n",
231
+ " self._warn_if_super_not_called()\n"
232
+ ]
233
+ },
234
+ {
235
+ "name": "stdout",
236
+ "output_type": "stream",
237
+ "text": [
238
+ "\u001b[1m 55/146\u001b[0m \u001b[32m━━━━━━━\u001b[0m\u001b[37m━━━━━━━━━━━━━\u001b[0m \u001b[1m33:44\u001b[0m 22s/step - accuracy: 0.2474 - loss: 2.9693"
239
+ ]
240
+ },
241
+ {
242
+ "name": "stderr",
243
+ "output_type": "stream",
244
+ "text": [
245
+ "c:\\Users\\VuongQuan14\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\PIL\\Image.py:1056: UserWarning: Palette images with Transparency expressed in bytes should be converted to RGBA images\n",
246
+ " warnings.warn(\n"
247
+ ]
248
+ },
249
+ {
250
+ "name": "stdout",
251
+ "output_type": "stream",
252
+ "text": [
253
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 24s/step - accuracy: 0.3447 - loss: 2.4622 \n",
254
+ "Epoch 1: val_loss improved from inf to 1.41906, saving model to Models/ResNet152V2\\base_model_best.keras\n",
255
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3788s\u001b[0m 26s/step - accuracy: 0.3453 - loss: 2.4589 - val_accuracy: 0.5796 - val_loss: 1.4191\n",
256
+ "Epoch 2/15\n",
257
+ "\u001b[1m 1/146\u001b[0m \u001b[37m━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[1m38:06\u001b[0m 16s/step - accuracy: 0.5469 - loss: 1.6239"
258
+ ]
259
+ },
260
+ {
261
+ "name": "stderr",
262
+ "output_type": "stream",
263
+ "text": [
264
+ "c:\\Users\\VuongQuan14\\AppData\\Local\\Programs\\Python\\Python310\\lib\\contextlib.py:153: UserWarning: Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches. You may need to use the `.repeat()` function when building your dataset.\n",
265
+ " self.gen.throw(typ, value, traceback)\n"
266
+ ]
267
+ },
268
+ {
269
+ "name": "stdout",
270
+ "output_type": "stream",
271
+ "text": [
272
+ "\n",
273
+ "Epoch 2: val_loss did not improve from 1.41906\n",
274
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m24s\u001b[0m 59ms/step - accuracy: 0.5469 - loss: 1.6239 - val_accuracy: 0.5217 - val_loss: 1.6660\n",
275
+ "Epoch 3/15\n",
276
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 21s/step - accuracy: 0.5845 - loss: 1.3994 \n",
277
+ "Epoch 3: val_loss improved from 1.41906 to 1.29527, saving model to Models/ResNet152V2\\base_model_best.keras\n",
278
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3664s\u001b[0m 25s/step - accuracy: 0.5846 - loss: 1.3993 - val_accuracy: 0.6131 - val_loss: 1.2953\n",
279
+ "Epoch 4/15\n",
280
+ "\u001b[1m 1/146\u001b[0m \u001b[37m━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[1m1:11:06\u001b[0m 29s/step - accuracy: 0.5938 - loss: 1.2649\n",
281
+ "Epoch 4: val_loss improved from 1.29527 to 1.22025, saving model to Models/ResNet152V2\\base_model_best.keras\n",
282
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m50s\u001b[0m 143ms/step - accuracy: 0.5938 - loss: 1.2649 - val_accuracy: 0.6667 - val_loss: 1.2203\n",
283
+ "Epoch 5/15\n",
284
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 21s/step - accuracy: 0.6200 - loss: 1.2513 \n",
285
+ "Epoch 5: val_loss improved from 1.22025 to 1.17931, saving model to Models/ResNet152V2\\base_model_best.keras\n",
286
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3524s\u001b[0m 24s/step - accuracy: 0.6201 - loss: 1.2513 - val_accuracy: 0.6481 - val_loss: 1.1793\n",
287
+ "Epoch 6/15\n",
288
+ "\u001b[1m 1/146\u001b[0m \u001b[37m━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[1m38:32\u001b[0m 16s/step - accuracy: 0.6484 - loss: 1.1963\n",
289
+ "Epoch 6: val_loss improved from 1.17931 to 1.05118, saving model to Models/ResNet152V2\\base_model_best.keras\n",
290
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m30s\u001b[0m 95ms/step - accuracy: 0.6484 - loss: 1.1963 - val_accuracy: 0.6232 - val_loss: 1.0512\n",
291
+ "Epoch 7/15\n",
292
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 44s/step - accuracy: 0.6569 - loss: 1.1265 \n",
293
+ "Epoch 7: val_loss did not improve from 1.05118\n",
294
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m8147s\u001b[0m 56s/step - accuracy: 0.6569 - loss: 1.1265 - val_accuracy: 0.6715 - val_loss: 1.1207\n",
295
+ "Epoch 8/15\n",
296
+ "\u001b[1m 1/146\u001b[0m \u001b[37m━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[1m4:56:50\u001b[0m 123s/step - accuracy: 0.7188 - loss: 0.9636\n",
297
+ "Epoch 8: val_loss did not improve from 1.05118\n",
298
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m192s\u001b[0m 475ms/step - accuracy: 0.7188 - loss: 0.9636 - val_accuracy: 0.6667 - val_loss: 1.1370\n",
299
+ "Epoch 9/15\n",
300
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 20s/step - accuracy: 0.6758 - loss: 1.0515 \n",
301
+ "Epoch 9: val_loss did not improve from 1.05118\n",
302
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m3502s\u001b[0m 23s/step - accuracy: 0.6758 - loss: 1.0515 - val_accuracy: 0.6864 - val_loss: 1.0649\n",
303
+ "Epoch 9: early stopping\n"
304
+ ]
305
+ },
306
+ {
307
+ "name": "stderr",
308
+ "output_type": "stream",
309
+ "text": [
310
+ "WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`. \n"
311
+ ]
312
+ }
313
+ ],
314
+ "source": [
315
+ "history = model.fit(\n",
316
+ " generated_train_data,\n",
317
+ " validation_data = generated_validate_data,\n",
318
+ " validation_steps = generated_validate_data.n // BATCH_SIZE,\n",
319
+ " steps_per_epoch = generated_train_data.n // BATCH_SIZE,\n",
320
+ " callbacks = [base_checkpointer, early_stopping],\n",
321
+ " epochs = INITIAL_EPOCHS,\n",
322
+ " verbose = 1,\n",
323
+ ")\n",
324
+ "model.save(BASE_MODEL_TRAINED)"
325
+ ]
326
+ },
327
+ {
328
+ "cell_type": "code",
329
+ "execution_count": 15,
330
+ "metadata": {},
331
+ "outputs": [
332
+ {
333
+ "name": "stdout",
334
+ "output_type": "stream",
335
+ "text": [
336
+ "(300, 300, 3)\n",
337
+ "{'Banh beo': 0, 'Banh bot loc': 1, 'Banh can': 2, 'Banh canh': 3, 'Banh chung': 4, 'Banh cuon': 5, 'Banh duc': 6, 'Banh gio': 7, 'Banh khot': 8, 'Banh mi': 9, 'Banh pia': 10, 'Banh tet': 11, 'Banh trang nuong': 12, 'Banh xeo': 13, 'Bun bo Hue': 14, 'Bun dau mam tom': 15, 'Bun mam': 16, 'Bun rieu': 17, 'Bun thit nuong': 18, 'Bánh cu đơ': 19, 'Bánh mì cay': 20, 'Bánh đa cua': 21, 'Bánh đậu xanh': 22, 'Bò bía': 23, 'Bún cá': 24, 'Ca kho to': 25, 'Canh chua': 26, 'Cao lau': 27, 'Chao long': 28, 'Com tam': 29, 'Cơm cháy': 30, 'Goi cuon': 31, 'Hu tieu': 32, 'Mi quang': 33, 'Nem chua': 34, 'Nem nướng': 35, 'Pho': 36, 'Xoi xeo': 37}\n"
338
+ ]
339
+ }
340
+ ],
341
+ "source": [
342
+ "print(generated_train_data.image_shape)\n",
343
+ "print(generated_train_data.class_indices)\n"
344
+ ]
345
+ },
346
+ {
347
+ "cell_type": "code",
348
+ "execution_count": 16,
349
+ "metadata": {},
350
+ "outputs": [],
351
+ "source": [
352
+ "\n",
353
+ "for layer in pretrained_model.layers[:FINE_TUNE_AT]: layer.trainable = False\n",
354
+ "for layer in pretrained_model.layers[FINE_TUNE_AT:]: layer.trainable = True"
355
+ ]
356
+ },
357
+ {
358
+ "cell_type": "code",
359
+ "execution_count": 17,
360
+ "metadata": {},
361
+ "outputs": [],
362
+ "source": [
363
+ "from tensorflow.keras.optimizers import SGD\n",
364
+ "model.compile(\n",
365
+ " optimizer = SGD(learning_rate=1e-4, momentum=0.9), \n",
366
+ " loss = 'categorical_crossentropy', \n",
367
+ " metrics = ['accuracy']\n",
368
+ ")"
369
+ ]
370
+ },
371
+ {
372
+ "cell_type": "code",
373
+ "execution_count": 18,
374
+ "metadata": {},
375
+ "outputs": [
376
+ {
377
+ "name": "stdout",
378
+ "output_type": "stream",
379
+ "text": [
380
+ "Epoch 9/30\n",
381
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 25s/step - accuracy: 0.5182 - loss: 1.7415 \n",
382
+ "Epoch 9: val_loss improved from inf to 1.11962, saving model to Models/ResNet152V2\\fine_tune_model_best.keras\n",
383
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m4345s\u001b[0m 30s/step - accuracy: 0.5186 - loss: 1.7399 - val_accuracy: 0.6819 - val_loss: 1.1196\n",
384
+ "Epoch 10/30\n",
385
+ "\u001b[1m 1/146\u001b[0m \u001b[37m━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[1m1:22:53\u001b[0m 34s/step - accuracy: 0.7031 - loss: 1.0933\n",
386
+ "Epoch 10: val_loss did not improve from 1.11962\n",
387
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m51s\u001b[0m 112ms/step - accuracy: 0.7031 - loss: 1.0933 - val_accuracy: 0.6667 - val_loss: 1.5291\n",
388
+ "Epoch 11/30\n",
389
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m0s\u001b[0m 33s/step - accuracy: 0.6629 - loss: 1.1745 \n",
390
+ "Epoch 11: val_loss improved from 1.11962 to 1.06462, saving model to Models/ResNet152V2\\fine_tune_model_best.keras\n",
391
+ "\u001b[1m146/146\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m5206s\u001b[0m 35s/step - accuracy: 0.6630 - loss: 1.1742 - val_accuracy: 0.6886 - val_loss: 1.0646\n",
392
+ "Epoch 11: early stopping\n"
393
+ ]
394
+ },
395
+ {
396
+ "name": "stderr",
397
+ "output_type": "stream",
398
+ "text": [
399
+ "WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`. \n"
400
+ ]
401
+ }
402
+ ],
403
+ "source": [
404
+ "history_fine = model.fit(\n",
405
+ " generated_train_data,\n",
406
+ " validation_data = generated_validate_data,\n",
407
+ " validation_steps = generated_validate_data.n // BATCH_SIZE,\n",
408
+ " steps_per_epoch = generated_train_data.n // BATCH_SIZE,\n",
409
+ " epochs = TOTAL_EPOCHS,\n",
410
+ " initial_epoch = history.epoch[-1],\n",
411
+ " callbacks = [fine_tune_checkpointer, early_stopping],\n",
412
+ " verbose = 1,\n",
413
+ ")\n",
414
+ "model.save(FINE_TUNE_MODEL_TRAINED)"
415
+ ]
416
+ },
417
+ {
418
+ "cell_type": "code",
419
+ "execution_count": 19,
420
+ "metadata": {},
421
+ "outputs": [
422
+ {
423
+ "name": "stdout",
424
+ "output_type": "stream",
425
+ "text": [
426
+ "\u001b[1m162/162\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1063s\u001b[0m 7s/step - accuracy: 0.6897 - loss: 1.0454\n",
427
+ "Test accuracy: 0.6889146566390991\n"
428
+ ]
429
+ }
430
+ ],
431
+ "source": [
432
+ "loss, accuracy = model.evaluate(generated_test_data)\n",
433
+ "print('Test accuracy:', accuracy)"
434
+ ]
435
+ },
436
+ {
437
+ "cell_type": "code",
438
+ "execution_count": 22,
439
+ "metadata": {},
440
+ "outputs": [
441
+ {
442
+ "name": "stdout",
443
+ "output_type": "stream",
444
+ "text": [
445
+ "Requirement already satisfied: scipy in c:\\users\\vuongquan14\\appdata\\roaming\\python\\python310\\site-packages (1.10.1)\n",
446
+ "Collecting scipy\n",
447
+ " Downloading scipy-1.14.0-cp310-cp310-win_amd64.whl.metadata (60 kB)\n",
448
+ "Requirement already satisfied: numpy<2.3,>=1.23.5 in c:\\users\\vuongquan14\\appdata\\local\\programs\\python\\python310\\lib\\site-packages (from scipy) (1.23.5)\n",
449
+ "Downloading scipy-1.14.0-cp310-cp310-win_amd64.whl (44.8 MB)\n",
450
+ " ---------------------------------------- 0.0/44.8 MB ? eta -:--:--\n",
451
+ " --------------------------------------- 1.0/44.8 MB 7.1 MB/s eta 0:00:07\n",
452
+ " ---- ----------------------------------- 5.0/44.8 MB 13.7 MB/s eta 0:00:03\n",
453
+ " ----------- ---------------------------- 12.6/44.8 MB 22.5 MB/s eta 0:00:02\n",
454
+ " ------------------ --------------------- 20.2/44.8 MB 26.0 MB/s eta 0:00:01\n",
455
+ " ------------------------ --------------- 27.0/44.8 MB 27.1 MB/s eta 0:00:01\n",
456
+ " ------------------------------ --------- 34.1/44.8 MB 28.1 MB/s eta 0:00:01\n",
457
+ " ------------------------------------- -- 41.7/44.8 MB 29.5 MB/s eta 0:00:01\n",
458
+ " ---------------------------------------- 44.8/44.8 MB 27.9 MB/s eta 0:00:00\n",
459
+ "Installing collected packages: scipy\n",
460
+ " Attempting uninstall: scipy\n",
461
+ " Found existing installation: scipy 1.10.1\n",
462
+ " Uninstalling scipy-1.10.1:\n",
463
+ " Successfully uninstalled scipy-1.10.1\n",
464
+ " Rolling back uninstall of scipy\n",
465
+ " Moving to c:\\users\\vuongquan14\\appdata\\roaming\\python\\python310\\site-packages\\scipy-1.10.1-cp310-cp310-win_amd64.whl\n",
466
+ " from C:\\Users\\VuongQuan14\\AppData\\Local\\Temp\\pip-uninstall-bxl8tsg7\\scipy-1.10.1-cp310-cp310-win_amd64.whl\n",
467
+ " Moving to c:\\users\\vuongquan14\\appdata\\roaming\\python\\python310\\site-packages\\scipy-1.10.1.dist-info\\\n",
468
+ " from C:\\Users\\VuongQuan14\\AppData\\Roaming\\Python\\Python310\\site-packages\\~cipy-1.10.1.dist-info\n",
469
+ " Moving to c:\\users\\vuongquan14\\appdata\\roaming\\python\\python310\\site-packages\\scipy.libs\\\n",
470
+ " from C:\\Users\\VuongQuan14\\AppData\\Roaming\\Python\\Python310\\site-packages\\~cipy.libs\n",
471
+ " Moving to c:\\users\\vuongquan14\\appdata\\roaming\\python\\python310\\site-packages\\scipy\\\n",
472
+ " from C:\\Users\\VuongQuan14\\AppData\\Roaming\\Python\\Python310\\site-packages\\~cipy\n",
473
+ "Note: you may need to restart the kernel to use updated packages.\n"
474
+ ]
475
+ },
476
+ {
477
+ "name": "stderr",
478
+ "output_type": "stream",
479
+ "text": [
480
+ "WARNING: Ignoring invalid distribution -cipy (c:\\users\\vuongquan14\\appdata\\local\\programs\\python\\python310\\lib\\site-packages)\n",
481
+ "WARNING: Ignoring invalid distribution -cipy (c:\\users\\vuongquan14\\appdata\\local\\programs\\python\\python310\\lib\\site-packages)\n",
482
+ "ERROR: Could not install packages due to an OSError: [WinError 5] Access is denied: 'c:\\\\Users\\\\VuongQuan14\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python310\\\\Lib\\\\site-packages\\\\scipy\\\\linalg\\\\cython_blas.cp310-win_amd64.pyd'\n",
483
+ "Consider using the `--user` option or check the permissions.\n",
484
+ "\n"
485
+ ]
486
+ }
487
+ ],
488
+ "source": [
489
+ "pip install --upgrade scipy\n"
490
+ ]
491
+ },
492
+ {
493
+ "cell_type": "code",
494
+ "execution_count": 23,
495
+ "metadata": {},
496
+ "outputs": [
497
+ {
498
+ "name": "stdout",
499
+ "output_type": "stream",
500
+ "text": [
501
+ "Found 5169 images belonging to 38 classes.\n"
502
+ ]
503
+ },
504
+ {
505
+ "name": "stderr",
506
+ "output_type": "stream",
507
+ "text": [
508
+ "c:\\Users\\VuongQuan14\\AppData\\Local\\Programs\\Python\\Python310\\lib\\site-packages\\keras\\src\\trainers\\data_adapters\\py_dataset_adapter.py:121: UserWarning: Your `PyDataset` class should call `super().__init__(**kwargs)` in its constructor. `**kwargs` can include `workers`, `use_multiprocessing`, `max_queue_size`. Do not pass these arguments to `fit()`, as they will be ignored.\n",
509
+ " self._warn_if_super_not_called()\n"
510
+ ]
511
+ },
512
+ {
513
+ "name": "stdout",
514
+ "output_type": "stream",
515
+ "text": [
516
+ "\u001b[1m41/41\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m883s\u001b[0m 22s/step\n",
517
+ " precision recall f1-score support\n",
518
+ "\n",
519
+ " Banh beo 0.84 0.71 0.77 129\n",
520
+ " Banh bot loc 0.60 0.61 0.60 144\n",
521
+ " Banh can 0.83 0.69 0.75 149\n",
522
+ " Banh canh 0.44 0.37 0.40 193\n",
523
+ " Banh chung 0.80 0.77 0.79 102\n",
524
+ " Banh cuon 0.70 0.68 0.69 228\n",
525
+ " Banh duc 0.41 0.20 0.27 133\n",
526
+ " Banh gio 0.75 0.81 0.78 129\n",
527
+ " Banh khot 0.69 0.83 0.76 167\n",
528
+ " Banh mi 0.92 0.91 0.91 268\n",
529
+ " Banh pia 0.86 0.84 0.85 89\n",
530
+ " Banh tet 0.83 0.73 0.78 138\n",
531
+ "Banh trang nuong 0.90 0.75 0.82 159\n",
532
+ " Banh xeo 0.81 0.83 0.82 235\n",
533
+ " Bun bo Hue 0.54 0.68 0.60 306\n",
534
+ " Bun dau mam tom 0.90 0.90 0.90 184\n",
535
+ " Bun mam 0.62 0.61 0.62 155\n",
536
+ " Bun rieu 0.54 0.68 0.60 231\n",
537
+ " Bun thit nuong 0.57 0.65 0.61 150\n",
538
+ " Bánh cu đơ 0.68 0.72 0.70 18\n",
539
+ " Bánh mì cay 0.82 0.64 0.72 14\n",
540
+ " Bánh đa cua 0.50 0.21 0.30 14\n",
541
+ " Bánh đậu xanh 0.91 0.59 0.71 17\n",
542
+ " Bò bía 0.57 0.42 0.48 19\n",
543
+ " Bún cá 0.00 0.00 0.00 13\n",
544
+ " Ca kho to 0.86 0.86 0.86 136\n",
545
+ " Canh chua 0.62 0.63 0.62 165\n",
546
+ " Cao lau 0.60 0.67 0.63 124\n",
547
+ " Chao long 0.71 0.73 0.72 215\n",
548
+ " Com tam 0.77 0.81 0.79 189\n",
549
+ " Cơm cháy 0.81 0.72 0.76 18\n",
550
+ " Goi cuon 0.78 0.78 0.78 172\n",
551
+ " Hu tieu 0.46 0.38 0.42 197\n",
552
+ " Mi quang 0.55 0.75 0.63 177\n",
553
+ " Nem chua 0.67 0.55 0.61 109\n",
554
+ " Nem nướng 0.56 0.56 0.56 16\n",
555
+ " Pho 0.55 0.48 0.51 162\n",
556
+ " Xoi xeo 0.89 0.77 0.83 105\n",
557
+ "\n",
558
+ " accuracy 0.69 5169\n",
559
+ " macro avg 0.68 0.65 0.66 5169\n",
560
+ " weighted avg 0.69 0.69 0.69 5169\n",
561
+ "\n"
562
+ ]
563
+ }
564
+ ],
565
+ "source": [
566
+ "from sklearn.metrics import classification_report\n",
567
+ "import numpy as np\n",
568
+ "\n",
569
+ "\n",
570
+ "# Sử dụng generator để dự đoán nhãn cho dữ liệu kiểm tra\n",
571
+ "generated_test_data = test_generator.flow_from_directory(TEST_PATH, target_size=IMAGE_SIZE, batch_size=BATCH_SIZE, shuffle=False)\n",
572
+ "\n",
573
+ "# Dự đoán nhãn\n",
574
+ "predictions = model.predict(generated_test_data)\n",
575
+ "y_pred = np.argmax(predictions, axis=1)\n",
576
+ "y_true = generated_test_data.classes\n",
577
+ "\n",
578
+ "# Tính toán và in ra các chỉ số\n",
579
+ "class_labels = list(generated_test_data.class_indices.keys())\n",
580
+ "report = classification_report(y_true, y_pred, target_names=class_labels)\n",
581
+ "print(report)\n"
582
+ ]
583
+ }
584
+ ],
585
+ "metadata": {
586
+ "kernelspec": {
587
+ "display_name": "Python 3",
588
+ "language": "python",
589
+ "name": "python3"
590
+ },
591
+ "language_info": {
592
+ "codemirror_mode": {
593
+ "name": "ipython",
594
+ "version": 3
595
+ },
596
+ "file_extension": ".py",
597
+ "mimetype": "text/x-python",
598
+ "name": "python",
599
+ "nbconvert_exporter": "python",
600
+ "pygments_lexer": "ipython3",
601
+ "version": "3.10.9"
602
+ }
603
+ },
604
+ "nbformat": 4,
605
+ "nbformat_minor": 2
606
+ }