{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Import library" ] }, { "cell_type": "code", "execution_count": 6, "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "\n", "import cv2\n", "import tensorflow as tf\n", "from tensorflow import keras\n", "from keras import layers, models, optimizers, losses, metrics, preprocessing\n", "from keras.models import Sequential\n", "from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\n", "from tensorflow.keras.preprocessing.image import ImageDataGenerator" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Found 1823 images belonging to 2 classes.\n", "Found 200 images belonging to 2 classes.\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\Home\\anaconda3\\Lib\\site-packages\\keras\\src\\trainers\\data_adapters\\py_dataset_adapter.py:121: UserWarning: Your `PyDataset` class should call `super().__init__(**kwargs)` in its constructor. `**kwargs` can include `workers`, `use_multiprocessing`, `max_queue_size`. Do not pass these arguments to `fit()`, as they will be ignored.\n", " self._warn_if_super_not_called()\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Epoch 1/15\n", "\u001b[1m 92/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m━━\u001b[0m \u001b[1m1s\u001b[0m 209ms/step - accuracy: 0.5003 - loss: 0.7080" ] }, { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\Home\\anaconda3\\Lib\\site-packages\\keras\\src\\trainers\\epoch_iterator.py:107: UserWarning: Your input ran out of data; interrupting training. Make sure that your dataset or generator can generate at least `steps_per_epoch * epochs` batches. 
You may need to use the `.repeat()` function when building your dataset.\n", " self._interrupted_warning()\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m22s\u001b[0m 202ms/step - accuracy: 0.5011 - loss: 0.7071 - val_accuracy: 0.4850 - val_loss: 0.6858\n", "Epoch 2/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m38s\u001b[0m 385ms/step - accuracy: 0.5660 - loss: 0.6854 - val_accuracy: 0.5800 - val_loss: 0.6501\n", "Epoch 3/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m44s\u001b[0m 437ms/step - accuracy: 0.6389 - loss: 0.6303 - val_accuracy: 0.5950 - val_loss: 0.6571\n", "Epoch 4/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m43s\u001b[0m 433ms/step - accuracy: 0.6392 - loss: 0.6369 - val_accuracy: 0.6750 - val_loss: 0.6175\n", "Epoch 5/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m44s\u001b[0m 433ms/step - accuracy: 0.6641 - loss: 0.6072 - val_accuracy: 0.6750 - val_loss: 0.6209\n", "Epoch 6/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m44s\u001b[0m 436ms/step - accuracy: 0.6638 - loss: 0.6029 - val_accuracy: 0.7000 - val_loss: 0.5767\n", "Epoch 7/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m44s\u001b[0m 433ms/step - accuracy: 0.6974 - loss: 0.5602 - val_accuracy: 0.7200 - val_loss: 0.5445\n", "Epoch 8/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m43s\u001b[0m 432ms/step - accuracy: 0.7302 - loss: 0.5413 - val_accuracy: 0.7700 - val_loss: 0.5169\n", "Epoch 9/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m44s\u001b[0m 433ms/step - accuracy: 0.7109 - loss: 0.5401 - val_accuracy: 0.7200 - val_loss: 0.5424\n", "Epoch 10/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m44s\u001b[0m 434ms/step - accuracy: 0.7552 - loss: 0.4997 - val_accuracy: 0.6900 - val_loss: 0.5927\n", "Epoch 11/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m44s\u001b[0m 433ms/step - accuracy: 0.7483 - loss: 0.5010 - val_accuracy: 0.7550 - val_loss: 0.5527\n", "Epoch 12/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m42s\u001b[0m 417ms/step - accuracy: 0.7430 - loss: 0.5135 - val_accuracy: 0.7550 - val_loss: 0.5013\n", "Epoch 13/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m43s\u001b[0m 433ms/step - accuracy: 0.7678 - loss: 0.4847 - val_accuracy: 0.7800 - val_loss: 0.5143\n", "Epoch 14/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m44s\u001b[0m 434ms/step - accuracy: 0.7890 - loss: 0.4463 - val_accuracy: 0.7600 - val_loss: 0.5209\n", "Epoch 15/15\n", "\u001b[1m100/100\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m43s\u001b[0m 426ms/step - accuracy: 0.8115 - loss: 0.4065 - val_accuracy: 0.7650 - val_loss: 0.4908\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or 
`keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`. \n" ] } ], "source": [ "img_width, img_height = 150, 150\n", "\n", "# A simple CNN model\n", "model = Sequential([\n", "    # first convolutional layer: 32 filters with a (3, 3) kernel\n", "    Conv2D(32, (3, 3), activation='relu', input_shape=(img_width, img_height, 3)),\n", "    MaxPooling2D(pool_size=(2, 2)),\n", "\n", "    # second convolutional layer: 64 filters\n", "    Conv2D(64, (3, 3), activation='relu'),\n", "    MaxPooling2D(pool_size=(2, 2)),\n", "\n", "    # third convolutional layer: 128 filters\n", "    Conv2D(128, (3, 3), activation='relu'),\n", "    MaxPooling2D(pool_size=(2, 2)),\n", "\n", "    # flatten the feature maps into a 1D vector\n", "    Flatten(),\n", "\n", "    # fully connected layer with 512 neurons\n", "    Dense(512, activation='relu'),\n", "    Dropout(0.5),\n", "\n", "    # output layer: a single neuron with sigmoid activation for binary cat/dog classification\n", "    Dense(1, activation='sigmoid')\n", "])\n", "\n", "# compile the model\n", "model.compile(loss='binary_crossentropy',\n", "              optimizer='adam',\n", "              metrics=['accuracy'])\n", "\n", "# use ImageDataGenerator to rescale pixel values and apply data augmentation\n", "train_datagen = ImageDataGenerator(\n", "    rescale=1./255,\n", "    shear_range=0.2,\n", "    zoom_range=0.2,\n", "    horizontal_flip=True\n", ")\n", "\n", "test_datagen = ImageDataGenerator(rescale=1./255)\n", "\n", "# create generators for the training and validation sets\n", "train_generator = train_datagen.flow_from_directory(\n", "    'data.cat_dog/train',\n", "    target_size=(img_width, img_height),\n", "    batch_size=20,\n", "    class_mode='binary'\n", ")\n", "\n", "valid_generator = test_datagen.flow_from_directory(\n", "    'data.cat_dog/valid',\n", "    target_size=(img_width, img_height),\n", "    batch_size=20,\n", "    class_mode='binary'\n", ")\n", "\n", "# train the model\n", "# note: with 1823 training images and batch_size=20 there are only ~92 batches per epoch,\n", "# so steps_per_epoch=100 (and validation_steps=50 with 200 validation images) triggers the\n", "# 'input ran out of data' warning shown above\n", "history = model.fit(\n", "    train_generator,\n", "    steps_per_epoch=100,\n", "    epochs=15,\n", "    validation_data=valid_generator,\n", "    validation_steps=50\n", ")\n", "\n", "# save the trained model (HDF5 is a legacy format; the native .keras format is recommended)\n", "model.save('cnn_cats_dogs.h5')" ] } ], "metadata": { "kernelspec": { "display_name": "base", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.7" } }, "nbformat": 4, "nbformat_minor": 2 }