{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "113985e3", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "C:\\Users\\utkar\\anaconda4\\lib\\site-packages\\scipy\\__init__.py:138: UserWarning: A NumPy version >=1.16.5 and <1.23.0 is required for this version of SciPy (detected version 1.23.5)\n", " warnings.warn(f\"A NumPy version >={np_minversion} and <{np_maxversion} is required for this version of \"\n" ] } ], "source": [ "import pickle\n", "from tqdm.notebook import tqdm\n", "import os\n", "import pandas \n", "import numpy as np\n", "from tensorflow.keras.applications.vgg16 import VGG16,preprocess_input\n", "from tensorflow.keras.preprocessing.image import load_img,img_to_array\n", "from tensorflow.keras.preprocessing.text import Tokenizer\n", "from tensorflow.keras.preprocessing.sequence import pad_sequences\n", "from tensorflow.keras.models import Model\n", "from tensorflow.keras.utils import to_categorical,plot_model\n", "from tensorflow.keras.layers import Input,Dense,LSTM,Embedding, Dropout, add" ] }, { "cell_type": "code", "execution_count": 2, "id": "6f9ba09d", "metadata": {}, "outputs": [], "source": [ "work=\"C:\\crawlers\\Project_hastag\\save\"\n", "base=\"C:\\crawlers\\Project_hastag\\Archive\"" ] }, { "cell_type": "code", "execution_count": 3, "id": "204bf9d6", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Model: \"model\"\n", "_________________________________________________________________\n", " Layer (type) Output Shape Param # \n", "=================================================================\n", " input_1 (InputLayer) [(None, 224, 224, 3)] 0 \n", " \n", " block1_conv1 (Conv2D) (None, 224, 224, 64) 1792 \n", " \n", " block1_conv2 (Conv2D) (None, 224, 224, 64) 36928 \n", " \n", " block1_pool (MaxPooling2D) (None, 112, 112, 64) 0 \n", " \n", " block2_conv1 (Conv2D) (None, 112, 112, 128) 73856 \n", " \n", " block2_conv2 (Conv2D) (None, 112, 112, 128) 147584 \n", " \n", " block2_pool (MaxPooling2D) (None, 56, 56, 128) 0 \n", " \n", " block3_conv1 (Conv2D) (None, 56, 56, 256) 295168 \n", " \n", " block3_conv2 (Conv2D) (None, 56, 56, 256) 590080 \n", " \n", " block3_conv3 (Conv2D) (None, 56, 56, 256) 590080 \n", " \n", " block3_pool (MaxPooling2D) (None, 28, 28, 256) 0 \n", " \n", " block4_conv1 (Conv2D) (None, 28, 28, 512) 1180160 \n", " \n", " block4_conv2 (Conv2D) (None, 28, 28, 512) 2359808 \n", " \n", " block4_conv3 (Conv2D) (None, 28, 28, 512) 2359808 \n", " \n", " block4_pool (MaxPooling2D) (None, 14, 14, 512) 0 \n", " \n", " block5_conv1 (Conv2D) (None, 14, 14, 512) 2359808 \n", " \n", " block5_conv2 (Conv2D) (None, 14, 14, 512) 2359808 \n", " \n", " block5_conv3 (Conv2D) (None, 14, 14, 512) 2359808 \n", " \n", " block5_pool (MaxPooling2D) (None, 7, 7, 512) 0 \n", " \n", " flatten (Flatten) (None, 25088) 0 \n", " \n", " fc1 (Dense) (None, 4096) 102764544 \n", " \n", " fc2 (Dense) (None, 4096) 16781312 \n", " \n", "=================================================================\n", "Total params: 134,260,544\n", "Trainable params: 134,260,544\n", "Non-trainable params: 0\n", "_________________________________________________________________\n" ] } ], "source": [ "model=VGG16()\n", "model=Model(model.inputs,outputs=model.layers[-2].output)\n", "model.summary()" ] }, { "cell_type": "code", "execution_count": 4, "id": "22708632", "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "908848215aa6423a84b9e8398a2da55b", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/8091 [00:001])+\" endseq\"\n", " return cap\n", "\n", "def clean(ma):\n", " for key, cap in ma.items():\n", " for i in range(len(cap)):\n", " cap[i]=process_text(cap[i])\n", "\n" ] }, { "cell_type": "code", "execution_count": 9, "id": "249084e3", "metadata": {}, "outputs": [], "source": [ "clean(ma)" ] }, { "cell_type": "code", "execution_count": 10, "id": "82d3499a", "metadata": {}, "outputs": [], "source": [ "all_captions = []\n", "for key in mapping:\n", " for caption in mapping[key]:\n", " all_captions.append(caption)" ] }, { "cell_type": "code", "execution_count": 11, "id": "cd90e55f", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "40455" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(all_captions)" ] }, { "cell_type": "code", "execution_count": 12, "id": "b195a348", "metadata": {}, "outputs": [], "source": [ "tokenizer = Tokenizer()\n", "tokenizer.fit_on_texts(all_captions)\n", "vocab_size = len(tokenizer.word_index) + 1" ] }, { "cell_type": "code", "execution_count": 13, "id": "06788c74", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "35" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "max_length = max(len(caption.split()) for caption in all_captions)\n", "max_length" ] }, { "cell_type": "code", "execution_count": 14, "id": "6edafc86", "metadata": {}, "outputs": [], "source": [ "image_ids = list(mapping.keys())\n", "split = int(len(image_ids) * 0.90)\n", "train = image_ids[:split]\n", "test = image_ids[split:]" ] }, { "cell_type": "code", "execution_count": 15, "id": "b214a4dd", "metadata": {}, "outputs": [], "source": [ "def data_generator(data_keys, mapping, features, tokenizer, max_length, vocab_size, batch_size):\n", " X1, X2, y = list(), list(), list()\n", " n = 0\n", " while 1:\n", " for key in data_keys:\n", " n += 1\n", " captions = mapping[key]\n", " for caption in captions:\n", " seq = tokenizer.texts_to_sequences([caption])[0]\n", " for i in range(1, len(seq)):\n", " in_seq, out_seq = seq[:i], seq[i]\n", " in_seq = pad_sequences([in_seq], maxlen=max_length)[0]\n", " out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]\n", " X1.append(features[key][0])\n", " X2.append(in_seq)\n", " y.append(out_seq)\n", " if n == batch_size:\n", " X1, X2, y = np.array(X1), np.array(X2), np.array(y)\n", " yield [X1, X2], y\n", " X1, X2, y = list(), list(), list()\n", " n = 0" ] }, { "cell_type": "code", "execution_count": 16, "id": "ba019340", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work.\n" ] } ], "source": [ "inputs1 = Input(shape=(4096,))\n", "fe1 = Dropout(0.4)(inputs1)\n", "fe2 = Dense(256, activation='relu')(fe1)\n", "inputs2 = Input(shape=(max_length,))\n", "se1 = Embedding(vocab_size, 256, mask_zero=True)(inputs2)\n", "se2 = Dropout(0.4)(se1)\n", "se3 = LSTM(256)(se2)\n", "decoder1 = add([fe2, se3])\n", "decoder2 = Dense(256, activation='relu')(decoder1)\n", "outputs = Dense(vocab_size, activation='softmax')(decoder2)\n", "\n", "model = Model(inputs=[inputs1, inputs2], outputs=outputs)\n", "model.compile(loss='categorical_crossentropy', optimizer='adam')\n", "\n", "plot_model(model, show_shapes=True)" ] }, { "cell_type": "code", "execution_count": 17, "id": "c9cd441e", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "227/227 [==============================] - 634s 3s/step - loss: 5.2148\n", "227/227 [==============================] - 552s 2s/step - loss: 3.9993\n", "227/227 [==============================] - 547s 2s/step - loss: 3.5808\n", "227/227 [==============================] - 565s 2s/step - loss: 3.3151\n", "227/227 [==============================] - 583s 3s/step - loss: 3.1139\n", "227/227 [==============================] - 563s 2s/step - loss: 2.9658\n", "227/227 [==============================] - 563s 2s/step - loss: 2.8508\n", "227/227 [==============================] - 562s 2s/step - loss: 2.7600\n", "227/227 [==============================] - 570s 3s/step - loss: 2.6801\n", "227/227 [==============================] - 564s 2s/step - loss: 2.6098\n", "227/227 [==============================] - 564s 2s/step - loss: 2.5561\n", "227/227 [==============================] - 568s 3s/step - loss: 2.4974\n", "227/227 [==============================] - 575s 3s/step - loss: 2.4453\n", "227/227 [==============================] - 572s 3s/step - loss: 2.3967\n", "227/227 [==============================] - 576s 3s/step - loss: 2.3553\n", "227/227 [==============================] - 570s 3s/step - loss: 2.3203\n", "227/227 [==============================] - 570s 3s/step - loss: 2.2833\n", "227/227 [==============================] - 560s 2s/step - loss: 2.2474\n", "227/227 [==============================] - 559s 2s/step - loss: 2.2182\n", "227/227 [==============================] - 561s 2s/step - loss: 2.1891\n" ] } ], "source": [ "epochs = 20\n", "batch_size = 32\n", "steps = len(train)\n", "\n", "for i in range(epochs):\n", " generator = data_generator(train, ma, features, tokenizer, max_length, vocab_size, batch_size)\n", " model.fit(generator, epochs=1, steps_per_epoch=steps, verbose=1)" ] }, { "cell_type": "code", "execution_count": 19, "id": "3e22a08f", "metadata": {}, "outputs": [], "source": [ "model.save(work+'/image_caption.h5')" ] }, { "cell_type": "code", "execution_count": 20, "id": "8d6cae78", "metadata": {}, "outputs": [], "source": [ "def idx_word(integer,tok):\n", " for word,index in tok.word_index.items():\n", " if index== integer:\n", " return word\n", " return none" ] }, { "cell_type": "code", "execution_count": 25, "id": "68502106", "metadata": {}, "outputs": [], "source": [ "def predict_caption(model,image,tok,max_len):\n", " in_text=\"startseq\"\n", " for i in range(max_len):\n", " seq=tok.texts_to_sequences([in_text])[0]\n", " seq=pad_sequences([seq],max_len)\n", " yhat = model.predict([image, seq], verbose=0)\n", " yhat = np.argmax(yhat)\n", " word = idx_word(yhat, tok)\n", " if word is None:\n", " break\n", " in_text += \" \" + word\n", " if word == 'endseq':\n", " break\n", " return in_text" ] }, { "cell_type": "code", "execution_count": null, "id": "d6fa2905", "metadata": {}, "outputs": [ { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "cebaf5ee07d54f4bb56ce83763063629", "version_major": 2, "version_minor": 0 }, "text/plain": [ " 0%| | 0/810 [00:001])+\" [end]\"\n", " return cap" ] }, { "cell_type": "code", "execution_count": 10, "id": "f75f26df", "metadata": {}, "outputs": [], "source": [ "def clean(ma):\n", " for key, cap in ma.items():\n", " for i in range(len(cap)):\n", " cap[i]=process_text(cap[i])\n" ] }, { "cell_type": "code", "execution_count": 11, "id": "15693ddd", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['A child in a pink dress is climbing up a set of stairs in an entry way .',\n", " 'A girl going into a wooden building .',\n", " 'A little girl climbing into a wooden playhouse .',\n", " 'A little girl climbing the stairs to her playhouse .',\n", " 'A little girl in a pink dress going into a wooden cabin .']" ] }, "execution_count": 11, "metadata": {}, "output_type": "execute_result" } ], "source": [ "ma[\"1000268201_693b08cb0e\"] # just a check before " ] }, { "cell_type": "code", "execution_count": 12, "id": "defc5403", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "['[start] child in pink dress is climbing up set of stairs in an entry way [end]',\n", " '[start] girl going into wooden building [end]',\n", " '[start] little girl climbing into wooden playhouse [end]',\n", " '[start] little girl climbing the stairs to her playhouse [end]',\n", " '[start] little girl in pink dress going into wooden cabin [end]']" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "clean(ma)\n", "ma[\"1000268201_693b08cb0e\"]" ] }, { "cell_type": "code", "execution_count": 13, "id": "f5913f53", "metadata": {}, "outputs": [], "source": [ "all_cap=[]\n", "for key in ma.keys():\n", " for cap in ma[key]:\n", " all_cap.append(cap)" ] }, { "cell_type": "code", "execution_count": 14, "id": "84d681f2", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "40455" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(all_cap)" ] }, { "cell_type": "code", "execution_count": 15, "id": "4dbe92b1", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "8311" ] }, "execution_count": 15, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tok=Tokenizer()\n", "tok.fit_on_texts(all_cap)\n", "vocab_size=len(tok.word_index)+1\n", "vocab_size" ] }, { "cell_type": "code", "execution_count": 16, "id": "776312f5", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "31" ] }, "execution_count": 16, "metadata": {}, "output_type": "execute_result" } ], "source": [ "max_len=max(len(cap.split())for cap in all_cap)\n", "max_len" ] }, { "cell_type": "code", "execution_count": 17, "id": "57a14f3f", "metadata": {}, "outputs": [], "source": [ "image_ids=list(ma.keys())\n", "split=int(len(image_ids)*0.90)\n", "train=image_ids[:split]\n", "test=image_ids[split:]" ] }, { "cell_type": "code", "execution_count": 18, "id": "69b7ff8a", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "7281" ] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "len(train)" ] }, { "cell_type": "code", "execution_count": 19, "id": "378f6cb7", "metadata": {}, "outputs": [], "source": [ "def data_gen(data_keys,ma,fs,tok,max_len,vocab_size,batch_size):\n", " x1,x2,y=list(),list(),list()\n", " n=0;\n", " while True:\n", " for key in data_keys:\n", " n+=1\n", " cap=ma[key]\n", " for cap_i in cap:\n", " seq=tok.texts_to_sequences([cap_i])[0]\n", " for i in range(len(seq)):\n", " in_seq,out_seq=seq[:i],seq[i]\n", " in_seq=pad_sequences([in_seq],maxlen=max_len)[0]\n", " out_seq=to_categorical([out_seq],num_classes=vocab_size)[0]\n", " x1.append(fs[key][0])\n", " x2.append(in_seq)\n", " y.append(out_seq)\n", " if n==batch_size:\n", " x1=np.array(x1)\n", " x2=np.array(x2)\n", " y=np.array(y)\n", " yield[x1,x2],y\n", " x1,x2,y=list(),list(),list()\n", " n=0" ] }, { "cell_type": "code", "execution_count": 20, "id": "f5f13047", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "You must install pydot (`pip install pydot`) and install graphviz (see instructions at https://graphviz.gitlab.io/download/) for plot_model to work.\n" ] } ], "source": [ "inputs1=Input(shape=(4096,))\n", "fe1=Dropout(0.4)(inputs1)\n", "fe2=Dense(256,activation='relu')(fe1)\n", "inputs2=Input(shape=(max_len,))\n", "se1=Embedding(vocab_size,256,mask_zero=True)(inputs2)\n", "se2=Dropout(0.4)(se1)\n", "se3=LSTM(256)(se2)\n", "\n", "decoder1=add([fe2,se3])\n", "decoder2=Dense(256,activation='relu')(decoder1)\n", "outputs=Dense(vocab_size,activation='softmax')(decoder2)\n", "\n", "model=Model(inputs=[inputs1,inputs2],outputs=outputs)\n", "model.compile(loss=\"categorical_crossentropy\",optimizer='adam')\n", "\n", "\n", "plot_model(model,show_shapes=True)" ] }, { "cell_type": "code", "execution_count": null, "id": "d63d6d4b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\r", " 1/7281 [..............................] - ETA: 38:03:57 - loss: 9.0597" ] } ], "source": [ "epochs=15\n", "batch_size=64\n", "steps=len(train)\n", "for i in range(epochs):\n", " generator=data_gen(train,ma,fs,tok,max_len,vocab_size,batch_size)\n", " model.fit(generator,epochs=1,steps_per_epoch=steps,verbose=1)" ] }, { "cell_type": "code", "execution_count": 18, "id": "3322120d", "metadata": {}, "outputs": [], "source": [ "model.save(work+'/image_caption.h5')" ] }, { "cell_type": "code", "execution_count": null, "id": "303f7a8e", "metadata": {}, "outputs": [], "source": [ "def idx_word(integer,tok):\n", " for word,index in tok.word_index.items():\n", " if index== integer:\n", " return word\n", " return none" ] }, { "cell_type": "code", "execution_count": null, "id": "541d09e8", "metadata": {}, "outputs": [], "source": [ "def predict_caption(model,image,tok,max_len):\n", " in_text=\"[start]\"\n", " for i in range(max_len):\n", " seq=tok.texts_to_sequences([in_text])[0]\n", " seq=pad_sequences([seq],max_len)[0]\n", " yhat" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.9.13" } }, "nbformat": 4, "nbformat_minor": 5 }