Abdulvajid commited on
Commit
b2e6878
·
1 Parent(s): 638a214

Add application file

Browse files
app.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import pickle as pkl
3
+
4
+ from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input
5
+ from tensorflow.keras.models import Model
6
+ import tensorflow as tf
7
+ from tensorflow.keras.preprocessing.text import Tokenizer
8
+ from tensorflow.keras.preprocessing.sequence import pad_sequences
9
+
10
+
11
# --- One-time model setup (runs at import time) ---

# Full VGG16 with ImageNet weights; frozen — inference only, never trained here.
vgg_model = VGG16()
vgg_model.trainable = False

# Truncated VGG16 ending at the penultimate layer (layers[-2]), intended as
# the per-image feature extractor for the caption model.
img_model = Model(inputs=vgg_model.input,
                  outputs=vgg_model.layers[-2].output)

# Caption generation model, trained offline (architecture not visible here).
# NOTE: filename spelling is intentional — it must match the file on disk.
model = tf.keras.models.load_model('caption_genaration_model.h5')

# Tokenizer fitted on the training captions; maps words <-> indices.
# NOTE(security): pickle.load executes arbitrary code — only load trusted files.
with open('tokenizer.pkl','rb') as f:
    tokenizer = pkl.load(f)
24
+
def index_to_word(word_idx):
    """Map a predicted token index back to its word using the fitted tokenizer.

    Raises KeyError if the index has no entry (e.g. the padding index 0).
    """
    vocab = tokenizer.index_word
    return vocab[word_idx]
28
+
29
# Resize layer: VGG16 expects 224x224 RGB input.
resize_img = tf.keras.layers.Resizing(height=224, width=224)

def img_preprocces(img):
    """Resize, normalise and encode an input image into a feature vector.

    Parameters
    ----------
    img : array-like image (H, W, 3) as supplied by Gradio.

    Returns
    -------
    A (1, feature_dim) array of penultimate-layer VGG16 activations,
    which is what the caption model consumes as its image input.
    """
    img = tf.expand_dims(img, axis=0)      # add batch dimension -> (1, H, W, 3)
    resized_image = resize_img(img)        # -> (1, 224, 224, 3)
    img = preprocess_input(resized_image)  # VGG16-specific channel preprocessing
    # BUG FIX: the original called vgg_model.predict, which returns the final
    # 1000-class softmax rather than image features. img_model (built above to
    # stop at layers[-2], and otherwise unused) is the intended extractor.
    feature = img_model.predict(img, verbose=False)
    return feature
39
+
40
def genarate_caption(img, max_len=30):
    """Generate a caption for *img* by greedy, word-by-word decoding.

    Parameters
    ----------
    img : array-like image (H, W, 3) as supplied by Gradio.
    max_len : int, optional
        Maximum number of decoding steps and the padding length for the
        partial caption (default 30 — presumably the sequence length the
        model was trained with; the two values must agree, which is why
        a single parameter replaces the original's two hard-coded 30s).

    Returns
    -------
    str
        The generated caption with the leading 'startseq' marker removed.
    """
    seq_in = 'startseq'
    feature_img = img_preprocces(img)

    for _ in range(max_len):
        # Tokenize the partial caption and pad it to the model's input length.
        seq_in_sequence = tokenizer.texts_to_sequences([seq_in])[0]
        seq_in_padded = pad_sequences([seq_in_sequence], padding='post', maxlen=max_len)

        # Greedy decode: pick the highest-probability next word.
        y_hat = model.predict([feature_img, seq_in_padded], verbose=False)
        word_index = int(y_hat.argmax(axis=1)[0])

        # Robustness: index 0 (padding) has no entry in index_word and would
        # raise KeyError inside index_to_word — stop decoding instead.
        if word_index not in tokenizer.index_word:
            break
        predicted_word = index_to_word(word_index)
        if predicted_word == 'endseq':
            break
        seq_in = seq_in + ' ' + predicted_word

    # Strip the leading 'startseq ' (9 characters) from the accumulated text.
    return seq_in[9:]
59
+
60
# Gradio UI: a single image input mapped to the generated caption text.
interface_config = dict(
    fn=genarate_caption,
    inputs=['image'],
    outputs=['text'],
)
app = gr.Interface(**interface_config)

# Start the local Gradio server (blocks until the app is stopped).
app.launch()
caption_genaration_model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68b8eeff090f40b3022131ef4fdc453382ae21a1ec977c05000e648f32a19323
3
+ size 59311104
captions.txt ADDED
The diff for this file is too large to render. See raw diff
 
image_captioning.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
img_features.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c525fbb5a34227892244bb48044be939a7ccd3ede395af8dc6ee4748aa185c31
3
+ size 32884479
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio==3.37.0
2
+ tensorflow==2.10.0
3
+ numpy==1.22.4
4
+ Pillow==9.5.0
tokenizer.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eb68cd2f85436829d5a5f902cc035e7e61e1b8d1245200459f76ef2dc8b5e69b
3
+ size 335196