Kasamuday commited on
Commit
fa1eaa7
·
verified ·
1 Parent(s): 5365365

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +71 -277
app.py CHANGED
@@ -1,278 +1,72 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "code",
5
- "execution_count": null,
6
- "metadata": {
7
- "id": "146BB11JpfDA"
8
- },
9
- "outputs": [],
10
- "source": [
11
- "import os"
12
- ]
13
- },
14
- {
15
- "cell_type": "code",
16
- "execution_count": null,
17
- "metadata": {
18
- "id": "42hJEdo_pfDB"
19
- },
20
- "outputs": [],
21
- "source": [
22
- "CUSTOM_MODEL_NAME = 'my_ssd_mobnet' \n",
23
- "PRETRAINED_MODEL_NAME = 'ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8'\n",
24
- "PRETRAINED_MODEL_URL = 'http://download.tensorflow.org/models/object_detection/tf2/20200711/ssd_mobilenet_v2_fpnlite_320x320_coco17_tpu-8.tar.gz'\n",
25
- "TF_RECORD_SCRIPT_NAME = 'generate_tfrecord.py'\n",
26
- "LABEL_MAP_NAME = 'label_map.pbtxt'"
27
- ]
28
- },
29
- {
30
- "cell_type": "code",
31
- "execution_count": null,
32
- "metadata": {
33
- "id": "hbPhYVy_pfDB"
34
- },
35
- "outputs": [],
36
- "source": [
37
- "paths = {\n",
38
- " 'WORKSPACE_PATH': os.path.join('Tensorflow', 'workspace'),\n",
39
- " 'SCRIPTS_PATH': os.path.join('Tensorflow','scripts'),\n",
40
- " 'APIMODEL_PATH': os.path.join('Tensorflow','models'),\n",
41
- " 'ANNOTATION_PATH': os.path.join('Tensorflow', 'workspace','annotations'),\n",
42
- " 'IMAGE_PATH': os.path.join('Tensorflow', 'workspace','images'),\n",
43
- " 'MODEL_PATH': os.path.join('Tensorflow', 'workspace','models'),\n",
44
- " 'PRETRAINED_MODEL_PATH': os.path.join('Tensorflow', 'workspace','pre-trained-models'),\n",
45
- " 'CHECKPOINT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME), \n",
46
- " 'OUTPUT_PATH': os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'export'), \n",
47
- " 'TFJS_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfjsexport'), \n",
48
- " 'TFLITE_PATH':os.path.join('Tensorflow', 'workspace','models',CUSTOM_MODEL_NAME, 'tfliteexport'), \n",
49
- " 'PROTOC_PATH':os.path.join('Tensorflow','protoc')\n",
50
- " }"
51
- ]
52
- },
53
- {
54
- "cell_type": "code",
55
- "execution_count": null,
56
- "metadata": {
57
- "id": "LwhWZMI0pfDC"
58
- },
59
- "outputs": [],
60
- "source": [
61
- "files = {\n",
62
- " 'PIPELINE_CONFIG':os.path.join('Tensorflow', 'workspace','models', CUSTOM_MODEL_NAME, 'pipeline.config'),\n",
63
- " 'TF_RECORD_SCRIPT': os.path.join(paths['SCRIPTS_PATH'], TF_RECORD_SCRIPT_NAME), \n",
64
- " 'LABELMAP': os.path.join(paths['ANNOTATION_PATH'], LABEL_MAP_NAME)\n",
65
- "}"
66
- ]
67
- },
68
- {
69
- "cell_type": "code",
70
- "execution_count": null,
71
- "metadata": {
72
- "id": "HR-TfDGrpfDC"
73
- },
74
- "outputs": [],
75
- "source": [
76
- "for path in paths.values():\n",
77
- " if not os.path.exists(path):\n",
78
- " if os.name == 'posix':\n",
79
- " !mkdir -p {path}\n",
80
- " if os.name == 'nt':\n",
81
- " !mkdir {path}"
82
- ]
83
- },
84
- {
85
- "cell_type": "code",
86
- "execution_count": null,
87
- "metadata": {
88
- "id": "K-Cmz2edpfDE",
89
- "scrolled": true
90
- },
91
- "outputs": [],
92
- "source": [
93
- "if os.name=='nt':\n",
94
- " !pip install wget\n",
95
- " import wget"
96
- ]
97
- },
98
- {
99
- "cell_type": "code",
100
- "execution_count": null,
101
- "metadata": {
102
- "id": "iA1DIq5OpfDE"
103
- },
104
- "outputs": [],
105
- "source": [
106
- "if not os.path.exists(os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection')):\n",
107
- " !git clone https://github.com/tensorflow/models {paths['APIMODEL_PATH']}"
108
- ]
109
- },
110
- {
111
- "cell_type": "code",
112
- "execution_count": null,
113
- "metadata": {
114
- "id": "rJjMHbnDs3Tv"
115
- },
116
- "outputs": [],
117
- "source": [
118
- "# Install Tensorflow Object Detection \n",
119
- "if os.name=='posix': \n",
120
- " !apt-get install protobuf-compiler\n",
121
- " !cd Tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. && cp object_detection/packages/tf2/setup.py . && python -m pip install . \n",
122
- " \n",
123
- "if os.name=='nt':\n",
124
- " url=\"https://github.com/protocolbuffers/protobuf/releases/download/v3.15.6/protoc-3.15.6-win64.zip\"\n",
125
- " wget.download(url)\n",
126
- " !move protoc-3.15.6-win64.zip {paths['PROTOC_PATH']}\n",
127
- " !cd {paths['PROTOC_PATH']} && tar -xf protoc-3.15.6-win64.zip\n",
128
- " os.environ['PATH'] += os.pathsep + os.path.abspath(os.path.join(paths['PROTOC_PATH'], 'bin')) \n",
129
- " !cd Tensorflow/models/research && protoc object_detection/protos/*.proto --python_out=. && copy object_detection\\\\packages\\\\tf2\\\\setup.py setup.py && python setup.py build && python setup.py install\n",
130
- " !cd Tensorflow/models/research/slim && pip install -e . "
131
- ]
132
- },
133
- {
134
- "cell_type": "code",
135
- "execution_count": null,
136
- "metadata": {
137
- "scrolled": true
138
- },
139
- "outputs": [],
140
- "source": [
141
- "VERIFICATION_SCRIPT = os.path.join(paths['APIMODEL_PATH'], 'research', 'object_detection', 'builders', 'model_builder_tf2_test.py')\n",
142
- "# Verify Installation\n",
143
- "!python {VERIFICATION_SCRIPT}"
144
- ]
145
- },
146
- {
147
- "cell_type": "code",
148
- "execution_count": null,
149
- "metadata": {},
150
- "outputs": [],
151
- "source": [
152
- "pip install scipy"
153
- ]
154
- },
155
- {
156
- "cell_type": "code",
157
- "execution_count": null,
158
- "metadata": {},
159
- "outputs": [],
160
- "source": [
161
- "pip install keras==2.4.0"
162
- ]
163
- },
164
- {
165
- "cell_type": "code",
166
- "execution_count": null,
167
- "metadata": {},
168
- "outputs": [],
169
- "source": [
170
- "pip install tf-models-official"
171
- ]
172
- },
173
- {
174
- "cell_type": "code",
175
- "execution_count": null,
176
- "metadata": {},
177
- "outputs": [],
178
- "source": [
179
- "pip install Pillow"
180
- ]
181
- },
182
- {
183
- "cell_type": "code",
184
- "execution_count": null,
185
- "metadata": {},
186
- "outputs": [],
187
- "source": [
188
- "pip install matplotlib==3.2"
189
- ]
190
- },
191
- {
192
- "cell_type": "code",
193
- "execution_count": null,
194
- "metadata": {},
195
- "outputs": [],
196
- "source": [
197
- "pip install tensorflow_io"
198
- ]
199
- },
200
- {
201
- "cell_type": "code",
202
- "execution_count": null,
203
- "metadata": {},
204
- "outputs": [],
205
- "source": [
206
- "pip install scipy"
207
- ]
208
- },
209
- {
210
- "cell_type": "code",
211
- "execution_count": null,
212
- "metadata": {},
213
- "outputs": [],
214
- "source": [
215
- "pip install protobuf==3.20.*"
216
- ]
217
- },
218
- {
219
- "cell_type": "code",
220
- "execution_count": null,
221
- "metadata": {},
222
- "outputs": [],
223
- "source": [
224
- "!pip install tensorflow --upgrade"
225
- ]
226
- },
227
- {
228
- "cell_type": "code",
229
- "execution_count": null,
230
- "metadata": {
231
- "collapsed": true,
232
- "jupyter": {
233
- "outputs_hidden": true
234
- }
235
- },
236
- "outputs": [],
237
- "source": [
238
- "!pip uninstall protobuf matplotlib -y\n",
239
- "!pip install protobuf matplotlib==3.2"
240
- ]
241
- },
242
- {
243
- "cell_type": "code",
244
- "execution_count": null,
245
- "metadata": {},
246
- "outputs": [],
247
- "source": [
248
- "pip install tensorflow-object-detection-api"
249
- ]
250
- }
251
- ],
252
- "metadata": {
253
- "accelerator": "GPU",
254
- "colab": {
255
- "name": "3. Training and Detection.ipynb",
256
- "provenance": []
257
- },
258
- "kernelspec": {
259
- "display_name": "hamza1",
260
- "language": "python",
261
- "name": "hamza1"
262
- },
263
- "language_info": {
264
- "codemirror_mode": {
265
- "name": "ipython",
266
- "version": 3
267
- },
268
- "file_extension": ".py",
269
- "mimetype": "text/x-python",
270
- "name": "python",
271
- "nbconvert_exporter": "python",
272
- "pygments_lexer": "ipython3",
273
- "version": "3.8.0"
274
- }
275
- },
276
- "nbformat": 4,
277
- "nbformat_minor": 4
278
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import cv2
3
+ from PIL import Image
4
+ import os
5
+ import numpy as np
6
+ import tensorflow as tf
7
+ from object_detection.utils import label_map_util
8
+ from object_detection.utils import visualization_utils as viz_utils
9
+ from object_detection.builders import model_builder
10
+ from object_detection.utils import config_util
11
+
12
+
13
# --- Model configuration and one-time loading (runs at import time) ---

CUSTOM_MODEL_NAME = 'my_ssd_mobnet'

# Filesystem layout for the trained model artifacts produced by the
# TF Object Detection training workflow.
paths = {
    'CHECKPOINT_PATH': os.path.join('Tensorflow', 'workspace', 'models', CUSTOM_MODEL_NAME),
    'LABELMAP': os.path.join('Tensorflow', 'workspace', 'annotations', 'label_map.pbtxt')
}

# Rebuild the detection model architecture from its training pipeline
# config, in inference mode (is_training=False).
configs = config_util.get_configs_from_pipeline_file(os.path.join(paths['CHECKPOINT_PATH'], 'pipeline.config'))
detection_model = model_builder.build(model_config=configs['model'], is_training=False)

# Restore trained weights. expect_partial() silences warnings about
# training-only variables (optimizer state) absent at inference time.
# NOTE(review): checkpoint index is hard-coded — confirm 'ckpt-3' is the
# checkpoint you intend to serve.
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
ckpt.restore(os.path.join(paths['CHECKPOINT_PATH'], 'ckpt-3')).expect_partial()

# Maps integer class ids -> display names for drawing labelled boxes.
category_index = label_map_util.create_category_index_from_labelmap(paths['LABELMAP'])
24
+
25
+
26
@tf.function
def detect_fn(image):
    """Run one inference pass of the detector on a batched image tensor.

    Args:
        image: float32 tensor of shape (1, H, W, 3), e.g. from
            ``tf.convert_to_tensor(np.expand_dims(img, 0), dtype=tf.float32)``.

    Returns:
        Post-processed detections dict (detection_boxes, detection_scores,
        detection_classes, num_detections, ...), still batched.
    """
    preprocessed, true_shapes = detection_model.preprocess(image)
    raw_output = detection_model.predict(preprocessed, true_shapes)
    return detection_model.postprocess(raw_output, true_shapes)
32
+
33
+
34
def main():
    """Streamlit entry point: upload an image, run detection, show results.

    Renders a file uploader; on upload, runs the SSD detector and displays
    the image annotated with up to 5 boxes scoring above 0.3.
    """
    st.title('Furniture Detection')

    uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "png", "jpeg"])

    if uploaded_file is not None:
        # Force 3-channel RGB: PNG uploads may carry an alpha channel,
        # and a 4-channel array would break the (H, W, 3) model input.
        image = np.array(Image.open(uploaded_file).convert('RGB'))
        st.image(image, caption='Uploaded Image', use_column_width=True)
        st.write("")
        st.write("Detection In Process...")

        # Model expects a float32 batch of shape (1, H, W, 3).
        input_tensor = tf.convert_to_tensor(np.expand_dims(image, 0), dtype=tf.float32)
        detections = detect_fn(input_tensor)

        # Strip the batch dimension and convert tensors to numpy arrays.
        num_detections = int(detections.pop('num_detections'))
        detections = {key: value[0, :num_detections].numpy() for key, value in detections.items()}
        detections['num_detections'] = num_detections
        detections['detection_classes'] = detections['detection_classes'].astype(np.int64)

        # Label-map ids are 1-based; model class outputs are 0-based.
        label_id_offset = 1
        image_np_with_detections = image.copy()

        viz_utils.visualize_boxes_and_labels_on_image_array(
            image_np_with_detections,
            detections['detection_boxes'],
            detections['detection_classes'] + label_id_offset,
            detections['detection_scores'],
            category_index,
            use_normalized_coordinates=True,
            max_boxes_to_draw=5,
            min_score_thresh=.3,
            agnostic_mode=False
        )

        # Fixed caption: previously said 'Detected Teeth', a leftover from a
        # different project — this app detects furniture.
        st.image(image_np_with_detections, caption='Detected Furniture', use_column_width=True)


if __name__ == "__main__":
    main()