suku9 committed on
Commit
60e5c1b
·
verified ·
1 Parent(s): d1dfa2a

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +128 -0
  2. car_damage_detector.ipynb +304 -0
app.py ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AUTOGENERATED! DO NOT EDIT! File to edit: car_damage_detector.ipynb.
2
+
3
+ # %% auto 0
4
+ __all__ = ['assets_path', 'models_path', 'examples_path', 'imagenet_labels', 'model', 'transform', 'catogories', 'title',
5
+ 'description', 'examples', 'learn_damaged_or_not', 'learn_damage_location', 'learn_damage_severity', 'intf',
6
+ 'get_imagenet_classes', 'create_model', 'car_or_not_inference', 'predict', 'main_predictor']
7
+
8
+ # %% car_damage_detector.ipynb 2
9
+ # imports
10
+ # import os
11
+ import timm
12
+ # import json
13
+ import torch
14
+ import gradio as gr
15
+ import pickle as pk
16
+ # from PIL import Image
17
+ import fastbook
18
+ fastbook.setup_book()
19
+
20
+ from fastbook import *
21
+ from fastai.vision.widgets import *
22
+ # from collections import Counter, defaultdict
23
+
24
+ assets_path = 'assets/'
25
+ models_path = 'assets/models/'
26
+ examples_path = 'assets/examples/'
27
+
28
# %% car_damage_detector.ipynb 3
# Imagenet Class
def get_imagenet_classes(path=None):
    """Load the ImageNet label list from a class-index text file.

    Args:
        path: Optional path to the index file. Defaults to the bundled
            ``assets/imagenet_class_index.txt``.

    Returns:
        list[str]: the first (primary) label of each line, in file order.
    """
    if path is None:
        path = assets_path + "imagenet_class_index.txt"
    # Context manager guarantees the handle is closed; the original
    # open(...).read() leaked the file handle until garbage collection.
    with open(path, "r") as f:
        raw_lines = f.read().strip().split('\n')
    # Each line may hold several comma-separated synonyms; keep the first.
    return [line.split(',')[0] for line in raw_lines]

# Labels are loaded once at import time and reused for every inference call.
imagenet_labels = get_imagenet_classes()
40
+
41
# Create Model
def create_model(model_name='vgg16.tv_in1k'):
    """Build a pretrained timm classifier plus its matching input transform.

    Args:
        model_name: timm model identifier (defaults to torchvision VGG-16
            ImageNet-1k weights).

    Returns:
        tuple: (model in eval mode, preprocessing transform).
    """
    classifier = timm.create_model(model_name, pretrained=True).eval()
    # Derive the preprocessing (resize/crop/normalize) these weights expect.
    data_config = timm.data.resolve_data_config(classifier.pretrained_cfg)
    preprocessing = timm.data.create_transform(**data_config)
    return classifier, preprocessing

# Build the classifier and its preprocessing pipeline once at module load.
model, transform = create_model()
52
+
53
# Car or Not : Main Inference Code
catogories = ('Is a Car', 'Not a Car')

# ImageNet labels treated as evidence that the picture shows a car (or a
# car-adjacent subject). Hoisted to module level as a frozenset so the
# collection is built once and membership tests are O(1) — the original
# rebuilt and linearly scanned a 36-element list on every call.
_CAR_LABELSET = frozenset([
    'sports_car', 'minivan', 'convertible', 'beach_wagon', 'limousine',
    'pickup', 'car_wheel', 'grille', 'racer', 'minibus', 'jeep',
    'moving_van', 'tow_truck', 'cab', 'police_van', 'snowplow', 'amphibian',
    'trailer_truck', 'recreational_vehicle', 'ambulance', 'motor_scooter',
    'cassette_player', 'fire_engine', 'car_mirror', 'mobile_home',
    'crash_helmet', 'mouse', 'snowmobile', 'Model_T', 'passenger_car',
    'solar_dish', 'garbage_truck', 'photocopier', 'mountain_tent',
    'half_track', 'speedboat',
])

def car_or_not_inference(input_image):
    """Return 1.0 if the image looks like a car per ImageNet top-5, else 0.0.

    Args:
        input_image: image accepted by the module-level timm ``transform``
            (a PIL image — see the caller's PILImage.create).

    Returns:
        float: 1.0 when any of the top-5 ImageNet predictions is a
        car-related label, 0.0 otherwise.
    """
    # transform image as required for prediction
    image_tensor = transform(input_image)
    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        output = model(image_tensor.unsqueeze(0))
    # convert logits to probabilities
    probabilities = torch.nn.functional.softmax(output[0], dim=0)
    # select top 5 predictions
    _, indices = torch.topk(probabilities, 5)

    for idx in indices:
        if imagenet_labels[idx] in _CAR_LABELSET:
            return 1.0  # dict(zip(catogories, [1.0, 0.0]))

    return 0.0  # dict(zip(catogories, [0.0, 1.0]))
78
+
79
+
80
# %% car_damage_detector.ipynb 5
# Gradio UI metadata.
title = "Car Care"
description = "A vision based car damage identifier."
# Example images shown in the UI: a clean car, a non-car, and a damaged car.
examples = [examples_path+'lambo.jpg', examples_path+'dog.jpg', examples_path+'front_moderate.jpg']

# Three fastai learners exported as pickles: binary damaged/whole check,
# damage location (side), and damage severity. All loaded once at import.
learn_damaged_or_not = load_learner(models_path+'car_damaged_or_not.pkl')
learn_damage_location = load_learner(models_path+'car_damage_side.pkl')
learn_damage_severity = load_learner(models_path+'car_damage_severity.pkl')
88
+
89
def predict(img, learn):
    """Run *learn* on *img* and return only the decoded prediction label."""
    # learn.predict yields (decoded prediction, class index, probabilities);
    # callers here only need the decoded label.
    prediction = learn.predict(img)
    return prediction[0]
93
+
94
def main_predictor(img, progress=gr.Progress()):
    """Full analysis pipeline: car check -> damage check -> location & severity.

    Args:
        img: image payload from the Gradio Image component.
        progress: Gradio progress tracker (injected by the framework).

    Returns:
        str: a multi-line report, or guidance to resubmit a better picture.
    """
    progress((0,4), desc="Starting Analysis...")
    input_image = PILImage.create(img)
    is_car = car_or_not_inference(input_image)

    progress((1,4))
    # Guard clause: bail out early when the picture is not a car at all.
    if not is_car:
        progress((4,4))
        return "Are you sure this is a picture of your car? \nPlease take another picture (try a different angle or lighting) and try again."

    gr.Info("Car check completed.")
    damage_flag = predict(input_image, learn_damaged_or_not)

    progress((2,4))
    # Guard clause: a car, but no visible damage detected.
    if damage_flag != 'damage':
        progress((4,4))
        return "Are you sure your car is damaged ?. \nMake sure you click a clear picture of the damaged portion. \nPlease resubmit the picture"

    gr.Info("Damage check completed.")
    damaged_location = predict(input_image, learn_damage_location)
    progress((3,4))
    gr.Info("Damage Location identified.")
    damaged_severity = predict(input_image, learn_damage_severity)
    progress((4,4), desc="Analysis Complete")
    gr.Info("Damage Severity assessed.")
    return f"""Results: \n Car Check: it's a Car \n Damage Check: Car is Damaged \n Location: {damaged_location} \n Severity: {damaged_severity}"""
122
+
123
+ # input_image = 'assets/examples/severe.jpg'
124
+ # main_predictor(input_image)
125
+
126
# %% car_damage_detector.ipynb 6
# Wire the predictor into a Gradio UI: image in, text report out.
intf = gr.Interface(fn=main_predictor,inputs=gr.Image(),outputs=gr.Textbox(),title=title,description=description,examples=examples)
# share=True also publishes a temporary public URL alongside the local server.
intf.launch(share=True)
car_damage_detector.ipynb ADDED
@@ -0,0 +1,304 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "1db7d295-3145-4c5d-b140-170265d1d28e",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "#|default_exp app"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "code",
15
+ "execution_count": null,
16
+ "id": "ca158c7d-0ff3-4792-9950-5e7cf8665de2",
17
+ "metadata": {},
18
+ "outputs": [],
19
+ "source": [
20
+ "# changes to do :\n",
21
+ "# shorten the first runction / damaged or not\n",
22
+ "# read image in one place\n",
23
+ "# streamline functions"
24
+ ]
25
+ },
26
+ {
27
+ "cell_type": "code",
28
+ "execution_count": null,
29
+ "id": "24a0e983-2c3a-4150-9113-72ff07e77587",
30
+ "metadata": {},
31
+ "outputs": [],
32
+ "source": [
33
+ "#|export\n",
34
+ "# imports\n",
35
+ "# import os\n",
36
+ "import timm\n",
37
+ "# import json\n",
38
+ "import torch\n",
39
+ "import gradio as gr\n",
40
+ "import pickle as pk\n",
41
+ "# from PIL import Image\n",
42
+ "import fastbook\n",
43
+ "fastbook.setup_book()\n",
44
+ "\n",
45
+ "from fastbook import *\n",
46
+ "from fastai.vision.widgets import * \n",
47
+ "# from collections import Counter, defaultdict\n",
48
+ "\n",
49
+ "assets_path = 'assets/'\n",
50
+ "models_path = 'assets/models/'\n",
51
+ "examples_path = 'assets/examples/'"
52
+ ]
53
+ },
54
+ {
55
+ "cell_type": "code",
56
+ "execution_count": null,
57
+ "id": "43d3ad2d-5d60-4458-8441-1919e6a579f7",
58
+ "metadata": {},
59
+ "outputs": [],
60
+ "source": [
61
+ "#|export\n",
62
+ "# Imagenet Class\n",
63
+ "def get_imagenet_classes():\n",
64
+ " # read idx file\n",
65
+ " imagenet_file = open(assets_path+\"imagenet_class_index.txt\", \"r\").read()\n",
66
+ " # seperate elements and onvert string to list\n",
67
+ " imagenet_labels_raw = imagenet_file.strip().split('\\n')\n",
68
+ " # keep first label\n",
69
+ " imagenet_labels = [item.split(',')[0] for item in imagenet_labels_raw]\n",
70
+ " return imagenet_labels\n",
71
+ "\n",
72
+ "imagenet_labels = get_imagenet_classes()\n",
73
+ "\n",
74
+ "# Create Model\n",
75
+ "def create_model(model_name='vgg16.tv_in1k'):\n",
76
+ " # import required model\n",
77
+ " model = timm.create_model(model_name, pretrained=True).eval()\n",
78
+ " # transform data as required by the model\n",
79
+ " transform = timm.data.create_transform(\n",
80
+ " **timm.data.resolve_data_config(model.pretrained_cfg)\n",
81
+ " )\n",
82
+ " return model, transform\n",
83
+ "\n",
84
+ "model, transform = create_model()\n",
85
+ "\n",
86
+ "# Car or Not : Main Inferene Code\n",
87
+ "catogories = ('Is a Car', 'Not a Car')\n",
88
+ "def car_or_not_inference(input_image):\n",
89
+ "\n",
90
+ " # print (\"Validating that this is a picture of a car...\")\n",
91
+ "\n",
92
+ " # retain the top 'n' most occuring items \\\\ n=36\n",
93
+ " top_n_cat_list = ['sports_car', 'minivan', 'convertible', 'beach_wagon', 'limousine', 'pickup', 'car_wheel', 'grille', 'racer', 'minibus', 'jeep', 'moving_van', 'tow_truck', 'cab', 'police_van', 'snowplow', 'amphibian', 'trailer_truck', 'recreational_vehicle', 'ambulance', 'motor_scooter', 'cassette_player', 'fire_engine', 'car_mirror', 'mobile_home', 'crash_helmet', 'mouse', 'snowmobile', 'Model_T', 'passenger_car', 'solar_dish', 'garbage_truck', 'photocopier', 'mountain_tent', 'half_track', 'speedboat']\n",
94
+ "\n",
95
+ " # image = PILImage.create(input_image)\n",
96
+ " # transform image as required for prediction\n",
97
+ " image_tensor = transform(input_image)\n",
98
+ " # predict on image\n",
99
+ " output = model(image_tensor.unsqueeze(0))\n",
100
+ " # get probabilites\n",
101
+ " probabilities = torch.nn.functional.softmax(output[0], dim=0)\n",
102
+ " # select top 5 probs\n",
103
+ " _, indices = torch.topk(probabilities, 5)\n",
104
+ "\n",
105
+ " for idx in indices:\n",
106
+ " pred_label = imagenet_labels[idx]\n",
107
+ " if pred_label in top_n_cat_list:\n",
108
+ " return 1.0 #dict(zip(catogories, [1.0, 0.0])) #\"Validation complete - proceed to damage evaluation\"\n",
109
+ "\n",
110
+ " return 0.0 #dict(zip(catogories, [0.0, 1.0]))#\"Are you sure this is a picture of your car? Please take another picture (try a different angle or lighting) and try again.\"\n"
111
+ ]
112
+ },
113
+ {
114
+ "cell_type": "code",
115
+ "execution_count": null,
116
+ "id": "29a9aacc-fcb0-4dd0-a7a9-82d56ceccb3c",
117
+ "metadata": {},
118
+ "outputs": [],
119
+ "source": [
120
+ "# input_image = examples_path+'rolls.jpg'\n",
121
+ "# car_or_not_inference(input_image)\n",
122
+ "\n",
123
+ "# input_image = 'assets/examples/severe.jpg'\n",
124
+ "# print(predict(input_image, learn_damaged_or_not))\n",
125
+ "# print(predict(input_image, learn_damage_location))\n",
126
+ "# print(predict(input_image, learn_damage_severity))"
127
+ ]
128
+ },
129
+ {
130
+ "cell_type": "code",
131
+ "execution_count": null,
132
+ "id": "4e40a3a0-ad15-40ae-b4b4-a0afd610bc82",
133
+ "metadata": {},
134
+ "outputs": [],
135
+ "source": [
136
+ "#|export\n",
137
+ "\n",
138
+ "title = \"Car Care\"\n",
139
+ "description = \"A vision based car damage identifier.\"\n",
140
+ "examples = [examples_path+'lambo.jpg', examples_path+'dog.jpg', examples_path+'front_moderate.jpg']\n",
141
+ "\n",
142
+ "learn_damaged_or_not = load_learner(models_path+'car_damaged_or_not.pkl')\n",
143
+ "learn_damage_location = load_learner(models_path+'car_damage_side.pkl')\n",
144
+ "learn_damage_severity = load_learner(models_path+'car_damage_severity.pkl')\n",
145
+ "\n",
146
+ "def predict(img, learn):\n",
147
+ " # img = PILImage.create(img)\n",
148
+ " pred, idx, probs = learn.predict(img)\n",
149
+ " return pred#, float(probs[idx])\n",
150
+ "\n",
151
+ "def main_predictor(img, progress=gr.Progress()):\n",
152
+ " \n",
153
+ " progress((0,4), desc=\"Starting Analysis...\") \n",
154
+ " input_image = PILImage.create(img)\n",
155
+ " car_or_not = car_or_not_inference(input_image)\n",
156
+ "\n",
157
+ " progress((1,4))\n",
158
+ " if car_or_not:\n",
159
+ " gr.Info(\"Car check completed.\")\n",
160
+ " damaged_or_not = predict(input_image, learn_damaged_or_not)\n",
161
+ "\n",
162
+ " progress((2,4))\n",
163
+ " if damaged_or_not == 'damage':\n",
164
+ " gr.Info(\"Damage check completed.\")\n",
165
+ " damaged_location = predict(input_image, learn_damage_location)\n",
166
+ " progress((3,4))\n",
167
+ " gr.Info(\"Damage Location identified.\")\n",
168
+ " damaged_severity = predict(input_image, learn_damage_severity)\n",
169
+ " progress((4,4), desc=\"Analysis Complete\")\n",
170
+ " gr.Info(\"Damage Severity assessed.\")\n",
171
+ " # refer below sections for Location and Severity\n",
172
+ " return f\"\"\"Results: \\n Car Check: it's a Car \\n Damage Check: Car is Damaged \\n Location: {damaged_location} \\n Severity: {damaged_severity}\"\"\"\n",
173
+ " else:\n",
174
+ " progress((4,4))\n",
175
+ " return \"Are you sure your car is damaged ?. \\nMake sure you click a clear picture of the damaged portion. \\nPlease resubmit the picture\"\n",
176
+ " else:\n",
177
+ " progress((4,4))\n",
178
+ " return \"Are you sure this is a picture of your car? \\nPlease take another picture (try a different angle or lighting) and try again.\"\n",
179
+ "\n",
180
+ "# input_image = 'assets/examples/severe.jpg'\n",
181
+ "# main_predictor(input_image)"
182
+ ]
183
+ },
184
+ {
185
+ "cell_type": "code",
186
+ "execution_count": null,
187
+ "id": "0955b1fb-5216-464a-bc30-618751136fc6",
188
+ "metadata": {},
189
+ "outputs": [
190
+ {
191
+ "name": "stdout",
192
+ "output_type": "stream",
193
+ "text": [
194
+ "Running on local URL: http://127.0.0.1:7860\n",
195
+ "\n",
196
+ "Could not create share link. Please check your internet connection or our status page: https://status.gradio.app.\n"
197
+ ]
198
+ },
199
+ {
200
+ "data": {
201
+ "text/html": [
202
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
203
+ ],
204
+ "text/plain": [
205
+ "<IPython.core.display.HTML object>"
206
+ ]
207
+ },
208
+ "metadata": {},
209
+ "output_type": "display_data"
210
+ },
211
+ {
212
+ "data": {
213
+ "text/plain": []
214
+ },
215
+ "execution_count": null,
216
+ "metadata": {},
217
+ "output_type": "execute_result"
218
+ }
219
+ ],
220
+ "source": [
221
+ "#|export\n",
222
+ "intf = gr.Interface(fn=main_predictor,inputs=gr.Image(),outputs=gr.Textbox(),title=title,description=description,examples=examples)\n",
223
+ "intf.launch(share=True)"
224
+ ]
225
+ },
226
+ {
227
+ "cell_type": "code",
228
+ "execution_count": null,
229
+ "id": "19a074b9-9edf-441b-8ff9-8fb91fd07952",
230
+ "metadata": {},
231
+ "outputs": [],
232
+ "source": [
233
+ "import nbdev\n",
234
+ "nbdev.export.nb_export('car_damage_detector.ipynb','.')"
235
+ ]
236
+ },
237
+ {
238
+ "cell_type": "code",
239
+ "execution_count": null,
240
+ "id": "416c2277-fac4-4cee-9adb-7fc80026ca2a",
241
+ "metadata": {},
242
+ "outputs": [],
243
+ "source": []
244
+ },
245
+ {
246
+ "cell_type": "code",
247
+ "execution_count": null,
248
+ "id": "384b395a-20fa-4c3c-a8a5-fd0a4e529a51",
249
+ "metadata": {},
250
+ "outputs": [],
251
+ "source": []
252
+ },
253
+ {
254
+ "cell_type": "code",
255
+ "execution_count": null,
256
+ "id": "e39de30a-d5b5-4140-9dde-9e84b7510eef",
257
+ "metadata": {},
258
+ "outputs": [
259
+ {
260
+ "name": "stdout",
261
+ "output_type": "stream",
262
+ "text": [
263
+ "Loaded as API: https://suku9-car-damage-detection.hf.space ✔\n",
264
+ "Are you sure this is a picture of your car? \n",
265
+ "Please take another picture (try a different angle or lighting) and try again.\n"
266
+ ]
267
+ }
268
+ ],
269
+ "source": [
270
+ "from gradio_client import Client, file\n",
271
+ "client = Client(\"suku9/Car_Damage_Detection\")\n",
272
+ "\n",
273
+ "input_image = 'assets/examples/dog.jpg'\n",
274
+ "res = client.predict(file(input_image),api_name=\"/predict\")\n",
275
+ "print(res)"
276
+ ]
277
+ },
278
+ {
279
+ "cell_type": "code",
280
+ "execution_count": null,
281
+ "id": "963bf22b-3280-4d0a-ab2e-9c189b9705e4",
282
+ "metadata": {},
283
+ "outputs": [],
284
+ "source": []
285
+ },
286
+ {
287
+ "cell_type": "code",
288
+ "execution_count": null,
289
+ "id": "ebd92af9-434c-4433-ac71-263e18bf66d0",
290
+ "metadata": {},
291
+ "outputs": [],
292
+ "source": []
293
+ }
294
+ ],
295
+ "metadata": {
296
+ "kernelspec": {
297
+ "display_name": "python3",
298
+ "language": "python",
299
+ "name": "python3"
300
+ }
301
+ },
302
+ "nbformat": 4,
303
+ "nbformat_minor": 5
304
+ }