# suku9's picture
# Upload 2 files
# 60e5c1b verified
# AUTOGENERATED! DO NOT EDIT! File to edit: car_damage_detector.ipynb.
# %% auto 0
# Public API of this nbdev-exported module: UI constants, the timm backbone and
# its transform, the three fastai learners, and the inference entry points.
__all__ = ['assets_path', 'models_path', 'examples_path', 'imagenet_labels', 'model', 'transform', 'catogories', 'title',
'description', 'examples', 'learn_damaged_or_not', 'learn_damage_location', 'learn_damage_severity', 'intf',
'get_imagenet_classes', 'create_model', 'car_or_not_inference', 'predict', 'main_predictor']
# %% car_damage_detector.ipynb 2
# imports
# import os
import timm
# import json
import torch
import gradio as gr
import pickle as pk
# from PIL import Image
import fastbook
fastbook.setup_book()
from fastbook import *
from fastai.vision.widgets import *
# from collections import Counter, defaultdict
# Base directory for bundled static assets (ImageNet label file, models, examples).
assets_path = 'assets/'
# Directory holding the exported fastai learner .pkl files.
models_path = 'assets/models/'
# Directory holding the sample images offered in the Gradio UI.
examples_path = 'assets/examples/'
# %% car_damage_detector.ipynb 3
# Imagenet Class
def get_imagenet_classes():
    """Read the ImageNet class-index file and return the list of class labels.

    Each line of ``assets/imagenet_class_index.txt`` is expected to be a
    comma-separated record whose first field is the class label; only that
    first field is kept, in file order (so list index == ImageNet class id).

    Returns:
        list[str]: one label per ImageNet class.
    """
    # 'with' guarantees the handle is closed (the original open(...) leaked it).
    with open(assets_path + "imagenet_class_index.txt", "r") as label_file:
        # Separate lines; drop any trailing blank line with strip().
        raw_lines = label_file.read().strip().split('\n')
    # Keep only the first comma-separated field of every record.
    return [line.split(',')[0] for line in raw_lines]
imagenet_labels = get_imagenet_classes()
# Create Model
def create_model(model_name='vgg16.tv_in1k'):
    """Instantiate a pretrained timm classifier plus its matching input transform.

    Args:
        model_name: timm model identifier; defaults to VGG-16 with
            torchvision ImageNet-1k weights.

    Returns:
        tuple: ``(model, transform)`` — the network in eval mode and the
        preprocessing transform derived from its pretrained config.
    """
    # Load the pretrained network and switch it to inference mode.
    net = timm.create_model(model_name, pretrained=True).eval()
    # Derive the exact preprocessing pipeline the checkpoint was trained with.
    data_cfg = timm.data.resolve_data_config(net.pretrained_cfg)
    preprocess = timm.data.create_transform(**data_cfg)
    return net, preprocess
model, transform = create_model()
# Car or Not : Main Inference Code
catogories = ('Is a Car', 'Not a Car')

# ImageNet labels accepted as evidence the photo shows a car/vehicle
# (reportedly the 36 most frequent top predictions on car photos; note a few
# non-vehicle labels such as 'mouse' and 'photocopier' are deliberately kept).
# Hoisted to module level and stored as a frozenset: built once, O(1) lookups.
_CAR_LABEL_SET = frozenset([
    'sports_car', 'minivan', 'convertible', 'beach_wagon', 'limousine',
    'pickup', 'car_wheel', 'grille', 'racer', 'minibus', 'jeep', 'moving_van',
    'tow_truck', 'cab', 'police_van', 'snowplow', 'amphibian', 'trailer_truck',
    'recreational_vehicle', 'ambulance', 'motor_scooter', 'cassette_player',
    'fire_engine', 'car_mirror', 'mobile_home', 'crash_helmet', 'mouse',
    'snowmobile', 'Model_T', 'passenger_car', 'solar_dish', 'garbage_truck',
    'photocopier', 'mountain_tent', 'half_track', 'speedboat',
])

def car_or_not_inference(input_image):
    """Decide whether *input_image* looks like a car/vehicle.

    Runs the module-level timm ``model`` on the image and inspects the top-5
    ImageNet predictions; the image passes if any of them is a vehicle label.

    Args:
        input_image: image accepted by the module-level ``transform``
            (presumably a PIL image — TODO confirm against caller).

    Returns:
        float: 1.0 if a vehicle label appears in the top-5 predictions,
        else 0.0.
    """
    # Transform image as required for prediction.
    image_tensor = transform(input_image)
    # Pure inference: no_grad skips building the autograd graph.
    with torch.no_grad():
        output = model(image_tensor.unsqueeze(0))
    # Convert logits to probabilities.
    probabilities = torch.nn.functional.softmax(output[0], dim=0)
    # Select the top-5 most likely classes.
    _, indices = torch.topk(probabilities, 5)
    for idx in indices:
        if imagenet_labels[idx] in _CAR_LABEL_SET:
            return 1.0
    return 0.0
# %% car_damage_detector.ipynb 5
# Strings shown on the Gradio page.
title = "Car Care"
description = "A vision based car damage identifier."
# Sample inputs offered in the interface (a car, a non-car, a damaged car).
examples = [examples_path+'lambo.jpg', examples_path+'dog.jpg', examples_path+'front_moderate.jpg']
# Exported fastai learners for each pipeline stage: damaged-or-not binary
# classifier, damage-location classifier, and damage-severity classifier.
learn_damaged_or_not = load_learner(models_path+'car_damaged_or_not.pkl')
learn_damage_location = load_learner(models_path+'car_damage_side.pkl')
learn_damage_severity = load_learner(models_path+'car_damage_severity.pkl')
def predict(img, learn):
    """Run a learner on *img* and return just the predicted label.

    Args:
        img: input accepted by ``learn.predict``.
        learn: object exposing a fastai-style ``predict`` method returning a
            ``(prediction, index, probabilities)`` triple.

    Returns:
        The first element of the learner's prediction triple.
    """
    # Callers only need the label; drop the class index and probabilities.
    return learn.predict(img)[0]
def main_predictor(img, progress=gr.Progress()):
    """Run the full damage-analysis pipeline on one image.

    Pipeline: car/not-car gate -> damaged/undamaged gate -> damage location
    -> damage severity, reporting progress and toasts to the Gradio UI along
    the way.

    Args:
        img: image input from the Gradio ``Image`` component.
        progress: Gradio progress tracker (injected by the framework).

    Returns:
        str: a multi-line result summary, or a retry message when the image
        fails one of the two gates.
    """
    progress((0, 4), desc="Starting Analysis...")
    input_image = PILImage.create(img)
    is_car = car_or_not_inference(input_image)
    progress((1, 4))

    # Guard 1: reject images that don't look like a car at all.
    if not is_car:
        progress((4, 4))
        return "Are you sure this is a picture of your car? \nPlease take another picture (try a different angle or lighting) and try again."
    gr.Info("Car check completed.")

    damage_flag = predict(input_image, learn_damaged_or_not)
    progress((2, 4))

    # Guard 2: stop early when the car appears undamaged.
    if damage_flag != 'damage':
        progress((4, 4))
        return "Are you sure your car is damaged ?. \nMake sure you click a clear picture of the damaged portion. \nPlease resubmit the picture"
    gr.Info("Damage check completed.")

    damaged_location = predict(input_image, learn_damage_location)
    progress((3, 4))
    gr.Info("Damage Location identified.")

    damaged_severity = predict(input_image, learn_damage_severity)
    progress((4, 4), desc="Analysis Complete")
    gr.Info("Damage Severity assessed.")
    # Refer below sections for Location and Severity.
    return f"""Results: \n Car Check: it's a Car \n Damage Check: Car is Damaged \n Location: {damaged_location} \n Severity: {damaged_severity}"""
# input_image = 'assets/examples/severe.jpg'
# main_predictor(input_image)
# %% car_damage_detector.ipynb 6
# Wire the predictor into a Gradio UI and publish it with a shareable link.
intf = gr.Interface(
    fn=main_predictor,
    inputs=gr.Image(),
    outputs=gr.Textbox(),
    title=title,
    description=description,
    examples=examples,
)
intf.launch(share=True)