# Import necessary libraries
import numpy as np
import cv2
import json
import gradio as gr
from onnx import hub
import onnxruntime as ort
import tempfile
import onnx

# Load the ONNX model from the ONNX Model Zoo
model = hub.load("efficientnet-lite4")

# Save the ModelProto object to a temporary file
with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as temp_file:
    onnx.save(model, temp_file.name)
    model_path = temp_file.name
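
# Optional sanity check (illustrative addition, not in the original app):
# onnx.checker raises if the downloaded ModelProto is malformed.
onnx.checker.check_model(model)
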
# Load the ImageNet labels: a JSON mapping of class-index strings to names,
# e.g. {"0": "tench, Tinca tinca", ...}. The file must ship alongside the app
# (a Colab drive path such as /content/drive/... is not available on Spaces).
labels = json.load(open("labels_map.txt", "r"))

# Define a function to preprocess the image for the EfficientNet-Lite4 model
def pre_process_edgetpu(img, dims):
    # Unpack the target dimensions
    output_height, output_width, _ = dims
    # Resize the image while maintaining aspect ratio
    img = resize_with_aspectratio(
        img,
        output_height,
        output_width,
        inter_pol=cv2.INTER_LINEAR
    )
    # Crop the image from the center
    img = center_crop(img, output_height, output_width)
    # Convert the image to a float32 numpy array
    img = np.asarray(img, dtype='float32')
    # Normalize pixel values from [0, 255] to roughly [-1, 1]: (x - 127) / 128
    img -= [127.0, 127.0, 127.0]
    img /= [128.0, 128.0, 128.0]
    return img

# Define a function to resize the image while maintaining aspect ratio
def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):
    # Get the original image dimensions
    height, width, _ = img.shape
    # Inflate the target size by 100/scale so the later center crop trims it back
    new_height = int(100. * out_height / scale)
    new_width = int(100. * out_width / scale)
    # Scale the shorter side to the new size, keeping the aspect ratio
    if height > width:
        w = new_width
        h = int(new_height * height / width)
    else:
        h = new_height
        w = int(new_width * width / height)
    # Resize the image
    img = cv2.resize(img, (w, h), interpolation=inter_pol)
    return img
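
# Illustrative example (not part of the original app): with the default
# scale of 87.5 and a 224x224 target, the shorter side is resized to
# int(100 * 224 / 87.5) = 256 pixels, e.g. for a portrait 600x300 image:
_tall = np.zeros((600, 300, 3), dtype=np.uint8)
assert resize_with_aspectratio(_tall, 224, 224).shape[:2] == (512, 256)
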
# Define a function to crop the image from the center
def center_crop(img, out_height, out_width):
    # Get the image dimensions
    height, width, _ = img.shape
    # Calculate the crop box around the image center
    left = int((width - out_width) / 2)
    right = int((width + out_width) / 2)
    top = int((height - out_height) / 2)
    bottom = int((height + out_height) / 2)
    # Crop the image
    img = img[top:bottom, left:right]
    return img
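
# End-to-end preprocessing check (illustrative addition, not in the original
# app): a dummy 300x400 RGB image should come out 224x224 and float32.
_dummy = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
_pre = pre_process_edgetpu(_dummy, (224, 224, 3))
assert _pre.shape == (224, 224, 3) and _pre.dtype == np.float32
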
# Create an ONNX Runtime inference session
sess = ort.InferenceSession(model_path)
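
# Optional check (illustrative addition): print the tensor names the code
# below relies on; the model zoo's EfficientNet-Lite4 exposes input
# "images:0" and output "Softmax:0".
print([i.name for i in sess.get_inputs()], [o.name for o in sess.get_outputs()])
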
# Define the main inference function
def inference(img):
    # Read the image file
    img = cv2.imread(img)
    # Convert BGR to RGB color space
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # Preprocess the image
    img = pre_process_edgetpu(img, (224, 224, 3))
    # Add batch dimension to the image
    img_batch = np.expand_dims(img, axis=0)
    # Run inference using the ONNX model
    results = sess.run(["Softmax:0"], {"images:0": img_batch})[0]
    # Get the top 5 predictions, highest score first
    result = reversed(results[0].argsort()[-5:])
    # Create a dictionary to store results
    resultdic = {}
    for r in result:
        resultdic[labels[str(r)]] = float(results[0][r])
    return resultdic
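
# Optional local smoke test (assumes catonnx.jpg sits next to the app; left
# commented out so only the Gradio interface runs on startup):
# print(inference("catonnx.jpg"))
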
# Set up the Gradio interface
title = "EfficientNet-Lite4"
description = """EfficientNet-Lite4 is the largest and most accurate variant of the
EfficientNet-Lite family. It is an integer-only quantized model that produces the highest
accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy,
while still running in real time (e.g. 30 ms/image) on a Pixel 4 CPU."""
examples = [['catonnx.jpg']]

# Launch the Gradio interface
gr.Interface(
    inference,
    gr.Image(type="filepath"),
    "label",
    title=title,
    description=description,
    examples=examples
).launch()