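# Gradio Space: wheelchair vs. motorized wheelchair detection.
# Loads a TensorFlow Object Detection SavedModel (SSD MobileNet V2 320x320)
# from the Hugging Face Hub and serves image and video inference tabs.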
import matplotlib.pyplot as plt
import numpy as np
from six import BytesIO
from PIL import Image
import tensorflow as tf
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as viz_utils
from object_detection.utils import ops as utils_op
import tarfile
import wget
import gradio as gr
from huggingface_hub import snapshot_download
import os
import cv2
# PATH_TO_LABELS = 'data/label_map.pbtxt'
PATH_TO_LABELS = 'label_map.pbtxt'
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)
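# category_index maps each class id in the label map to its display name,
# which visualize_boxes_and_labels_on_image_array uses to label the boxes.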
def pil_image_as_numpy_array(pilimg):
    # Convert a PIL image to a batched numpy array of shape (1, H, W, 3)
    img_array = tf.keras.utils.img_to_array(pilimg)
    img_array = np.expand_dims(img_array, axis=0)
    return img_array
def load_image_into_numpy_array(path):
    image_data = tf.io.gfile.GFile(path, 'rb').read()
    image = Image.open(BytesIO(image_data))
    return pil_image_as_numpy_array(image)
def load_model():
    # REPO_ID is defined below, before load_model() is called at startup
    download_dir = snapshot_download(REPO_ID)
    saved_model_dir = os.path.join(download_dir, "saved_model")
    detection_model = tf.saved_model.load(saved_model_dir)
    return detection_model
# def load_model2():
#     wget.download("https://nyp-aicourse.s3-ap-southeast-1.amazonaws.com/pretrained-models/balloon_model.tar.gz")
#     tarfile.open("balloon_model.tar.gz").extractall()
#     model_dir = 'saved_model'
#     detection_model = tf.saved_model.load(str(model_dir))
#     return detection_model

# samples_folder = 'test_samples'
# image_path = 'test_samples/sample_balloon.jpeg'
def predict(pilimg):
    image_np = pil_image_as_numpy_array(pilimg)
    return predict2(image_np)
def predict2(image_np):
    results = detection_model(image_np)

    # Different object detection models return additional outputs;
    # pull everything back into numpy arrays
    result = {key: value.numpy() for key, value in results.items()}

    label_id_offset = 0
    image_np_with_detections = image_np.copy()

    viz_utils.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections[0],
        result['detection_boxes'][0],
        (result['detection_classes'][0] + label_id_offset).astype(int),
        result['detection_scores'][0],
        category_index,
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.60,
        agnostic_mode=False,
        line_thickness=2)

    result_pil_img = tf.keras.utils.array_to_img(image_np_with_detections[0])
    return result_pil_img
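# Quick local sanity check for the image path (a sketch; assumes the
# sample1.jpg bundled for the Gradio examples below is present):
# annotated = predict(Image.open('sample1.jpg'))
# annotated.save('sample1_detections.jpg')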
###
def predict_on_video(video_in_filepath, video_out_filepath, detection_model, category_index):
    video_reader = cv2.VideoCapture(video_in_filepath)
    frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = video_reader.get(cv2.CAP_PROP_FPS)

    video_writer = cv2.VideoWriter(
        video_out_filepath,
        cv2.VideoWriter_fourcc(*'mp4v'),
        fps,
        (frame_w, frame_h)
    )

    while True:
        ret, frame = video_reader.read()
        if not ret:
            break  # the video is finished

        # OpenCV decodes frames as BGR; convert to RGB for the model,
        # then back to BGR so the writer stores the correct colours
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        processed_frame = predict(frame_rgb)
        processed_frame_np = cv2.cvtColor(np.array(processed_frame), cv2.COLOR_RGB2BGR)
        video_writer.write(processed_frame_np)

    # Release the reader/writer and close any OpenCV windows
    video_reader.release()
    video_writer.release()
    cv2.destroyAllWindows()
    cv2.waitKey(1)
# Process an uploaded video and return the path of the annotated copy
def process_video(video_path):
    output_path = "output_video.mp4"  # output path for the processed video
    predict_on_video(video_path, output_path, detection_model, category_index)
    return output_path
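# Note: output_video.mp4 is written to the current working directory and
# handed back to Gradio, which serves it as a downloadable file.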
| # Specify paths to example images | |
| sample_images = [["sample1.jpg"], ["sample2.jpg"], | |
| ["sample3.jpg"] | |
| ] | |
###
REPO_ID = "gregarific/assignmodel"
detection_model = load_model()  # loaded once at startup and shared by both tabs

# pil_image = Image.open(image_path)
# image_arr = pil_image_as_numpy_array(pil_image)
###
tab1 = gr.Interface(fn=predict,
                    inputs=gr.Image(type="pil"),
                    outputs=gr.Image(type="pil"),
                    examples=sample_images,
                    title="Object Detection (Wheelchair & Motorized Wheelchair)",
                    description='Model Applied: SSD MobileNet V2 320x320.'
                    )
# gr.Interface(fn=predict,
#              inputs=gr.Image(type="pil"),
#              outputs=gr.Image(type="pil")
#              ).launch(share=True)
tab2 = gr.Interface(
    fn=process_video,
    inputs=gr.File(label="Upload a Video"),
    outputs=gr.File(label="Output Analysis"),
    examples=["Wheelchair Snippet.mp4"],
    title='Object Detection (Wheelchair & Motorized Wheelchair)',
    description='Model Applied: SSD MobileNet V2 320x320'
)
iface = gr.TabbedInterface([tab1, tab2], tab_names=['Image', 'Video'], title='Wheelchair Type Detection')
iface.launch(share=True)