import matplotlib.pyplot as plt
import requests, validators
import torch
import pathlib
import numpy as np
from PIL import Image
import cv2 as cv
from transformers import DetrFeatureExtractor, DetrForSegmentation, MaskFormerImageProcessor, MaskFormerForInstanceSegmentation
# from transformers.models.detr.feature_extraction_detr import rgb_to_id
from transformers.image_transforms import rgb_to_id

TEST_IMAGE = Image.open(r"images/9999999_00783_d_0000358.jpg")
MODEL_NAME_DETR = "facebook/detr-resnet-50-panoptic"
MODEL_NAME_MASKFORMER = "facebook/maskformer-swin-large-coco"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
#######
# Parameters
#######
image = TEST_IMAGE
model_name = MODEL_NAME_MASKFORMER
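# The DETR imports above (DetrFeatureExtractor, DetrForSegmentation, rgb_to_id)
# are for the alternative panoptic path with MODEL_NAME_DETR. A commented-out
# sketch of that path, following the classic transformers DETR panoptic example
# (variable names here are mine, and `import io` would also be needed):
# feature_extractor = DetrFeatureExtractor.from_pretrained(MODEL_NAME_DETR)
# detr_model = DetrForSegmentation.from_pretrained(MODEL_NAME_DETR)
# detr_inputs = feature_extractor(images=image, return_tensors="pt")
# detr_outputs = detr_model(**detr_inputs)
# processed_sizes = torch.as_tensor(detr_inputs["pixel_values"].shape[-2:]).unsqueeze(0)
# detr_result = feature_extractor.post_process_panoptic(detr_outputs, processed_sizes)[0]
# panoptic_seg = Image.open(io.BytesIO(detr_result["png_string"]))
# panoptic_id_map = rgb_to_id(np.array(panoptic_seg, dtype=np.uint8))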
# Starting with MaskFormer
processor = MaskFormerImageProcessor.from_pretrained(model_name)  # <class 'transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor'>
# DIR() --> ['__call__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__',
# '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__',
# '__weakref__', '_auto_class', '_create_repo', '_get_files_timestamps', '_max_size', '_pad_image', '_preprocess', '_preprocess_image', '_preprocess_mask', '_processor_class',
# '_set_processor_class', '_upload_modified_files', 'center_crop', 'convert_segmentation_map_to_binary_masks', 'do_normalize', 'do_reduce_labels', 'do_rescale', 'do_resize',
# 'encode_inputs', 'fetch_images', 'from_dict', 'from_json_file', 'from_pretrained', 'get_image_processor_dict', 'ignore_index', 'image_mean', 'image_std', 'model_input_names',
# 'normalize', 'pad', 'post_process_instance_segmentation', 'post_process_panoptic_segmentation', 'post_process_segmentation', 'post_process_semantic_segmentation', 'preprocess',
# 'push_to_hub', 'register_for_auto_class', 'resample', 'rescale', 'rescale_factor', 'resize', 'save_pretrained', 'size', 'size_divisor', 'to_dict', 'to_json_file', 'to_json_string']
model = MaskFormerForInstanceSegmentation.from_pretrained(model_name)  # <class 'transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentation'>
# DIR() for model was too big to paste here
model.to(DEVICE)
# img = np.array(TEST_IMAGE)
inputs = processor(images=image, return_tensors="pt")  # <class 'transformers.image_processing_utils.BatchFeature'>
# DIR() --> ['_MutableMapping__marker', '__abstractmethods__', '__class__', '__contains__', '__copy__', '__delattr__', '__delitem__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__',
# '__ge__', '__getattr__', '__getattribute__', '__getitem__', '__getstate__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__', '__le__', '__len__', '__lt__',
# '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__setattr__', '__setitem__', '__setstate__', '__sizeof__', '__slots__', '__str__',
# '__subclasshook__', '__weakref__', '_abc_impl', '_get_is_as_tensor_fns', 'clear', 'convert_to_tensors', 'copy', 'data', 'fromkeys', 'get', 'items', 'keys', 'pop', 'popitem',
# 'setdefault', 'to', 'update', 'values']
inputs = inputs.to(DEVICE)
with torch.no_grad():  # inference only, so skip building the autograd graph
    outputs = model(**inputs)  # <class 'transformers.models.maskformer.modeling_maskformer.MaskFormerForInstanceSegmentationOutput'>
# Each element of this class is a <class 'torch.Tensor'>
# DIR() --> ['__annotations__', '__class__', '__contains__', '__dataclass_fields__', '__dataclass_params__', '__delattr__', '__delitem__', '__dict__', '__dir__',
# '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__iter__',
# '__le__', '__len__', '__lt__', '__module__', '__ne__', '__new__', '__post_init__', '__reduce__', '__reduce_ex__', '__repr__', '__reversed__', '__setattr__',
# '__setitem__', '__sizeof__', '__str__', '__subclasshook__', 'attentions', 'auxiliary_logits', 'class_queries_logits', 'clear', 'copy', 'encoder_hidden_states',
# 'encoder_last_hidden_state', 'fromkeys', 'get', 'hidden_states', 'items', 'keys', 'loss', 'masks_queries_logits', 'move_to_end', 'pixel_decoder_hidden_states',
# 'pixel_decoder_last_hidden_state', 'pop', 'popitem', 'setdefault', 'to_tuple', 'transformer_decoder_hidden_states', 'transformer_decoder_last_hidden_state',
# 'update', 'values']
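# Shapes of the two tensors the post-processing below consumes (per the MaskFormer docs):
#   outputs.class_queries_logits -> (batch_size, num_queries, num_labels + 1)
#   outputs.masks_queries_logits -> (batch_size, num_queries, height, width)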
results = processor.post_process_panoptic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
# <class 'dict'>
# Keys: dict_keys(['segmentation', 'segments_info'])
# type(results["segments_info"]) --> list
# type(results["segmentation"]) --> <class 'torch.Tensor'>
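# A quick sketch of inspecting the panoptic result: per the transformers docs,
# each entry of results["segments_info"] is a dict with "id", "label_id",
# "was_fused" and "score", where "label_id" indexes into model.config.id2label.
for segment in results["segments_info"]:
    print(f'segment {segment["id"]}: {model.config.id2label[segment["label_id"]]} (score {segment["score"]:.3f})')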
def show_mask_for_number(map_to_use, label_id):
    """
    Show the binary mask for one segment id.
    map_to_use: pass in `results["segmentation"]`
    """
    # .cpu() is a no-op for tensors already on the CPU, so no device check is needed
    mask = map_to_use.cpu().numpy() == label_id
    visual_mask = (mask * 255).astype(np.uint8)
    visual_mask = Image.fromarray(visual_mask)
    plt.imshow(visual_mask)
    plt.show()
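# Example usage, assuming the model found at least one segment:
# show_mask_for_number(results["segmentation"], results["segments_info"][0]["id"])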
def show_mask_for_number_over_image(map_to_use, label_id, image_object):
    """
    Overlay the binary mask for one segment id on top of the image.
    map_to_use: pass in `results["segmentation"]`
    """
    mask = map_to_use.cpu().numpy() == label_id
    visual_mask = (mask * 255).astype(np.uint8)
    visual_mask = Image.fromarray(visual_mask)
    plt.imshow(image_object)
    plt.imshow(visual_mask, alpha=0.25)
    plt.show()
def get_coordinates_for_bb_simple(map_to_use, label_id):
    """
    Return the corners of the segment's axis-aligned bounding box.
    map_to_use: pass in `results["segmentation"]`
    """
    mask = map_to_use.cpu().numpy() == label_id
    # np.where on a 2D mask returns (row indices, col indices); the names x/y
    # below follow that (row, col) convention, as does the rest of this file
    x, y = np.where(mask)
    x_max, x_min = x.max(), x.min()
    y_max, y_min = y.max(), y.min()
    return (x_min, y_min), (x_max, y_max)
def make_simple_box(left_top, right_bottom, map_size):
    """
    Draw just the outline of a bounding box on an otherwise empty mask.
    map_size: the segmentation map's shape, e.g. `results["segmentation"].shape`
    """
    full_mask = np.full(map_size, False)
    left_x, top_y = left_top
    right_x, bottom_y = right_bottom
    full_mask[left_x:right_x, top_y] = True     # left edge (one column, all box rows)
    full_mask[left_x:right_x, bottom_y] = True  # right edge
    full_mask[left_x, top_y:bottom_y] = True    # top edge (one row, all box columns)
    full_mask[right_x, top_y:bottom_y] = True   # bottom edge
    visual_mask = (full_mask * 255).astype(np.uint8)
    visual_mask = Image.fromarray(visual_mask)
    plt.imshow(visual_mask)
    plt.show()
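# Example chaining the two helpers above (label id 1 is hypothetical; take a real
# one from results["segments_info"]). The segmentation map's shape doubles as map_size:
# lt, rb = get_coordinates_for_bb_simple(results["segmentation"], 1)
# make_simple_box(lt, rb, results["segmentation"].shape)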
def test(map_to_use, label_id):
    """
    Show the segment mask with its bounding-box outline drawn in gray.
    map_to_use: pass in `results["segmentation"]`
    """
    # Use a float mask: assigning .5 into a boolean array would just coerce to
    # True, so the box outline would render white instead of gray
    mask = (map_to_use.cpu().numpy() == label_id).astype(np.float32)
    lt, rb = get_coordinates_for_bb_simple(map_to_use, label_id)
    left_x, top_y = lt
    right_x, bottom_y = rb
    mask[left_x:right_x, top_y] = .5
    mask[left_x:right_x, bottom_y] = .5
    mask[left_x, top_y:bottom_y] = .5
    mask[right_x, top_y:bottom_y] = .5
    visual_mask = (mask * 255).astype(np.uint8)
    visual_mask = Image.fromarray(visual_mask)
    plt.imshow(visual_mask)
    plt.show()
def contour_map(map_to_use, label_id):
    """
    Return the OpenCV contours (and their hierarchy) of one segment's mask.
    map_to_use: pass in `results["segmentation"]`
    """
    mask = map_to_use.cpu().numpy() == label_id
    visual_mask = (mask * 255).astype(np.uint8)
    contours, hierarchy = cv.findContours(visual_mask, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    return contours, hierarchy
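# A minimal sketch of using contour_map (the helper name draw_contours_over_image
# is mine, not part of the original script): draw a segment's contours over the
# source image with OpenCV, which works on BGR uint8 arrays.
def draw_contours_over_image(image_object, map_to_use, label_id):
    contours, _ = contour_map(map_to_use, label_id)
    canvas = cv.cvtColor(np.array(image_object), cv.COLOR_RGB2BGR)
    cv.drawContours(canvas, contours, -1, (0, 255, 0), 2)  # green, 2 px outlines
    plt.imshow(cv.cvtColor(canvas, cv.COLOR_BGR2RGB))
    plt.show()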