File size: 4,921 Bytes
352cafd
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
#!/usr/bin/env python2
'''
Visualization demo for panoptic COCO sample data.

The code shows an example of color generation for panoptic data (with
"generate_new_colors" set to True). A distinct color is used for each segment,
chosen to be close to the color of the corresponding semantic class.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os, sys
import numpy as np
import json
import argparse

import PIL.Image as Image
import matplotlib.pyplot as plt
from skimage.segmentation import find_boundaries

from panopticapi.utils import IdGenerator, rgb2id

from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import ColorMode, Visualizer
from detectron2.utils.logger import setup_logger
from detectron2.projects.deeplab import add_deeplab_config
from detectron2.data.datasets.builtin_meta import ADE20K_PAN_SEG_CATEGORIES

from OPSNet.mask2former import add_maskformer2_config

def setup_cfg(args):
    """Build a frozen detectron2 config from the parsed CLI arguments.

    Loads the YAML file named by ``args.config_file``, applies the
    ``KEY VALUE`` overrides from ``args.opts``, and freezes the result.
    """
    config = get_cfg()
    # Register the extra config keys the DeepLab / MaskFormer2 projects define
    # before merging, otherwise merge_from_file would reject unknown keys.
    add_deeplab_config(config)
    add_maskformer2_config(config)
    config.merge_from_file(args.config_file)
    config.merge_from_list(args.opts)
    config.freeze()
    return config

def get_parser():
    """Build the argument parser for the maskformer2 demo CLI."""
    p = argparse.ArgumentParser(description="maskformer2 demo for builtin configs")
    p.add_argument(
        "--config-file",
        metavar="FILE",
        default="configs/coco/panoptic-segmentation/maskformer2_R50_bs16_50ep.yaml",
        help="path to config file",
    )
    p.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
    p.add_argument("--video-input", help="Path to video file.")
    p.add_argument(
        "--input",
        nargs="+",
        help="A list of space separated input images; "
        "or a single glob pattern such as 'directory/*.jpg'",
    )
    p.add_argument(
        "--output",
        help="A file or directory to save output visualizations. "
        "If not given, will show output in an OpenCV window.",
    )
    p.add_argument(
        "--confidence-threshold",
        default=0.5,
        type=float,
        help="Minimum score for instance predictions to be shown",
    )
    # Everything after --opts is forwarded verbatim as config KEY VALUE pairs.
    p.add_argument(
        "--opts",
        nargs=argparse.REMAINDER,
        default=[],
        help="Modify config options using the command-line 'KEY VALUE' pairs",
    )
    return p

if __name__ == "__main__":
    args = get_parser().parse_args()
    setup_logger(name="fvcore")
    logger = setup_logger()
    logger.info("Arguments: " + str(args))

    cfg = setup_cfg(args)

    # Whether the colors already stored in the segmentation PNGs are reused,
    # or fresh per-segment colors are generated from the category palette.
    generate_new_colors = True

    json_file = '/group/20018/gavinqi/zhao/OPSNet/output/inference/ade_predictions.json'
    segmentations_folder = '/group/20018/gavinqi/zhao/OPSNet/output/'
    img_folder = '/group/20018/gavinqi/zhao/datasets/ADEChallengeData2016/images/validation'
    panoptic_coco_categories = './panoptic_coco_categories.json'

    with open(json_file, 'r') as f:
        coco_d = json.load(f)

    # Map category id -> category record (also fixes the 'categegories' typo).
    categories = {category['id']: category for category in ADE20K_PAN_SEG_CATEGORIES}

    print('+' * 100)
    # Visualize the first 10 predicted annotations only.
    for pred_ann in coco_d['annotations'][:10]:
        image_id = pred_ann['image_id']
        # Load the input image that corresponds to this annotation, plus the
        # RGB-encoded panoptic segmentation PNG produced by inference.
        img = np.array(Image.open(os.path.join(img_folder, image_id + '.jpg')))
        segmentation = np.array(
            Image.open(os.path.join(segmentations_folder, image_id + '.png')),
            dtype=np.uint8
        )
        # Decode the RGB triplets into per-pixel integer segment ids.
        segmentation_id = rgb2id(segmentation)

        # Find segment boundaries so they can later be drawn in black.
        boundaries = find_boundaries(segmentation_id, mode='thick')

        # BUG FIX: segments_info must be bound in both branches; the original
        # only assigned it inside the generate_new_colors branch, so the
        # draw call below raised NameError when that flag was False.
        segments_info = pred_ann['segments_info']
        if generate_new_colors:
            # Wipe the stored colors and repaint each segment with a color
            # derived from its semantic category.
            segmentation[:, :, :] = 0
            color_generator = IdGenerator(categories)
            print('*' * 100)
            print(segments_info)
            for segment_info in segments_info:
                color = color_generator.get_color(segment_info['category_id'])
                mask = segmentation_id == segment_info['id']
                segmentation[mask] = color

        # Depict segment boundaries in black.
        segmentation[boundaries] = [0, 0, 0]
        metadata = MetadataCatalog.get(
            cfg.DATASETS.TEST[0] if len(cfg.DATASETS.TEST) else "__unused"
        )
        # BUG FIX: the original line ended with a stray comma, which made
        # instance_mode_local a 1-tuple instead of a ColorMode value.
        instance_mode_local = ColorMode.IMAGE
        # BUG FIX: the original passed the undefined name `image`; the loaded
        # array is `img`.
        visualizer = Visualizer(img, metadata, instance_mode=instance_mode_local)
        vis_output = visualizer.draw_panoptic_seg_predictions(
            segmentation_id, segments_info
        )
        # The original computed vis_output and silently discarded it; honor
        # the --output flag by saving one PNG per image when it is given.
        if args.output:
            os.makedirs(args.output, exist_ok=True)
            vis_output.save(os.path.join(args.output, image_id + '.png'))