Datasets:

ArXiv:
File size: 6,916 Bytes
4e02fa8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
#!/usr/bin/env python
#
# file: $ISIP_EXP/tuh_dpath/exp_0074/scripts/decode.py
#
# revision history:
#  20190925 (TE): first version
#
# usage:
#  python decode.py
#
# configuration:
#  this script takes no command-line arguments; paths and sensor parameters
#  are set via the module-level constants below (DATASET_ODIR, DATASET_NAME,
#  SEMANTIC_MASK_ODIR, POINTS, AGNLE_MIN/MAX, RANGE_MAX)
#
# This script loads 2D lidar scans with per-point semantic labels and saves
# polar scatter-plot visualizations of the semantic masks as .png images.
#------------------------------------------------------------------------------

# import pytorch modules
#
import torch
from tqdm import tqdm

# visualize:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
matplotlib.style.use('ggplot')
import sys
import os


################ customized parameters #################
################ please modify them based on your dataset #################
# NOTE(review): "~" is NOT expanded by open()/os.path.exists(); run this path
# through os.path.expanduser before use, or put an absolute path here.
DATASET_ODIR = "~/semantic2d_data/2024-04-04-12-16-41"  # the directory path of the raw data
DATASET_NAME = "train" # select the train, dev, and test 
SEMANTIC_MASK_ODIR = "./output"  # where the rendered semantic-mask .png files are saved

# Hokuyo UTM-30LX-EW:
# NOTE(review): "AGNLE" is a misspelling of "ANGLE"; the names are kept
# because they are referenced throughout this file.
POINTS = 1081 # the number of lidar points
AGNLE_MIN = -2.356194496154785  # first beam angle, radians (-135 deg)
AGNLE_MAX = 2.356194496154785   # last beam angle, radians (+135 deg)
RANGE_MAX = 60.0                # maximum sensing range, meters

# alternative sensor configurations — enable exactly one block and disable
# the Hokuyo block above:
# # WLR-716:
# POINTS = 811 # the number of lidar points
# AGNLE_MIN = -2.356194496154785
# AGNLE_MAX = 2.356194496154785
# RANGE_MAX = 25.0
# # RPLIDAR-S2:
# POINTS = 1972 # the number of lidar points
# AGNLE_MIN = -3.1415927410125732
# AGNLE_MAX = 3.1415927410125732
# RANGE_MAX = 16.0

################# read dataset ###################
NEW_LINE = "\n"  # line separator used when parsing the dataset list file
class Semantic2DLidarDataset(torch.utils.data.Dataset):
    """Dataset of 2D lidar scans, per-point intensities, and semantic labels.

    Expects ``img_path`` to contain a ``<file_name>.txt`` list of ``.npy``
    file names, plus ``scans_lidar/``, ``intensities_lidar/`` and
    ``semantic_label/`` subdirectories holding the corresponding arrays.
    Each item is a dict of float tensors: ``scan``, ``intensity``, ``label``.
    """

    def __init__(self, img_path, file_name):
        # per-modality file-name lists, index-aligned with each other
        self.scan_file_names = []
        self.intensity_file_names = []
        self.vel_file_names = []   # unused here; kept for interface compatibility
        self.label_file_names = []
        # range parameters (currently informational only)
        self.s_max = 30
        self.s_min = 0

        # open train.txt / dev.txt; 'with' guarantees the handle is closed
        # even if parsing raises
        with open(img_path + '/' + file_name + '.txt', 'r') as fp_file:
            for line in fp_file.read().split(NEW_LINE):
                # only lines naming .npy files are dataset entries
                if '.npy' in line:
                    self.scan_file_names.append(img_path + '/scans_lidar/' + line)
                    self.intensity_file_names.append(img_path + '/intensities_lidar/' + line)
                    self.label_file_names.append(img_path + '/semantic_label/' + line)

        self.length = len(self.scan_file_names)
        print("dataset length: ", self.length)

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        """Load, sanitize and tensorize the idx-th scan/intensity/label triple."""
        # load the raw arrays (the old pre-allocated zero arrays were dead
        # code — they were immediately overwritten by np.load)
        scan = np.load(self.scan_file_names[idx])
        intensity = np.load(self.intensity_file_names[idx])
        label = np.load(self.label_file_names[idx])

        # replace NaN/inf readings with 0 so downstream math stays finite
        scan[np.isnan(scan)] = 0.
        scan[np.isinf(scan)] = 0.

        intensity[np.isnan(intensity)] = 0.
        intensity[np.isinf(intensity)] = 0.

        # ranges at/above 15 m are treated as no-return and zeroed
        # NOTE(review): presumably a sensor-specific cutoff — confirm against
        # the active lidar configuration
        scan[scan >= 15] = 0.

        label[np.isnan(label)] = 0.
        label[np.isinf(label)] = 0.

        # transfer to pytorch tensors:
        data = {
                'scan': torch.FloatTensor(scan),
                'intensity': torch.FloatTensor(intensity),
                'label': torch.FloatTensor(label),
                }

        return data

#------------------------------------------------------------------------------
#
# the main program starts here
#
#------------------------------------------------------------------------------

# function: main
#
# arguments: none
#
# return: none
#
# This method is the main function.
#
if __name__ == '__main__':
    # input parameters; expanduser so a leading "~" in DATASET_ODIR resolves
    # to $HOME (open()/os.path.exists do not expand it themselves)
    dataset_odir = os.path.expanduser(DATASET_ODIR)
    dataset_name = DATASET_NAME
    semantic_mask_odir = SEMANTIC_MASK_ODIR
    # create the folder for the semantic label masks:
    if not os.path.exists(semantic_mask_odir):
        os.makedirs(semantic_mask_odir)

    # read dataset:
    eval_dataset = Semantic2DLidarDataset(dataset_odir, dataset_name)
    eval_dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=1, num_workers=2,
                                                  shuffle=False, drop_last=True, pin_memory=True)

    # total number of batches; the DataLoader already accounts for
    # batch_size and drop_last
    num_batches = len(eval_dataloader)
    for i, batch in tqdm(enumerate(eval_dataloader), total=num_batches):
        # visualize only every 200th scan:
        if i % 200 == 0:
            scans = batch['scan'].detach().cpu().numpy()
            labels = batch['label'].detach().cpu().numpy()

            # lidar ranges and the matching beam angles (endpoint must be a
            # bool — the original passed the string 'true', which only worked
            # because any non-empty string is truthy)
            r = scans.reshape(POINTS)
            theta = np.linspace(AGNLE_MIN, AGNLE_MAX, num=POINTS, endpoint=True)

            ## plot semantic label on a polar scatter:
            fig = plt.figure(figsize=(12, 12))
            ax = fig.add_subplot(1, 1, 1, projection='polar', facecolor='seashell')
            smap = labels.reshape(POINTS)

            # add one dummy background point so class 0 always appears:
            theta = np.insert(theta, -1, np.pi)
            r = np.insert(r, -1, 1)
            smap = np.insert(smap, -1, 0)
            label_val = np.unique(smap).astype(int)
            print("label_values: ", label_val)

            scatter = ax.scatter(theta, r, c=smap, s=6, cmap='nipy_spectral',
                                 alpha=0.95, linewidth=10)
            ax.set_xticks(np.linspace(AGNLE_MIN, AGNLE_MAX, 8, endpoint=True))
            ax.set_thetamin(-135)
            ax.set_thetamax(135)
            ax.set_yticklabels([])
            # legend listing only the classes present in this scan
            classes = ['Other', 'Chair', 'Door', 'Elevator', 'Person', 'Pillar', 'Sofa', 'Table', 'Trash bin', 'Wall']
            plt.xticks(fontsize=16)
            plt.yticks(fontsize=16)
            plt.legend(handles=scatter.legend_elements(num=[j for j in label_val])[0],
                       labels=[classes[j] for j in label_val],
                       bbox_to_anchor=(0.5, -0.08), loc='lower center', fontsize=18)
            ax.grid(False)
            ax.set_theta_offset(np.pi / 2)

            input_img_name = semantic_mask_odir + "/semantic_mask" + str(i) + ".png"
            plt.savefig(input_img_name, bbox_inches='tight')
            plt.show()
            # close the figure so matplotlib releases it; otherwise figures
            # accumulate across iterations and memory grows unboundedly
            plt.close(fig)

            print(i)