|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import torch |
|
|
import torch.nn as nn |
|
|
from tqdm import tqdm |
|
|
|
|
|
|
|
|
import matplotlib.pyplot as plt |
|
|
import numpy as np |
|
|
|
|
|
|
|
|
import matplotlib

# apply the ggplot style sheet to every figure this script produces
matplotlib.style.use('ggplot')
|
|
|
|
|
|
|
|
|
|
|
from model import * |
|
|
|
|
|
|
|
|
import sys |
|
|
import os |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# command-line interface constants
NUM_ARGS = 3
SPACE = " "

# lidar scan geometry / model dimensions.
#
# fix: POINTS was defined twice with the same value (1081); the duplicate
# definition has been removed.
#
# NOTE: "AGNLE" is a typo for "ANGLE", but the names are kept as-is so
# that existing references elsewhere in the file keep working.
POINTS = 1081
NUM_CLASSES = 9
NUM_INPUT_CHANNELS = 1
NUM_OUTPUT_CHANNELS = NUM_CLASSES

# sensor field of view: +/- 135 degrees (+/- 3*pi/4 radians)
AGNLE_MIN = -2.356194496154785
AGNLE_MAX = 2.356194496154785

# maximum sensor range (presumably meters — TODO confirm with the dataset)
RANGE_MAX = 60.0
|
|
|
|
|
|
|
|
|
|
|
set_seed(SEED1) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def main(argv):
    """Decode an evaluation set with a trained S3Net model and plot results.

    For each scan in the evaluation set the model is run on num_samples
    replicated copies of the input (Monte Carlo sampling), the per-point
    class predictions are majority-voted across the samples, and two polar
    scatter plots are written to the output directory: the ground-truth
    segmentation and the predicted one.

    Arguments:
     argv: command-line arguments without the program name:
           [0] ODIR     - output directory for the generated plots
           [1] MDL_PATH - path to a checkpoint (a dict with key 'model')
           [2] EVAL_SET - evaluation set identifier passed to VaeTestDataset

    Returns True on success; exits with status 1 on a usage error.
    """

    # validate the command line
    if len(argv) != NUM_ARGS:
        print("usage: python nedc_decode_mdl.py [ODIR] [MDL_PATH] [EVAL_SET]")
        sys.exit(1)

    odir = argv[0]
    mdl_path = argv[1]
    fImg = argv[2]

    # create the output directory if necessary
    if not os.path.exists(odir):
        os.makedirs(odir)

    # run on the GPU when one is available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # batch_size=1: one scan is decoded (and plotted) at a time
    eval_dataset = VaeTestDataset(fImg, 'dev')
    eval_dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=1,
                                                  shuffle=False,
                                                  drop_last=True)

    # build the model and restore the trained weights
    model = S3Net(input_channels=NUM_INPUT_CHANNELS,
                  output_channels=NUM_OUTPUT_CHANNELS)
    model.to(device)
    model.eval()

    checkpoint = torch.load(mdl_path, map_location=device)
    model.load_state_dict(checkpoint['model'])

    # number of stochastic forward passes per scan
    num_samples = 32

    num_batches = int(len(eval_dataset) / eval_dataloader.batch_size)
    with torch.no_grad():
        for i, batch in tqdm(enumerate(eval_dataloader), total=num_batches):

            # move the batch to the compute device
            scans = batch['scan'].to(device)
            intensities = batch['intensity'].to(device)
            angle_incidence = batch['angle_incidence'].to(device)
            labels = batch['label'].to(device)

            # replicate the single scan along the batch dimension so one
            # forward pass yields num_samples stochastic predictions
            inputs_samples = scans.repeat(num_samples, 1, 1)
            intensity_samples = intensities.repeat(num_samples, 1, 1)
            angle_incidence_samples = angle_incidence.repeat(num_samples,
                                                             1, 1)

            semantic_scan, semantic_channels, kl_loss = \
                model(inputs_samples, intensity_samples,
                      angle_incidence_samples)

            # per-point class prediction for each sample, then a majority
            # vote (mode) over the sample dimension.
            #
            # bug fix: the original converted to numpy first and then
            # called .mode(0).values, which is a torch-only API — numpy
            # arrays have no .mode() method, so decoding crashed here.
            # take the mode on the tensor before moving to the CPU.
            semantic_scans_mx = semantic_scan.argmax(dim=1)
            semantic_scans_mx_mean = \
                semantic_scans_mx.mode(0).values.cpu().numpy()

            # polar coordinates of the scan points
            r = scans.cpu().numpy().reshape(POINTS)
            theta = np.linspace(AGNLE_MIN, AGNLE_MAX, num=POINTS,
                                endpoint=True)

            # plot the ground-truth segmentation.
            #
            # bug fix: plots now go under odir (which the original created
            # but never used) instead of a hard-coded ./output/; labels are
            # moved to the CPU before handing them to matplotlib.
            _plot_semantic_polar(
                theta, r, labels.cpu().numpy().reshape(POINTS),
                os.path.join(odir,
                             "semantic_ground_truth_" + str(i) + ".jpg"))

            # plot the predicted segmentation.
            #
            # bug fix: the original re-ran np.insert on the already padded
            # theta/r arrays for this second plot, so their lengths no
            # longer matched the color array; the helper pads fresh copies.
            _plot_semantic_polar(
                theta, r, semantic_scans_mx_mean.reshape(POINTS),
                os.path.join(odir, "semantic_s3net_" + str(i) + ".jpg"))

            print(i)

    # exit gracefully
    return True


def _plot_semantic_polar(theta, r, smap, fname):
    """Save a polar scatter plot of one semantic lidar scan.

    Arguments:
     theta: (POINTS,) beam angles in radians
     r: (POINTS,) measured ranges
     smap: (POINTS,) integer class label per point
     fname: path of the output image
    """

    # pad one dummy point (class 0 at angle pi, range 1) so the color
    # normalization always includes the 'Other' class; np.insert returns
    # new arrays, so the caller's inputs are untouched
    theta = np.insert(theta, -1, np.pi)
    r = np.insert(r, -1, 1)
    smap = np.insert(smap, -1, 0)
    label_val = np.unique(smap).astype(int)

    fig = plt.figure(figsize=(12, 12))
    ax = fig.add_subplot(1, 1, 1, projection='polar', facecolor='seashell')
    scatter = ax.scatter(theta, r, c=smap, s=6, cmap='nipy_spectral',
                         alpha=0.95, linewidth=10)
    ax.set_xticks(np.linspace(AGNLE_MIN, AGNLE_MAX, 8, endpoint=True))
    ax.set_thetamin(-135)
    ax.set_thetamax(135)
    ax.set_yticklabels([])

    # NOTE(review): 10 class names here vs NUM_CLASSES = 9 — verify the
    # actual label range against the dataset
    classes = ['Other', 'Chair', 'Door', 'Elevator', 'Person', 'Pillar',
               'Sofa', 'Table', 'Trash bin', 'Wall']
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    plt.legend(handles=scatter.legend_elements(
                   num=[j for j in label_val])[0],
               labels=[classes[j] for j in label_val],
               bbox_to_anchor=(0.5, -0.08), loc='lower center', fontsize=18)
    ax.grid(False)
    ax.set_theta_offset(np.pi / 2)

    # save, then close the figure so memory does not grow across the
    # evaluation loop (bug fix: the original never closed its figures)
    plt.savefig(fname, bbox_inches='tight')
    plt.close(fig)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# script entry point: strip the program name and hand the remaining
# command-line arguments to main()
if __name__ == '__main__':
    main(sys.argv[1:])
|
|
|
|
|
|
|
|
|