# NOTE: removed web-viewer artifact lines ("zzuxzt's picture",
# "Upload folder using huggingface_hub", "5523920 verified") --
# they were file-listing residue, not part of the script, and broke parsing.
#!/usr/bin/env python
#
# file: $ISIP_EXP/tuh_dpath/exp_0074/scripts/decode.py
#
# revision history:
# 20190925 (TE): first version
#
# usage:
# python decode.py odir mfile data
#
# arguments:
# odir: the directory where the hypotheses will be stored
# mfile: input model file
# data: the input data list to be decoded
#
# This script decodes data using a simple MLP model.
#------------------------------------------------------------------------------
# import pytorch modules
#
import torch
import torch.nn as nn
from tqdm import tqdm
# import the model and all of its variables/functions
#
from model import *
# visualize:
import numpy as np
# import modules
#
import sys
import os
from sklearn.metrics import explained_variance_score, mean_squared_error
#-----------------------------------------------------------------------------
#
# global variables are listed here
#
#-----------------------------------------------------------------------------
model_dir = './model/semantic_SemanticCNN_model.pth' # the path of model storage: 1400 is the best one
NUM_ARGS = 3
HYP_EXT = ".hyp"
GRT_EXT = ".grt"
# general global values
#
SPACE = " "
NEW_LINE = "\n"
#------------------------------------------------------------------------------
#
# the main program starts here
#
#------------------------------------------------------------------------------
### explained variance:
def explained_variance(input, target):
    """Return the explained-variance score of prediction *input* vs *target*.

    Matches the standard definition (and sklearn.metrics.explained_variance_score):

        EV = 1 - Var(target - input) / Var(target)

    arguments:
     input: predicted values (array-like)
     target: ground-truth values (array-like)

    return: float <= 1.0; 1.0 means the prediction explains all variance.
    """
    # bug fix: the denominator must be the variance of the GROUND TRUTH
    # (target), not of the prediction (input) -- the original divided by
    # np.var(input), which does not match the explained-variance definition.
    input = np.asarray(input)
    target = np.asarray(target)
    ev = 1 - np.var(target - input) / np.var(target)
    return ev
# function: main
#
# arguments:
#  argv: command-line arguments (minus program name): [odir, mfile, data]
#
# return: True on success; the process exits non-zero on usage / I/O errors
#
# This method is the main function: it loads a trained SemanticCNN checkpoint,
# decodes every sample in the input data list, writes the predicted velocity
# pairs to <odir>/<name>.hyp and the ground-truth pairs to <odir>/<name>.grt,
# and prints the average MSE loss over the set.
#
def main(argv):

    # ensure we have the correct amount of arguments
    #
    if len(argv) != NUM_ARGS:
        # bug fix: the old message named the training script and its
        # arguments; this is the decode script (see the file header)
        print("usage: python decode.py [ODIR] [MDL_PATH] [DATA_PATH]")
        sys.exit(-1)

    # define local variables
    #
    odir = argv[0]       # output directory for hypotheses
    mdl_path = argv[1]   # trained model checkpoint file
    pTest = argv[2]      # data list to be decoded

    # if the odir doesn't exist, we make it
    #
    os.makedirs(odir, exist_ok=True)

    # derive the hypothesis / ground-truth file names from the data list name
    #
    base = os.path.splitext(os.path.basename(pTest))[0]
    hyp_name = base + HYP_EXT
    grt_name = base + GRT_EXT

    # set the device to use GPU if available
    #
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # build the evaluation dataset / loader; batch_size=1 so each output line
    # corresponds to exactly one input sample
    #
    eval_dataset = NavDataset(pTest, 'test')
    eval_dataloader = torch.utils.data.DataLoader(eval_dataset, batch_size=1,
                                                  shuffle=False, drop_last=True)

    # instantiate a model and move it to the device
    #
    model = SemanticCNN(Bottleneck, [2, 1, 1])
    model.to(device)

    # set the model to evaluate
    #
    model.eval()

    # set the loss criterion (sum of squared errors per batch)
    #
    criterion = nn.MSELoss(reduction='sum')
    criterion.to(device)

    # load the weights
    #
    checkpoint = torch.load(mdl_path, map_location=device)
    model.load_state_dict(checkpoint['model'])

    # open the output files
    #
    try:
        ofile = open(os.path.join(odir, hyp_name), 'w+')
        vel_file = open(os.path.join(odir, grt_name), 'w+')
    except IOError as e:
        print(os.path.join(odir, hyp_name))
        print("[%s]: %s" % (hyp_name, e.strerror))
        sys.exit(-1)

    # decode counters
    #
    counter = 0
    running_loss = 0.0

    # bug fix: len(dataloader) is the exact number of batches; the old
    # int(len(dataset)/batch_size) was a floor despite the "ceiling" comment
    #
    num_batches = len(eval_dataloader)

    # the 'with' guarantees both files are closed even if decoding raises
    # (the original leaked them on any exception inside the loop)
    #
    with ofile, vel_file, torch.no_grad():
        for batch in tqdm(eval_dataloader, total=num_batches):
            counter += 1

            # collect the samples as a batch and move them to the device
            #
            scan_maps = batch['scan_map'].to(device)
            semantic_maps = batch['semantic_map'].to(device)
            sub_goals = batch['sub_goal'].to(device)
            velocities = batch['velocity'].to(device)

            # feed the network the batch
            #
            output = model(scan_maps, semantic_maps, sub_goals)

            # get the loss; average across GPUs when data-parallel
            #
            loss = criterion(output, velocities)
            if torch.cuda.device_count() > 1:
                loss = loss.mean()
            running_loss += loss.item()

            # write the predicted and ground-truth (vx, vy) pairs;
            # convert the prediction to numpy once instead of twice per line
            #
            pred = output.detach().cpu().numpy()
            ofile.write(str(float(pred[0, 0])) + SPACE +
                        str(float(pred[0, 1])) + NEW_LINE)
            vel_file.write(str(float(velocities[0, 0])) + SPACE +
                           str(float(velocities[0, 1])) + NEW_LINE)

    # report the average loss; bug fix: guard against an empty data list
    # (the original divided by zero)
    #
    if counter > 0:
        val_loss = running_loss / counter
        print('Validation set: Average loss: {:.4f}'.format(val_loss))
    else:
        print('Validation set: no samples decoded')

    # exit gracefully
    #
    return True
#
# end of function
#
# script entry point: forward the command-line arguments (minus the program
# name) to main
#
if __name__ == '__main__':
    main(sys.argv[1:])
#
# end of file